Posted to commits@carbondata.apache.org by ma...@apache.org on 2018/05/17 14:30:14 UTC

[01/50] [abbrv] carbondata git commit: [CARBONDATA-2443][SDK]Added the test cases for avro multilevel complex type support

Repository: carbondata
Updated Branches:
  refs/heads/spark-2.3 3262230cb -> 8fe165668


[CARBONDATA-2443][SDK]Added the test cases for avro multilevel complex type support

This closes #2276
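
For context, the new tests below build nested Carbon schemas by composing Field and StructField objects. A minimal sketch of the 3-level "array of struct of array of int" case (mirroring the doorNum field in the added test code) might look like:

    import java.util
    import org.apache.carbondata.core.metadata.datatype.{DataTypes, StructField}
    import org.apache.carbondata.sdk.file.Field

    // array<struct<street:string, city:string, FloorNum:array<int>>> -- three levels deep
    val fld = new util.ArrayList[StructField]
    fld.add(new StructField("street", DataTypes.STRING))
    fld.add(new StructField("city", DataTypes.STRING))
    fld.add(new StructField("FloorNum", DataTypes.createArrayType(DataTypes.INT)))

    val fld2 = new util.ArrayList[StructField]
    fld2.add(new StructField("my_address", DataTypes.createStructType(fld), fld))
    val doorNum = new Field("doorNum", DataTypes.createArrayType(fld2.get(0).getDataType), fld2)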


Project: http://git-wip-us.apache.org/repos/asf/carbondata/repo
Commit: http://git-wip-us.apache.org/repos/asf/carbondata/commit/74ea24d1
Tree: http://git-wip-us.apache.org/repos/asf/carbondata/tree/74ea24d1
Diff: http://git-wip-us.apache.org/repos/asf/carbondata/diff/74ea24d1

Branch: refs/heads/spark-2.3
Commit: 74ea24d141c9a1dd1dd820750ca0387d2ff64171
Parents: ec33c11
Author: ajantha-bhat <aj...@gmail.com>
Authored: Mon May 7 14:39:40 2018 +0530
Committer: ravipesala <ra...@gmail.com>
Committed: Tue May 8 16:50:38 2018 +0530

----------------------------------------------------------------------
 .../schema/table/TableSchemaBuilder.java        |   2 +-
 .../TestNonTransactionalCarbonTable.scala       | 510 ++++++++++++++++++-
 2 files changed, 499 insertions(+), 13 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/carbondata/blob/74ea24d1/core/src/main/java/org/apache/carbondata/core/metadata/schema/table/TableSchemaBuilder.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/metadata/schema/table/TableSchemaBuilder.java b/core/src/main/java/org/apache/carbondata/core/metadata/schema/table/TableSchemaBuilder.java
index e3c07fa..ca082e1 100644
--- a/core/src/main/java/org/apache/carbondata/core/metadata/schema/table/TableSchemaBuilder.java
+++ b/core/src/main/java/org/apache/carbondata/core/metadata/schema/table/TableSchemaBuilder.java
@@ -200,7 +200,7 @@ public class TableSchemaBuilder {
   }
 
   private String getColNameForArray(String parentFieldName) {
-    if (!parentFieldName.contains(".val")) {
+    if (!parentFieldName.endsWith(".val")) {
       return "val";
     } else {
       String[] splits = parentFieldName.split("val");
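
For illustration (the field name here is hypothetical), the stricter check matters when ".val" occurs in the middle of a nested column name rather than at its end:

    val parent = "address.validity"   // hypothetical parent field name
    parent.contains(".val")           // true  -> old check wrongly fell through to the split branch
    parent.endsWith(".val")           // false -> new check returns the default child name "val"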

http://git-wip-us.apache.org/repos/asf/carbondata/blob/74ea24d1/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/createTable/TestNonTransactionalCarbonTable.scala
----------------------------------------------------------------------
diff --git a/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/createTable/TestNonTransactionalCarbonTable.scala b/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/createTable/TestNonTransactionalCarbonTable.scala
index 6b02d5a..c641ed3 100644
--- a/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/createTable/TestNonTransactionalCarbonTable.scala
+++ b/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/createTable/TestNonTransactionalCarbonTable.scala
@@ -735,7 +735,6 @@ class TestNonTransactionalCarbonTable extends QueryTest with BeforeAndAfterAll {
 
     val json = """ {"name":"bob", "age":10, "address" : {"street":"abc", "city":"bang"}} """
 
-
     val fields = new Array[Field](3)
     fields(0) = new Field("name", DataTypes.STRING)
     fields(1) = new Field("age", DataTypes.INT)
@@ -902,9 +901,6 @@ class TestNonTransactionalCarbonTable extends QueryTest with BeforeAndAfterAll {
         |{"street":"ghi","city":"city3"},
         |{"street":"jkl","city":"city4"}]} """.stripMargin
 
-
-
-
     val fields = new Array[Field](3)
     fields(0) = new Field("name", DataTypes.STRING)
     fields(1) = new Field("age", DataTypes.INT)
@@ -988,20 +984,16 @@ class TestNonTransactionalCarbonTable extends QueryTest with BeforeAndAfterAll {
                  |	}
                  |} """.stripMargin
 
-
-
-
-
     val fields = new Array[Field](3)
     fields(0) = new Field("name", DataTypes.STRING)
     fields(1) = new Field("age", DataTypes.INT)
 
-    val fld1 = new util.ArrayList[StructField]
-    fld1.add(new StructField("eachDoorNum", DataTypes.INT))
-
     val fld2 = new util.ArrayList[StructField]
     fld2.add(new StructField("street", DataTypes.STRING))
     fld2.add(new StructField("city", DataTypes.STRING))
+
+    val fld1 = new util.ArrayList[StructField]
+    fld1.add(new StructField("eachDoorNum", DataTypes.INT))
     fld2.add(new StructField("doorNum", DataTypes.createArrayType(DataTypes.INT), fld1))
 
     fields(2) = new Field("address","struct",fld2)
@@ -1050,7 +1042,8 @@ class TestNonTransactionalCarbonTable extends QueryTest with BeforeAndAfterAll {
 
     sql("DROP TABLE sdkOutputTable")
     // drop table should not delete the files
-    assert(new File(writerPath).exists())
+    assert(new File(writerPath).listFiles().length > 0)
+    cleanTestData()
   }
 
   test("Read sdk writer Avro output with both Array and Struct Type") {
@@ -1077,6 +1070,7 @@ class TestNonTransactionalCarbonTable extends QueryTest with BeforeAndAfterAll {
       Row("bob", 10, Row("abc","bang"), mutable.WrappedArray.newBuilder[Int].+=(1,2,3,4))))
     sql("DROP TABLE sdkOutputTable")
     // drop table should not delete the files
+    cleanTestData()
   }
 
 
@@ -1102,6 +1096,7 @@ class TestNonTransactionalCarbonTable extends QueryTest with BeforeAndAfterAll {
 
     sql("DROP TABLE sdkOutputTable")
     // drop table should not delete the files
+    cleanTestData()
   }
 
 
@@ -1128,6 +1123,497 @@ class TestNonTransactionalCarbonTable extends QueryTest with BeforeAndAfterAll {
 
     sql("DROP TABLE sdkOutputTable")
     // drop table should not delete the files
+    cleanTestData()
+  }
+
+  // test multi level -- 3 levels [array of struct of array of int]
+  def buildAvroTestDataMultiLevel3(rows: Int, options: util.Map[String, String]): Any = {
+    FileUtils.deleteDirectory(new File(writerPath))
+
+    val mySchema = """ {
+                     |	"name": "address",
+                     |	"type": "record",
+                     |	"fields": [
+                     |		{
+                     |			"name": "name",
+                     |			"type": "string"
+                     |		},
+                     |		{
+                     |			"name": "age",
+                     |			"type": "int"
+                     |		},
+                     |		{
+                     |			"name": "doorNum",
+                     |			"type": {
+                     |				"type": "array",
+                     |				"items": {
+                     |					"type": "record",
+                     |					"name": "my_address",
+                     |					"fields": [
+                     |						{
+                     |							"name": "street",
+                     |							"type": "string"
+                     |						},
+                     |						{
+                     |							"name": "city",
+                     |							"type": "string"
+                     |						},
+                     |						{
+                     |							"name": "FloorNum",
+                     |							"type": {
+                     |								"type": "array",
+                     |								"items": {
+                     |									"name": "floor",
+                     |									"type": "int"
+                     |								}
+                     |							}
+                     |						}
+                     |					]
+                     |				}
+                     |			}
+                     |		}
+                     |	]
+                     |} """.stripMargin
+    val json =
+      """ {
+        |	"name": "bob",
+        |	"age": 10,
+        |	"doorNum": [
+        |		{
+        |			"street": "abc",
+        |			"city": "city1",
+        |			"FloorNum": [0,1,2]
+        |		},
+        |		{
+        |			"street": "def",
+        |			"city": "city2",
+        |			"FloorNum": [3,4,5]
+        |		},
+        |		{
+        |			"street": "ghi",
+        |			"city": "city3",
+        |			"FloorNum": [6,7,8]
+        |		},
+        |		{
+        |			"street": "jkl",
+        |			"city": "city4",
+        |			"FloorNum": [9,10,11]
+        |		}
+        |	]
+        |} """.stripMargin
+
+    val fields = new Array[Field](3)
+    fields(0) = new Field("name", DataTypes.STRING)
+    fields(1) = new Field("age", DataTypes.INT)
+
+    val fld = new util.ArrayList[StructField]
+    fld.add(new StructField("street", DataTypes.STRING))
+    fld.add(new StructField("city", DataTypes.STRING))
+    fld.add(new StructField("FloorNum", DataTypes.createArrayType(DataTypes.INT)))
+
+    val fld2 = new util.ArrayList[StructField]
+    fld2.add(new StructField("my_address", DataTypes.createStructType(fld), fld))
+    fields(2) = new Field("doorNum", DataTypes.createArrayType(fld2.get(0).getDataType), fld2)
+
+    WriteFilesWithAvroWriter(rows, mySchema, json, fields)
+  }
+
+  def buildAvroTestDataMultiLevel3Type(): Any = {
+    FileUtils.deleteDirectory(new File(writerPath))
+    buildAvroTestDataMultiLevel3(3, null)
+  }
+
+  // test multi level -- 3 levels [array of struct of array of int]
+  test("test multi level support : array of struct of array of int") {
+    buildAvroTestDataMultiLevel3Type()
+    assert(new File(writerPath).exists())
+    sql("DROP TABLE IF EXISTS sdkOutputTable")
+    sql(
+      s"""CREATE EXTERNAL TABLE sdkOutputTable STORED BY 'carbondata' LOCATION
+         |'$writerPath' """.stripMargin)
+
+    sql("select * from sdkOutputTable").show(false)
+
+    // TODO: Add a validation
+    /*
+    +----+---+-----------------------------------------------------------------------------------+
+    |name|age|doorNum
+                                                               |
+    +----+---+-----------------------------------------------------------------------------------+
+    |bob |10 |[[abc,city1,WrappedArray(0, 1, 2)], [def,city2,WrappedArray(3, 4, 5)], [ghi,city3,
+    WrappedArray(6, 7, 8)], [jkl,city4,WrappedArray(9, 10, 11)]]|
+    |bob |10 |[[abc,city1,WrappedArray(0, 1, 2)], [def,city2,WrappedArray(3, 4, 5)], [ghi,city3,
+    WrappedArray(6, 7, 8)], [jkl,city4,WrappedArray(9, 10, 11)]]|
+    |bob |10 |[[abc,city1,WrappedArray(0, 1, 2)], [def,city2,WrappedArray(3, 4, 5)], [ghi,city3,
+    WrappedArray(6, 7, 8)], [jkl,city4,WrappedArray(9, 10, 11)]]|
+    +----+---+-----------------------------------------------------------------------------------+*/
+
+    sql("DROP TABLE sdkOutputTable")
+    // drop table should not delete the files
+    cleanTestData()
+  }
+
+
+  // test multi level -- 3 levels [array of struct of struct of string, int]
+  def buildAvroTestDataMultiLevel3_1(rows: Int, options: util.Map[String, String]): Any = {
+    FileUtils.deleteDirectory(new File(writerPath))
+
+    val mySchema = """ {
+                     |	"name": "address",
+                     |	"type": "record",
+                     |	"fields": [
+                     |		{
+                     |			"name": "name",
+                     |			"type": "string"
+                     |		},
+                     |		{
+                     |			"name": "age",
+                     |			"type": "int"
+                     |		},
+                     |		{
+                     |			"name": "doorNum",
+                     |			"type": {
+                     |				"type": "array",
+                     |				"items": {
+                     |					"type": "record",
+                     |					"name": "my_address",
+                     |					"fields": [
+                     |						{
+                     |							"name": "street",
+                     |							"type": "string"
+                     |						},
+                     |						{
+                     |							"name": "city",
+                     |							"type": "string"
+                     |						},
+                     |						{
+                     |							"name": "FloorNum",
+                     |                			"type": {
+                     |                				"type": "record",
+                     |                				"name": "Floor",
+                     |                				"fields": [
+                     |                					{
+                     |                						"name": "wing",
+                     |                						"type": "string"
+                     |                					},
+                     |                					{
+                     |                						"name": "number",
+                     |                						"type": "int"
+                     |                					}
+                     |                				]
+                     |                			}
+                     |						}
+                     |					]
+                     |				}
+                     |			}
+                     |		}
+                     |	]
+                     |} """.stripMargin
+
+
+    val json =
+      """  {
+        |	"name": "bob",
+        |	"age": 10,
+        |	"doorNum": [
+        |		{
+        |			"street": "abc",
+        |			"city": "city1",
+        |			"FloorNum": {"wing" : "a", "number" : 1}
+        |		},
+        |		{
+        |			"street": "def",
+        |			"city": "city2",
+        |			"FloorNum": {"wing" : "b", "number" : 0}
+        |		},
+        |		{
+        |			"street": "ghi",
+        |			"city": "city3",
+        |			"FloorNum": {"wing" : "a", "number" : 2}
+        |		}
+        |	]
+        |}  """.stripMargin
+
+    val fields = new Array[Field](3)
+    fields(0) = new Field("name", DataTypes.STRING)
+    fields(1) = new Field("age", DataTypes.INT)
+
+    val fld = new util.ArrayList[StructField]
+    fld.add(new StructField("street", DataTypes.STRING))
+    fld.add(new StructField("city", DataTypes.STRING))
+
+    val subFld = new util.ArrayList[StructField]
+    subFld.add(new StructField("wing", DataTypes.STRING))
+    subFld.add(new StructField("number", DataTypes.INT))
+    fld.add(new StructField("FloorNum", DataTypes.createStructType(subFld)))
+
+    // array of struct of struct
+    val fld2 = new util.ArrayList[StructField]
+    fld2.add(new StructField("my_address", DataTypes.createStructType(fld), fld))
+    fields(2) = new Field("doorNum", DataTypes.createArrayType(fld2.get(0).getDataType), fld2)
+
+    WriteFilesWithAvroWriter(rows, mySchema, json, fields)
+  }
+
+  def buildAvroTestDataMultiLevel3_1Type(): Any = {
+    FileUtils.deleteDirectory(new File(writerPath))
+    buildAvroTestDataMultiLevel3_1(3, null)
+  }
+
+  // test multi level -- 3 levels [array of struct of struct of string, int]
+  test("test multi level support : array of struct of struct") {
+    buildAvroTestDataMultiLevel3_1Type()
+    assert(new File(writerPath).exists())
+    sql("DROP TABLE IF EXISTS sdkOutputTable")
+    sql(
+      s"""CREATE EXTERNAL TABLE sdkOutputTable STORED BY 'carbondata' LOCATION
+         |'$writerPath' """.stripMargin)
+
+    sql("select * from sdkOutputTable").show(false)
+
+    // TODO: Add a validation
+    /*
+    +----+---+---------------------------------------------------------+
+    |name|age|doorNum                                                  |
+    +----+---+---------------------------------------------------------+
+    |bob |10 |[[abc,city1,[a,1]], [def,city2,[b,0]], [ghi,city3,[a,2]]]|
+    |bob |10 |[[abc,city1,[a,1]], [def,city2,[b,0]], [ghi,city3,[a,2]]]|
+    |bob |10 |[[abc,city1,[a,1]], [def,city2,[b,0]], [ghi,city3,[a,2]]]|
+    +----+---+---------------------------------------------------------+
+    */
+
+    sql("DROP TABLE sdkOutputTable")
+    // drop table should not delete the files
+    cleanTestData()
+  }
+
+  // test multi level -- 3 levels [array of array of array of int]
+  def buildAvroTestDataMultiLevel3_2(rows: Int, options: util.Map[String, String]): Any = {
+    FileUtils.deleteDirectory(new File(writerPath))
+
+    val mySchema = """ {
+                     |	"name": "address",
+                     |	"type": "record",
+                     |	"fields": [
+                     |		{
+                     |			"name": "name",
+                     |			"type": "string"
+                     |		},
+                     |		{
+                     |			"name": "age",
+                     |			"type": "int"
+                     |		},
+                     |		{
+                     |			"name": "BuildNum",
+                     |			"type": {
+                     |				"type": "array",
+                     |				"items": {
+                     |					"name": "FloorNum",
+                     |					"type": "array",
+                     |					"items": {
+                     |						"name": "doorNum",
+                     |						"type": "array",
+                     |						"items": {
+                     |							"name": "EachdoorNums",
+                     |							"type": "int",
+                     |							"default": -1
+                     |						}
+                     |					}
+                     |				}
+                     |			}
+                     |		}
+                     |	]
+                     |} """.stripMargin
+
+    val json =
+      """   {
+        |        	"name": "bob",
+        |        	"age": 10,
+        |        	"BuildNum": [[[1,2,3],[4,5,6]],[[10,20,30],[40,50,60]]]
+        |        }   """.stripMargin
+
+    val fields = new Array[Field](3)
+    fields(0) = new Field("name", DataTypes.STRING)
+    fields(1) = new Field("age", DataTypes.INT)
+
+    val subFld = new util.ArrayList[StructField]
+    subFld.add(new StructField("EachDoorNum", DataTypes.INT))
+
+    val fld = new util.ArrayList[StructField]
+    fld.add(new StructField("DoorNum", DataTypes.createArrayType(DataTypes.INT), subFld))
+    // array of struct of struct
+    val doorNum = new util.ArrayList[StructField]
+    doorNum.add(new StructField("FloorNum",
+      DataTypes.createArrayType(DataTypes.createArrayType(DataTypes.INT)), fld))
+    fields(2) = new Field("BuildNum", "array", doorNum)
+
+    WriteFilesWithAvroWriter(rows, mySchema, json, fields)
+  }
+
+  def buildAvroTestDataMultiLevel3_2Type(): Any = {
+    FileUtils.deleteDirectory(new File(writerPath))
+    buildAvroTestDataMultiLevel3_2(3, null)
+  }
+
+  // test multi level -- 3 levels [array of array of array of int]
+  test("test multi level support : array of array of array of int") {
+    buildAvroTestDataMultiLevel3_2Type()
+    assert(new File(writerPath).exists())
+    sql("DROP TABLE IF EXISTS sdkOutputTable")
+    sql(
+      s"""CREATE EXTERNAL TABLE sdkOutputTable STORED BY 'carbondata' LOCATION
+         |'$writerPath' """.stripMargin)
+
+    sql("select * from sdkOutputTable").show(false)
+
+    // TODO: Add a validation
+    /*
+    +----+---+---------------------------------------------------------------------------+
+    |name|age|BuildNum
+                                               |
+    +----+---+---------------------------------------------------------------------------+
+    |bob |10 |[WrappedArray(WrappedArray(1, 2, 3), WrappedArray(4, 5, 6)), WrappedArray
+    (WrappedArray(10, 20, 30), WrappedArray(40, 50, 60))]|
+    |bob |10 |[WrappedArray(WrappedArray(1, 2, 3), WrappedArray(4, 5, 6)), WrappedArray
+    (WrappedArray(10, 20, 30), WrappedArray(40, 50, 60))]|
+    |bob |10 |[WrappedArray(WrappedArray(1, 2, 3), WrappedArray(4, 5, 6)), WrappedArray
+    (WrappedArray(10, 20, 30), WrappedArray(40, 50, 60))]|
+    +----+---+---------------------------------------------------------------------------+
+   */
+
+    sql("DROP TABLE sdkOutputTable")
+    // drop table should not delete the files
+    cleanTestData()
   }
 
+
+
+  // test multi level -- 4 levels [array of array of array of struct]
+  def buildAvroTestDataMultiLevel4(rows: Int, options: util.Map[String, String]): Any = {
+    FileUtils.deleteDirectory(new File(writerPath))
+
+    val mySchema =  """ {
+                      |	"name": "address",
+                      |	"type": "record",
+                      |	"fields": [
+                      |		{
+                      |			"name": "name",
+                      |			"type": "string"
+                      |		},
+                      |		{
+                      |			"name": "age",
+                      |			"type": "int"
+                      |		},
+                      |		{
+                      |			"name": "BuildNum",
+                      |			"type": {
+                      |				"type": "array",
+                      |				"items": {
+                      |					"name": "FloorNum",
+                      |					"type": "array",
+                      |					"items": {
+                      |						"name": "doorNum",
+                      |						"type": "array",
+                      |						"items": {
+                      |							"name": "my_address",
+                      |							"type": "record",
+                      |							"fields": [
+                      |								{
+                      |									"name": "street",
+                      |									"type": "string"
+                      |								},
+                      |								{
+                      |									"name": "city",
+                      |									"type": "string"
+                      |								}
+                      |							]
+                      |						}
+                      |					}
+                      |				}
+                      |			}
+                      |		}
+                      |	]
+                      |} """.stripMargin
+
+    val json =
+      """ {
+        |	"name": "bob",
+        |	"age": 10,
+        |	"BuildNum": [
+        |		[
+        |			[
+        |				{"street":"abc", "city":"city1"},
+        |				{"street":"def", "city":"city2"},
+        |				{"street":"cfg", "city":"city3"}
+        |			],
+        |			[
+        |				 {"street":"abc1", "city":"city3"},
+        |				 {"street":"def1", "city":"city4"},
+        |				 {"street":"cfg1", "city":"city5"}
+        |			]
+        |		],
+        |		[
+        |			[
+        |				 {"street":"abc2", "city":"cityx"},
+        |				 {"street":"abc3", "city":"cityy"},
+        |				 {"street":"abc4", "city":"cityz"}
+        |			],
+        |			[
+        |				 {"street":"a1bc", "city":"cityA"},
+        |				 {"street":"a1bc", "city":"cityB"},
+        |				 {"street":"a1bc", "city":"cityc"}
+        |			]
+        |		]
+        |	]
+        |} """.stripMargin
+
+    val fields = new Array[Field](3)
+    fields(0) = new Field("name", DataTypes.STRING)
+    fields(1) = new Field("age", DataTypes.INT)
+
+    val subFld = new util.ArrayList[StructField]
+    subFld.add(new StructField("EachDoorNum", DataTypes.INT))
+
+    val address = new util.ArrayList[StructField]
+    address.add(new StructField("street", DataTypes.STRING))
+    address.add(new StructField("city", DataTypes.STRING))
+
+    val fld = new util.ArrayList[StructField]
+    fld.add(new StructField("DoorNum",
+        DataTypes.createArrayType(DataTypes.createStructType(address)),
+        subFld))
+    // array of struct of struct
+    val doorNum = new util.ArrayList[StructField]
+    doorNum.add(new StructField("FloorNum",
+      DataTypes.createArrayType(
+        DataTypes.createArrayType(DataTypes.createStructType(address))), fld))
+    fields(2) = new Field("BuildNum", "array", doorNum)
+
+    WriteFilesWithAvroWriter(rows, mySchema, json, fields)
+  }
+
+  def buildAvroTestDataMultiLevel4Type(): Any = {
+    FileUtils.deleteDirectory(new File(writerPath))
+    buildAvroTestDataMultiLevel4(3, null)
+  }
+
+  // test multi level -- 4 levels [array of array of array of struct]
+  test("test multi level support : array of array of array of struct") {
+    buildAvroTestDataMultiLevel4Type()
+    assert(new File(writerPath).exists())
+    sql("DROP TABLE IF EXISTS sdkOutputTable")
+    sql(
+      s"""CREATE EXTERNAL TABLE sdkOutputTable STORED BY 'carbondata' LOCATION
+         |'$writerPath' """.stripMargin)
+
+    sql("select * from sdkOutputTable").show(false)
+
+    // TODO: Add a validation
+
+    sql("DROP TABLE sdkOutputTable")
+    // drop table should not delete the files
+    cleanTestData()
+  }
+
+
 }
\ No newline at end of file


[15/50] [abbrv] carbondata git commit: [CARBONDATA-2401] Date and Timestamp options are not working in SDK

Posted by ma...@apache.org.
[CARBONDATA-2401] Date and Timestamp options are not working in SDK

Issue: The date and timestamp formats are passed in the SDK load options, but the data
load fails even when the data matches the given formats.
Cause: The load model's formats are overwritten with the defaults.
Fix: If the user has passed the options, the load model should use them;
otherwise it should fall back to the defaults.
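
A minimal sketch of the intended SDK usage (paths and values are illustrative), mirroring the test added below:

    import scala.collection.JavaConverters._
    import org.apache.carbondata.core.metadata.datatype.DataTypes
    import org.apache.carbondata.sdk.file.{CarbonWriter, Field, Schema}

    val options = Map("dateformat" -> "dd-MM-yyyy",
                      "timestampformat" -> "dd-MM-yyyy HH:mm:ss").asJava

    val fields = new Array[Field](3)
    fields(0) = new Field("stringField", DataTypes.STRING)
    fields(1) = new Field("mydate", DataTypes.DATE)
    fields(2) = new Field("mytime", DataTypes.TIMESTAMP)

    val writer = CarbonWriter.builder.withSchema(new Schema(fields))
      .outputPath("/tmp/sdk_output")      // illustrative output path
      .isTransactionalTable(false)
      .withLoadOptions(options)           // formats are now taken from the options, not the defaults
      .buildWriterForCSVInput

    writer.write(Array("bob", "02-01-2002", "02-01-2002 01:01:00"))
    writer.close()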

This closes #2227


Project: http://git-wip-us.apache.org/repos/asf/carbondata/repo
Commit: http://git-wip-us.apache.org/repos/asf/carbondata/commit/b2060c61
Tree: http://git-wip-us.apache.org/repos/asf/carbondata/tree/b2060c61
Diff: http://git-wip-us.apache.org/repos/asf/carbondata/diff/b2060c61

Branch: refs/heads/spark-2.3
Commit: b2060c61104895d5599b1c044b725d56a39748f8
Parents: ceb7c8d
Author: BJangir <ba...@gmail.com>
Authored: Wed Apr 25 18:27:58 2018 +0530
Committer: kumarvishal09 <ku...@gmail.com>
Committed: Thu May 10 14:00:59 2018 +0530

----------------------------------------------------------------------
 .../TestNonTransactionalCarbonTable.scala       | 39 ++++++++++++++++++++
 .../loading/model/CarbonLoadModelBuilder.java   |  4 --
 2 files changed, 39 insertions(+), 4 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/carbondata/blob/b2060c61/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/createTable/TestNonTransactionalCarbonTable.scala
----------------------------------------------------------------------
diff --git a/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/createTable/TestNonTransactionalCarbonTable.scala b/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/createTable/TestNonTransactionalCarbonTable.scala
index 2f88c40..ca6ac3c 100644
--- a/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/createTable/TestNonTransactionalCarbonTable.scala
+++ b/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/createTable/TestNonTransactionalCarbonTable.scala
@@ -17,6 +17,8 @@
 
 package org.apache.carbondata.spark.testsuite.createTable
 
+import java.sql.Timestamp
+import java.io.{File, FileFilter, IOException}
 import java.io.{File, FileFilter}
 import java.util
 
@@ -31,6 +33,7 @@ import org.apache.carbondata.core.constants.CarbonCommonConstants
 import org.apache.carbondata.core.datastore.filesystem.CarbonFile
 import org.apache.carbondata.core.datastore.impl.FileFactory
 import org.apache.carbondata.core.util.CarbonUtil
+import org.apache.carbondata.sdk.file.{CarbonWriter, CarbonWriterBuilder, Field, Schema}
 import org.apache.carbondata.sdk.file.{AvroCarbonWriter, CarbonWriter, Field, Schema}
 import scala.collection.JavaConverters._
 import scala.collection.mutable
@@ -39,6 +42,10 @@ import org.apache.avro
 import org.apache.commons.lang.CharEncoding
 import tech.allegro.schema.json2avro.converter.JsonAvroConverter
 
+import org.apache.carbondata.core.metadata.datatype.{DataTypes, StructField}
+import org.apache.carbondata.sdk.file.{CarbonWriter, CarbonWriterBuilder, Field, Schema}
+
+
 class TestNonTransactionalCarbonTable extends QueryTest with BeforeAndAfterAll {
 
   var writerPath = new File(this.getClass.getResource("/").getPath
@@ -669,6 +676,38 @@ class TestNonTransactionalCarbonTable extends QueryTest with BeforeAndAfterAll {
     cleanTestData()
   }
 
+  test("test custom  format for date and timestamp in sdk") {
+
+    cleanTestData()
+    var options = Map("dateformat" -> "dd-MM-yyyy" ,"timestampformat" -> "dd-MM-yyyy HH:mm:ss").asJava
+
+    val fields: Array[Field] = new Array[Field](4)
+    fields(0) = new Field("stringField", DataTypes.STRING)
+    fields(1) = new Field("intField", DataTypes.INT)
+    fields(2) = new Field("mydate", DataTypes.DATE)
+    fields(3) = new Field("mytime", DataTypes.TIMESTAMP)
+
+    val builder: CarbonWriterBuilder = CarbonWriter.builder.withSchema(new Schema(fields))
+      .outputPath(writerPath).isTransactionalTable(false).withLoadOptions(options)
+
+    val writer: CarbonWriter = builder.buildWriterForCSVInput
+    writer.write(Array("babu","1","02-01-2002","02-01-2002 01:01:00"));
+    writer.close()
+
+    assert(new File(writerPath).exists())
+
+    sql("DROP TABLE IF EXISTS sdkOutputTable")
+    sql(
+      s"""CREATE EXTERNAL TABLE sdkOutputTable STORED BY 'carbondata' LOCATION
+         |'$writerPath' """.stripMargin)
+
+    checkAnswer(sql("select * from sdkOutputTable"), Seq(
+      Row("babu", 1, java.sql.Date.valueOf("2002-01-02"),Timestamp.valueOf("2002-01-02 01:01:00.0"))))
+    sql("DROP TABLE sdkOutputTable")
+    cleanTestData()
+
+  }
+
   test("test huge data write with one batch having bad record") {
 
     val exception =

http://git-wip-us.apache.org/repos/asf/carbondata/blob/b2060c61/processing/src/main/java/org/apache/carbondata/processing/loading/model/CarbonLoadModelBuilder.java
----------------------------------------------------------------------
diff --git a/processing/src/main/java/org/apache/carbondata/processing/loading/model/CarbonLoadModelBuilder.java b/processing/src/main/java/org/apache/carbondata/processing/loading/model/CarbonLoadModelBuilder.java
index 3385479..13dd75c 100644
--- a/processing/src/main/java/org/apache/carbondata/processing/loading/model/CarbonLoadModelBuilder.java
+++ b/processing/src/main/java/org/apache/carbondata/processing/loading/model/CarbonLoadModelBuilder.java
@@ -80,10 +80,6 @@ public class CarbonLoadModelBuilder {
     // we have provided 'fileheader', so it hadoopConf can be null
     build(options, optionsFinal, model, null);
 
-
-    // set default values
-    model.setTimestampformat(CarbonCommonConstants.CARBON_TIMESTAMP_DEFAULT_FORMAT);
-    model.setDateFormat(CarbonCommonConstants.CARBON_DATE_DEFAULT_FORMAT);
     model.setUseOnePass(Boolean.parseBoolean(Maps.getOrDefault(options, "onepass", "false")));
     model.setDictionaryServerHost(Maps.getOrDefault(options, "dicthost", null));
     try {


[46/50] [abbrv] carbondata git commit: [CARBONDATA-2463] Concurrent insert requires a separate temp path, which is differentiated only by segment id

Posted by ma...@apache.org.
[CARBONDATA-2463] Concurrent insert requires a separate temp path, which is differentiated only by segment id

Issue: For a non-transactional table, no segment id is set in the load model. The
temp location is named databaseName_tableName_segId_taskId, so during concurrent
inserts into the same table the temp folders must be created at separate locations,
and only the segment id value differs between two concurrent inserts.

Solution: The insert flow for an external table is the same as for a carbon table,
and no segment info is stored anywhere, so a dummy unique segment id can be
assigned to the load model for a non-transactional table.
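
In practice the scenario is the concurrent-insert test added below; a minimal sketch (assuming a Spark test harness that provides sql()):

    import java.util.concurrent.TimeUnit
    import scala.concurrent.ExecutionContext.Implicits.global
    import scala.concurrent.duration.Duration
    import scala.concurrent.{Await, Future}

    // two concurrent inserts into the same non-transactional (external) table;
    // each load now gets its own dummy segment id, so the temp folders no longer collide
    val one = Future { sql("insert into sdkOutputTable select * from t1") }
    val two = Future { sql("insert into sdkOutputTable select * from t1") }
    Await.result(Future.sequence(Seq(one, two)), Duration(300, TimeUnit.SECONDS))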

This closes #2284


Project: http://git-wip-us.apache.org/repos/asf/carbondata/repo
Commit: http://git-wip-us.apache.org/repos/asf/carbondata/commit/fc4b7f9b
Tree: http://git-wip-us.apache.org/repos/asf/carbondata/tree/fc4b7f9b
Diff: http://git-wip-us.apache.org/repos/asf/carbondata/diff/fc4b7f9b

Branch: refs/heads/spark-2.3
Commit: fc4b7f9b96eebdcaf6cd513d911a21761bbc5a77
Parents: 3d23fa6
Author: rahulforallp <ra...@knoldus.in>
Authored: Tue May 8 17:11:41 2018 +0530
Committer: kunal642 <ku...@gmail.com>
Committed: Tue May 15 13:17:15 2018 +0530

----------------------------------------------------------------------
 .../TestNonTransactionalCarbonTable.scala       | 43 +++++++++++++++++++-
 .../management/CarbonLoadDataCommand.scala      |  2 +
 .../sdk/file/CarbonWriterBuilder.java           |  4 +-
 .../sdk/file/CSVCarbonWriterTest.java           |  2 +-
 .../CSVNonTransactionalCarbonWriterTest.java    |  4 +-
 5 files changed, 49 insertions(+), 6 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/carbondata/blob/fc4b7f9b/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/createTable/TestNonTransactionalCarbonTable.scala
----------------------------------------------------------------------
diff --git a/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/createTable/TestNonTransactionalCarbonTable.scala b/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/createTable/TestNonTransactionalCarbonTable.scala
index fb9c862..5ab1c60 100644
--- a/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/createTable/TestNonTransactionalCarbonTable.scala
+++ b/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/createTable/TestNonTransactionalCarbonTable.scala
@@ -19,8 +19,10 @@ package org.apache.carbondata.spark.testsuite.createTable
 
 import java.sql.Timestamp
 import java.io.{File, FileFilter, IOException}
-import java.io.{File, FileFilter}
 import java.util
+import java.util.concurrent.TimeUnit
+
+import scala.concurrent.ExecutionContext.Implicits.global
 
 import org.apache.commons.io.FileUtils
 import org.apache.spark.sql.Row
@@ -35,6 +37,8 @@ import org.apache.carbondata.core.datastore.impl.FileFactory
 import org.apache.carbondata.core.util.{CarbonProperties, CarbonUtil}
 import scala.collection.JavaConverters._
 import scala.collection.mutable
+import scala.concurrent.{Await, Future}
+import scala.concurrent.duration.Duration
 
 import org.apache.avro
 import org.apache.commons.lang.CharEncoding
@@ -263,6 +267,43 @@ class TestNonTransactionalCarbonTable extends QueryTest with BeforeAndAfterAll {
     sql("DROP TABLE IF EXISTS sdkOutputTable")
   }
 
+  test("concurrently insert operation"){
+    cleanTestData()
+    buildTestDataSingleFile()
+    assert(new File(writerPath).exists())
+    sql("DROP TABLE IF EXISTS sdkOutputTable")
+
+    // with partition
+    sql(
+      s"""CREATE EXTERNAL TABLE sdkOutputTable(name string) PARTITIONED BY (age int) STORED BY
+         |'carbondata' LOCATION
+         |'$writerPath' """.stripMargin)
+
+    sql("drop table if exists t1")
+    sql("create table if not exists t1 (name string, age int, height double) STORED BY 'org.apache.carbondata.format'")
+    var i =0;
+    while (i<50){
+      sql (s"""insert into t1 values ("aaaaa", 12, 20)""").show(200,false)
+      i = i+1;
+    }
+    checkAnswer(sql("select count(*) from t1"),Seq(Row(50)))
+    val one = Future {
+      sql("insert into sdkOutputTable select * from t1 ")
+    }
+    val two = Future {
+      sql("insert into sdkOutputTable select * from t1 ")
+    }
+
+    Await.result(Future.sequence(Seq(one, two)), Duration(300, TimeUnit.SECONDS))
+
+    checkAnswer(sql("select count(*) from sdkOutputTable"),Seq(Row(103)))
+
+    sql("DROP TABLE sdkOutputTable")
+    // drop table should not delete the files
+    assert(new File(writerPath).exists())
+    cleanTestData()
+  }
+
   test(
     "Read two sdk writer outputs before and after deleting the existing files and creating new " +
     "files with same schema and UUID") {

http://git-wip-us.apache.org/repos/asf/carbondata/blob/fc4b7f9b/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/management/CarbonLoadDataCommand.scala
----------------------------------------------------------------------
diff --git a/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/management/CarbonLoadDataCommand.scala b/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/management/CarbonLoadDataCommand.scala
index 3e306fb..3bef4b6 100644
--- a/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/management/CarbonLoadDataCommand.scala
+++ b/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/management/CarbonLoadDataCommand.scala
@@ -272,6 +272,8 @@ case class CarbonLoadDataCommand(
           if (!FileFactory.isFileExist(metadataDirectoryPath, fileType)) {
             FileFactory.mkdirs(metadataDirectoryPath, fileType)
           }
+        } else {
+          carbonLoadModel.setSegmentId(System.currentTimeMillis().toString)
         }
         val partitionStatus = SegmentStatus.SUCCESS
         val columnar = sparkSession.conf.get("carbon.is.columnar.storage", "true").toBoolean

http://git-wip-us.apache.org/repos/asf/carbondata/blob/fc4b7f9b/store/sdk/src/main/java/org/apache/carbondata/sdk/file/CarbonWriterBuilder.java
----------------------------------------------------------------------
diff --git a/store/sdk/src/main/java/org/apache/carbondata/sdk/file/CarbonWriterBuilder.java b/store/sdk/src/main/java/org/apache/carbondata/sdk/file/CarbonWriterBuilder.java
index 1816539..a01f0d7 100644
--- a/store/sdk/src/main/java/org/apache/carbondata/sdk/file/CarbonWriterBuilder.java
+++ b/store/sdk/src/main/java/org/apache/carbondata/sdk/file/CarbonWriterBuilder.java
@@ -109,8 +109,8 @@ public class CarbonWriterBuilder {
    * by default it is system time in nano seconds.
    * @return updated CarbonWriterBuilder
    */
-  public CarbonWriterBuilder taskNo(String taskNo) {
-    this.taskNo = taskNo;
+  public CarbonWriterBuilder taskNo(long taskNo) {
+    this.taskNo = String.valueOf(taskNo);
     return this;
   }
 

http://git-wip-us.apache.org/repos/asf/carbondata/blob/fc4b7f9b/store/sdk/src/test/java/org/apache/carbondata/sdk/file/CSVCarbonWriterTest.java
----------------------------------------------------------------------
diff --git a/store/sdk/src/test/java/org/apache/carbondata/sdk/file/CSVCarbonWriterTest.java b/store/sdk/src/test/java/org/apache/carbondata/sdk/file/CSVCarbonWriterTest.java
index ba3d3ac..d68d85b 100644
--- a/store/sdk/src/test/java/org/apache/carbondata/sdk/file/CSVCarbonWriterTest.java
+++ b/store/sdk/src/test/java/org/apache/carbondata/sdk/file/CSVCarbonWriterTest.java
@@ -257,7 +257,7 @@ public class CSVCarbonWriterTest {
     try {
       CarbonWriterBuilder builder = CarbonWriter.builder()
           .withSchema(new Schema(fields))
-          .isTransactionalTable(true).taskNo("5")
+          .isTransactionalTable(true).taskNo(5)
           .outputPath(path);
 
       CarbonWriter writer = builder.buildWriterForCSVInput();

http://git-wip-us.apache.org/repos/asf/carbondata/blob/fc4b7f9b/store/sdk/src/test/java/org/apache/carbondata/sdk/file/CSVNonTransactionalCarbonWriterTest.java
----------------------------------------------------------------------
diff --git a/store/sdk/src/test/java/org/apache/carbondata/sdk/file/CSVNonTransactionalCarbonWriterTest.java b/store/sdk/src/test/java/org/apache/carbondata/sdk/file/CSVNonTransactionalCarbonWriterTest.java
index 32fe6e8..19b0a42 100644
--- a/store/sdk/src/test/java/org/apache/carbondata/sdk/file/CSVNonTransactionalCarbonWriterTest.java
+++ b/store/sdk/src/test/java/org/apache/carbondata/sdk/file/CSVNonTransactionalCarbonWriterTest.java
@@ -96,7 +96,7 @@ public class CSVNonTransactionalCarbonWriterTest {
           .withSchema(schema)
           .isTransactionalTable(false)
           .uniqueIdentifier(System.currentTimeMillis())
-          .taskNo(Long.toString(System.nanoTime()))
+          .taskNo(System.nanoTime())
           .outputPath(path);
       if (sortColumns != null) {
         builder = builder.sortBy(sortColumns);
@@ -160,7 +160,7 @@ public class CSVNonTransactionalCarbonWriterTest {
           .withSchema(new Schema(fields))
           .uniqueIdentifier(System.currentTimeMillis())
           .isTransactionalTable(false)
-          .taskNo(Long.toString(System.nanoTime()))
+          .taskNo(System.nanoTime())
           .outputPath(path);
 
       CarbonWriter writer = builder.buildWriterForCSVInput();


[39/50] [abbrv] carbondata git commit: [CARBONDATA-2475] Support Modular Core for Materialized View DataMap for query matching and rewriting

Posted by ma...@apache.org.
http://git-wip-us.apache.org/repos/asf/carbondata/blob/2881c6bb/integration/spark-common-test/src/test/resources/tpch/nation.csv
----------------------------------------------------------------------
diff --git a/integration/spark-common-test/src/test/resources/tpch/nation.csv b/integration/spark-common-test/src/test/resources/tpch/nation.csv
new file mode 100644
index 0000000..ed3fd5b
--- /dev/null
+++ b/integration/spark-common-test/src/test/resources/tpch/nation.csv
@@ -0,0 +1,25 @@
+0|ALGERIA|0| haggle. carefully final deposits detect slyly agai|
+1|ARGENTINA|1|al foxes promise slyly according to the regular accounts. bold requests alon|
+2|BRAZIL|1|y alongside of the pending deposits. carefully special packages are about the ironic forges. slyly special |
+3|CANADA|1|eas hang ironic, silent packages. slyly regular packages are furiously over the tithes. fluffily bold|
+4|EGYPT|4|y above the carefully unusual theodolites. final dugouts are quickly across the furiously regular d|
+5|ETHIOPIA|0|ven packages wake quickly. regu|
+6|FRANCE|3|refully final requests. regular, ironi|
+7|GERMANY|3|l platelets. regular accounts x-ray: unusual, regular acco|
+8|INDIA|2|ss excuses cajole slyly across the packages. deposits print aroun|
+9|INDONESIA|2| slyly express asymptotes. regular deposits haggle slyly. carefully ironic hockey players sleep blithely. carefull|
+10|IRAN|4|efully alongside of the slyly final dependencies. |
+11|IRAQ|4|nic deposits boost atop the quickly final requests? quickly regula|
+12|JAPAN|2|ously. final, express gifts cajole a|
+13|JORDAN|4|ic deposits are blithely about the carefully regular pa|
+14|KENYA|0| pending excuses haggle furiously deposits. pending, express pinto beans wake fluffily past t|
+15|MOROCCO|0|rns. blithely bold courts among the closely regular packages use furiously bold platelets?|
+16|MOZAMBIQUE|0|s. ironic, unusual asymptotes wake blithely r|
+17|PERU|1|platelets. blithely pending dependencies use fluffily across the even pinto beans. carefully silent accoun|
+18|CHINA|2|c dependencies. furiously express notornis sleep slyly regular accounts. ideas sleep. depos|
+19|ROMANIA|3|ular asymptotes are about the furious multipliers. express dependencies nag above the ironically ironic account|
+20|SAUDI ARABIA|4|ts. silent requests haggle. closely express packages sleep across the blithely|
+21|VIETNAM|2|hely enticingly express accounts. even, final |
+22|RUSSIA|3| requests against the platelets use never according to the quickly regular pint|
+23|UNITED KINGDOM|3|eans boost carefully special requests. accounts are. carefull|
+24|UNITED STATES|1|y final packages. slow foxes cajole quickly. quickly silent platelets breach ironic accounts. unusual pinto be|


[50/50] [abbrv] carbondata git commit: [CARBONDATA-2465] Improve the carbondata file reliability in data load when direct hdfs write is enabled

Posted by ma...@apache.org.
[CARBONDATA-2465] Improve the carbondata file reliability in data load when direct hdfs write is enabled

Problem: At present, if direct write to HDFS is enabled, data is written with a replication factor of 1, which can cause data loss.
Solution: Write with the cluster's configured replication. With this change there is no need to invoke
CompleteHdfsBackendThread/completeRemainingHdfsReplicas for the direct HDFS write case.
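
A minimal sketch of the change in how the stream is opened for the direct-write path (Scala syntax for brevity; the actual code in AbstractFactDataWriter is Java, and the path and size values below are illustrative stand-ins for the writer's state):

    import org.apache.carbondata.core.constants.CarbonCommonConstants
    import org.apache.carbondata.core.datastore.impl.FileFactory

    val carbonDataFileHdfsPath = "hdfs://namenode/store/part-0-0.carbondata"  // illustrative
    val fileSizeInBytes = 1024L * 1024 * 1024                                  // illustrative

    // before: only the first replica was written, the rest were completed by a background thread
    // FileFactory.getDataOutputStream(carbonDataFileHdfsPath, FileFactory.FileType.HDFS,
    //   CarbonCommonConstants.BYTEBUFFER_SIZE, fileSizeInBytes * 2, 1.toShort)

    // after: open the stream with the cluster's default replication from the start
    val fileOutputStream = FileFactory.getDataOutputStream(carbonDataFileHdfsPath,
      FileFactory.FileType.HDFS, CarbonCommonConstants.BYTEBUFFER_SIZE, fileSizeInBytes * 2)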

This closes #2235


Project: http://git-wip-us.apache.org/repos/asf/carbondata/repo
Commit: http://git-wip-us.apache.org/repos/asf/carbondata/commit/8fe16566
Tree: http://git-wip-us.apache.org/repos/asf/carbondata/tree/8fe16566
Diff: http://git-wip-us.apache.org/repos/asf/carbondata/diff/8fe16566

Branch: refs/heads/spark-2.3
Commit: 8fe165668e2662455991f9de6af817ccc99b81ee
Parents: 6297ea0
Author: KanakaKumar <ka...@huawei.com>
Authored: Thu Apr 26 23:39:29 2018 +0530
Committer: kunal642 <ku...@gmail.com>
Committed: Thu May 17 19:42:59 2018 +0530

----------------------------------------------------------------------
 .../apache/carbondata/core/util/CarbonUtil.java | 27 -----------
 .../store/writer/AbstractFactDataWriter.java    | 47 ++++++--------------
 2 files changed, 14 insertions(+), 60 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/carbondata/blob/8fe16566/core/src/main/java/org/apache/carbondata/core/util/CarbonUtil.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/util/CarbonUtil.java b/core/src/main/java/org/apache/carbondata/core/util/CarbonUtil.java
index ac0a800..9dc4aa2 100644
--- a/core/src/main/java/org/apache/carbondata/core/util/CarbonUtil.java
+++ b/core/src/main/java/org/apache/carbondata/core/util/CarbonUtil.java
@@ -2817,33 +2817,6 @@ public final class CarbonUtil {
   }
 
   /**
-   * This method will complete the remaining hdfs replications
-   *
-   * @param fileName hdfs file name
-   * @param fileType filetype
-   * @throws CarbonDataWriterException if error occurs
-   */
-  public static void completeRemainingHdfsReplicas(String fileName, FileFactory.FileType fileType)
-    throws CarbonDataWriterException {
-    try {
-      long startTime = System.currentTimeMillis();
-      short replication = FileFactory.getDefaultReplication(fileName, fileType);
-      if (1 == replication) {
-        return;
-      }
-      boolean replicateFlag = FileFactory.setReplication(fileName, fileType, replication);
-      if (!replicateFlag) {
-        LOGGER.error("Failed to set replication for " + fileName + " with factor " + replication);
-      }
-      LOGGER.info(
-          "Total copy time (ms) to copy file " + fileName + " is " + (System.currentTimeMillis()
-              - startTime));
-    } catch (IOException e) {
-      throw new CarbonDataWriterException("Problem while completing remaining HDFS backups", e);
-    }
-  }
-
-  /**
    * This method will read the local carbon data file and write to carbon data file in HDFS
    *
    * @param carbonStoreFilePath

http://git-wip-us.apache.org/repos/asf/carbondata/blob/8fe16566/processing/src/main/java/org/apache/carbondata/processing/store/writer/AbstractFactDataWriter.java
----------------------------------------------------------------------
diff --git a/processing/src/main/java/org/apache/carbondata/processing/store/writer/AbstractFactDataWriter.java b/processing/src/main/java/org/apache/carbondata/processing/store/writer/AbstractFactDataWriter.java
index 6e557cd..8115f97 100644
--- a/processing/src/main/java/org/apache/carbondata/processing/store/writer/AbstractFactDataWriter.java
+++ b/processing/src/main/java/org/apache/carbondata/processing/store/writer/AbstractFactDataWriter.java
@@ -176,6 +176,7 @@ public abstract class AbstractFactDataWriter implements CarbonFactDataWriter {
         CarbonLoadOptionConstants.ENABLE_CARBON_LOAD_DIRECT_WRITE_HDFS,
         CarbonLoadOptionConstants.ENABLE_CARBON_LOAD_DIRECT_WRITE_HDFS_DEFAULT);
     this.enableDirectlyWriteData2Hdfs = "TRUE".equalsIgnoreCase(directlyWriteData2Hdfs);
+
     if (enableDirectlyWriteData2Hdfs) {
       LOGGER.info("Carbondata will directly write fact data to HDFS.");
     } else {
@@ -274,22 +275,13 @@ public abstract class AbstractFactDataWriter implements CarbonFactDataWriter {
   protected void commitCurrentFile(boolean copyInCurrentThread) {
     notifyDataMapBlockEnd();
     CarbonUtil.closeStreams(this.fileOutputStream, this.fileChannel);
-    if (enableDirectlyWriteData2Hdfs) {
-      if (copyInCurrentThread) {
-        CarbonUtil.completeRemainingHdfsReplicas(carbonDataFileHdfsPath,
-            FileFactory.FileType.HDFS);
-      } else {
-        executorServiceSubmitList.add(executorService.submit(
-            new CompleteHdfsBackendThread(carbonDataFileHdfsPath, FileFactory.FileType.HDFS)));
-      }
-    } else {
+    if (!enableDirectlyWriteData2Hdfs) {
       if (copyInCurrentThread) {
         CarbonUtil.copyCarbonDataFileToCarbonStorePath(carbonDataFileTempPath,
-            model.getCarbonDataDirectoryPath(),
-            fileSizeInBytes);
+            model.getCarbonDataDirectoryPath(), fileSizeInBytes);
       } else {
         executorServiceSubmitList.add(executorService.submit(
-            new CompleteHdfsBackendThread(carbonDataFileTempPath, FileFactory.FileType.LOCAL)));
+            new CompleteHdfsBackendThread(carbonDataFileTempPath)));
       }
     }
   }
@@ -310,10 +302,9 @@ public abstract class AbstractFactDataWriter implements CarbonFactDataWriter {
       if (enableDirectlyWriteData2Hdfs) {
         // the block size will be twice the block_size specified by user to make sure that
         // one carbondata file only consists exactly one HDFS block.
-        // Here we write the first replication and will complete the remaining later.
-        fileOutputStream = FileFactory.getDataOutputStream(carbonDataFileHdfsPath,
-            FileFactory.FileType.HDFS, CarbonCommonConstants.BYTEBUFFER_SIZE, fileSizeInBytes * 2,
-            (short) 1);
+        fileOutputStream = FileFactory
+            .getDataOutputStream(carbonDataFileHdfsPath, FileFactory.FileType.HDFS,
+                CarbonCommonConstants.BYTEBUFFER_SIZE, fileSizeInBytes * 2);
       } else {
         //each time we initialize writer, we choose a local temp location randomly
         String[] tempFileLocations = model.getStoreLocation();
@@ -416,13 +407,10 @@ public abstract class AbstractFactDataWriter implements CarbonFactDataWriter {
       writer.writeThrift(blockIndex);
     }
     writer.close();
-    if (enableDirectlyWriteData2Hdfs) {
-      executorServiceSubmitList.add(executorService.submit(
-          new CompleteHdfsBackendThread(indexFileName, FileFactory.FileType.HDFS)));
-    } else {
-      CarbonUtil.copyCarbonDataFileToCarbonStorePath(indexFileName,
-          model.getCarbonDataDirectoryPath(),
-          fileSizeInBytes);
+    if (!enableDirectlyWriteData2Hdfs) {
+      CarbonUtil
+          .copyCarbonDataFileToCarbonStorePath(indexFileName, model.getCarbonDataDirectoryPath(),
+              fileSizeInBytes);
     }
   }
 
@@ -459,11 +447,9 @@ public abstract class AbstractFactDataWriter implements CarbonFactDataWriter {
      * carbon store path
      */
     private String fileName;
-    private FileFactory.FileType fileType;
 
-    private CompleteHdfsBackendThread(String fileName, FileFactory.FileType fileType) {
+    private CompleteHdfsBackendThread(String fileName) {
       this.fileName = fileName;
-      this.fileType = fileType;
     }
 
     /**
@@ -474,13 +460,8 @@ public abstract class AbstractFactDataWriter implements CarbonFactDataWriter {
      */
     @Override
     public Void call() throws Exception {
-      if (FileFactory.FileType.HDFS == fileType) {
-        CarbonUtil.completeRemainingHdfsReplicas(fileName, fileType);
-      } else {
-        CarbonUtil.copyCarbonDataFileToCarbonStorePath(fileName,
-            model.getCarbonDataDirectoryPath(),
-            fileSizeInBytes);
-      }
+      CarbonUtil.copyCarbonDataFileToCarbonStorePath(fileName, model.getCarbonDataDirectoryPath(),
+          fileSizeInBytes);
       return null;
     }
   }


[32/50] [abbrv] carbondata git commit: [CARBONDATA-2475] Support Modular Core for Materialized View DataMap for query matching and rewriting

Posted by ma...@apache.org.
http://git-wip-us.apache.org/repos/asf/carbondata/blob/bf73e9fe/datamap/mv/core/src/test/scala/org/apache/carbondata/mv/rewrite/MVTpchTestCase.scala
----------------------------------------------------------------------
diff --git a/datamap/mv/core/src/test/scala/org/apache/carbondata/mv/rewrite/MVTpchTestCase.scala b/datamap/mv/core/src/test/scala/org/apache/carbondata/mv/rewrite/MVTpchTestCase.scala
new file mode 100644
index 0000000..89813b5
--- /dev/null
+++ b/datamap/mv/core/src/test/scala/org/apache/carbondata/mv/rewrite/MVTpchTestCase.scala
@@ -0,0 +1,247 @@
+package org.apache.carbondata.mv.rewrite
+
+import java.io.File
+
+import org.apache.spark.sql.catalyst.expressions.aggregate.AggregateExpression
+import org.apache.spark.sql.catalyst.plans.logical.LogicalPlan
+import org.apache.spark.sql.execution.datasources.LogicalRelation
+import org.apache.spark.sql.test.util.QueryTest
+import org.scalatest.BeforeAndAfterAll
+
+import org.apache.carbondata.core.constants.CarbonCommonConstants
+import org.apache.carbondata.core.util.CarbonProperties
+
+class MVTpchTestCase extends QueryTest with BeforeAndAfterAll {
+
+  override def beforeAll {
+    drop()
+    val projectPath = new File(this.getClass.getResource("/").getPath + "../../../../../")
+      .getCanonicalPath.replaceAll("\\\\", "/")
+    val integrationPath = s"$projectPath/integration"
+    val resourcesPath = s"$integrationPath/spark-common-test/src/test/resources"
+
+    sql(s"""create table if not exists LINEITEM(  L_SHIPDATE date,  L_SHIPMODE string,  L_SHIPINSTRUCT string,  L_RETURNFLAG string,  L_RECEIPTDATE date,  L_ORDERKEY INT ,  L_PARTKEY INT ,  L_SUPPKEY   string,  L_LINENUMBER int,  L_QUANTITY double,  L_EXTENDEDPRICE double,  L_DISCOUNT double,  L_TAX double,  L_LINESTATUS string,  L_COMMITDATE date,  L_COMMENT  string) STORED BY 'org.apache.carbondata.format'""")
+    sql(s"""create table if not exists ORDERS(  O_ORDERDATE date,  O_ORDERPRIORITY string,  O_ORDERSTATUS string,  O_ORDERKEY int,  O_CUSTKEY string,  O_TOTALPRICE double,  O_CLERK string,  O_SHIPPRIORITY int,  O_COMMENT string) STORED BY 'org.apache.carbondata.format'""")
+    sql(s"""create table if not exists CUSTOMER(  C_MKTSEGMENT string,  C_NATIONKEY string,  C_CUSTKEY string,  C_NAME string,  C_ADDRESS string,  C_PHONE string,  C_ACCTBAL double,  C_COMMENT string) STORED BY 'org.apache.carbondata.format'""")
+    sql(s"""create table if not exists REGION(  R_NAME string,  R_REGIONKEY string,  R_COMMENT string) STORED BY 'org.apache.carbondata.format'""")
+    sql(s"""create table if not exists NATION (  N_NAME string,  N_NATIONKEY string,  N_REGIONKEY string,  N_COMMENT  string) STORED BY 'org.apache.carbondata.format'""")
+    sql(s"""create table if not exists SUPPLIER(S_COMMENT string,S_SUPPKEY string,S_NAME string, S_ADDRESS string, S_NATIONKEY string, S_PHONE string, S_ACCTBAL double) STORED BY 'org.apache.carbondata.format'""")
+
+    sql(s"""load data inpath "$resourcesPath/tpch/lineitem.csv" into table lineitem options('DELIMITER'='|','FILEHEADER'='L_ORDERKEY,L_PARTKEY,L_SUPPKEY,L_LINENUMBER,L_QUANTITY,L_EXTENDEDPRICE,L_DISCOUNT,L_TAX,L_RETURNFLAG,L_LINESTATUS,L_SHIPDATE,L_COMMITDATE,L_RECEIPTDATE,L_SHIPINSTRUCT,L_SHIPMODE,L_COMMENT')""")
+    sql(s"""load data inpath "$resourcesPath/tpch/orders.csv" into table ORDERS options('DELIMITER'='|','FILEHEADER'='O_ORDERKEY,O_CUSTKEY,O_ORDERSTATUS,O_TOTALPRICE,O_ORDERDATE,O_ORDERPRIORITY,O_CLERK,O_SHIPPRIORITY,O_COMMENT')""")
+    sql(s"""load data inpath "$resourcesPath/tpch/customers.csv" into  table CUSTOMER options('DELIMITER'='|','FILEHEADER'='C_CUSTKEY,C_NAME,C_ADDRESS,C_NATIONKEY,C_PHONE,C_ACCTBAL,C_MKTSEGMENT,C_COMMENT')""")
+    sql(s"""load data inpath "$resourcesPath/tpch/region.csv" into table REGION options('DELIMITER'='|','FILEHEADER'='R_REGIONKEY,R_NAME,R_COMMENT')""")
+    sql(s"""load data inpath "$resourcesPath/tpch/nation.csv" into table NATION options('DELIMITER'='|','FILEHEADER'='N_NATIONKEY,N_NAME,N_REGIONKEY,N_COMMENT')""")
+    sql(s"""load data inpath "$resourcesPath/tpch/supplier.csv" into table SUPPLIER options('DELIMITER'='|','FILEHEADER'='S_SUPPKEY,S_NAME,S_ADDRESS,S_NATIONKEY,S_PHONE,S_ACCTBAL,S_COMMENT')""")
+
+
+    sql(s"""create table if not exists LINEITEM1(  L_SHIPDATE date,  L_SHIPMODE string,  L_SHIPINSTRUCT string,  L_RETURNFLAG string,  L_RECEIPTDATE date,  L_ORDERKEY INT ,  L_PARTKEY INT ,  L_SUPPKEY   string,  L_LINENUMBER int,  L_QUANTITY double,  L_EXTENDEDPRICE double,  L_DISCOUNT double,  L_TAX double,  L_LINESTATUS string,  L_COMMITDATE date,  L_COMMENT  string) STORED BY 'org.apache.carbondata.format'""")
+    sql(s"""create table if not exists ORDERS1(  O_ORDERDATE date,  O_ORDERPRIORITY string,  O_ORDERSTATUS string,  O_ORDERKEY int,  O_CUSTKEY string,  O_TOTALPRICE double,  O_CLERK string,  O_SHIPPRIORITY int,  O_COMMENT string) STORED BY 'org.apache.carbondata.format'""")
+    sql(s"""create table if not exists CUSTOMER1(  C_MKTSEGMENT string,  C_NATIONKEY string,  C_CUSTKEY string,  C_NAME string,  C_ADDRESS string,  C_PHONE string,  C_ACCTBAL double,  C_COMMENT string) STORED BY 'org.apache.carbondata.format'""")
+    sql(s"""create table if not exists REGION1(  R_NAME string,  R_REGIONKEY string,  R_COMMENT string) STORED BY 'org.apache.carbondata.format'""")
+    sql(s"""create table if not exists NATION1 (  N_NAME string,  N_NATIONKEY string,  N_REGIONKEY string,  N_COMMENT  string) STORED BY 'org.apache.carbondata.format'""")
+    sql(s"""create table if not exists SUPPLIER1(S_COMMENT string,S_SUPPKEY string,S_NAME string, S_ADDRESS string, S_NATIONKEY string, S_PHONE string, S_ACCTBAL double) STORED BY 'org.apache.carbondata.format'""")
+
+    sql(s"""load data inpath "$resourcesPath/tpch/lineitem.csv" into table lineitem1 options('DELIMITER'='|','FILEHEADER'='L_ORDERKEY,L_PARTKEY,L_SUPPKEY,L_LINENUMBER,L_QUANTITY,L_EXTENDEDPRICE,L_DISCOUNT,L_TAX,L_RETURNFLAG,L_LINESTATUS,L_SHIPDATE,L_COMMITDATE,L_RECEIPTDATE,L_SHIPINSTRUCT,L_SHIPMODE,L_COMMENT')""")
+    sql(s"""load data inpath "$resourcesPath/tpch/orders.csv" into table ORDERS1 options('DELIMITER'='|','FILEHEADER'='O_ORDERKEY,O_CUSTKEY,O_ORDERSTATUS,O_TOTALPRICE,O_ORDERDATE,O_ORDERPRIORITY,O_CLERK,O_SHIPPRIORITY,O_COMMENT')""")
+    sql(s"""load data inpath "$resourcesPath/tpch/customers.csv" into  table CUSTOMER1 options('DELIMITER'='|','FILEHEADER'='C_CUSTKEY,C_NAME,C_ADDRESS,C_NATIONKEY,C_PHONE,C_ACCTBAL,C_MKTSEGMENT,C_COMMENT')""")
+    sql(s"""load data inpath "$resourcesPath/tpch/region.csv" into table REGION1 options('DELIMITER'='|','FILEHEADER'='R_REGIONKEY,R_NAME,R_COMMENT')""")
+    sql(s"""load data inpath "$resourcesPath/tpch/nation.csv" into table NATION1 options('DELIMITER'='|','FILEHEADER'='N_NATIONKEY,N_NAME,N_REGIONKEY,N_COMMENT')""")
+    sql(s"""load data inpath "$resourcesPath/tpch/supplier.csv" into table SUPPLIER1 options('DELIMITER'='|','FILEHEADER'='S_SUPPKEY,S_NAME,S_ADDRESS,S_NATIONKEY,S_PHONE,S_ACCTBAL,S_COMMENT')""")
+
+
+  }
+
+  test("test create datamap with tpch1") {
+    sql(s"drop datamap if exists datamap1")
+    sql("create datamap datamap1 using 'mv' as select l_returnflag, l_linestatus,l_shipdate, sum(l_quantity) as sum_qty, sum(l_extendedprice) as sum_base_price, sum(l_extendedprice*(1-l_discount)) as sum_disc_price, sum(l_extendedprice*(1-l_discount)*(1+l_tax)) as sum_charge,count(*) as count_order from lineitem group by l_returnflag, l_linestatus,l_shipdate")
+    sql(s"rebuild datamap datamap1")
+    val df = sql("select l_returnflag, l_linestatus, sum(l_quantity) as sum_qty, sum(l_extendedprice) as sum_base_price, sum(l_extendedprice*(1-l_discount)) as sum_disc_price, sum(l_extendedprice*(1-l_discount)*(1+l_tax)) as sum_charge,count(*) as count_order from lineitem where l_shipdate <= date('1998-09-02') group by l_returnflag, l_linestatus order by l_returnflag, l_linestatus")
+    val analyzed = df.queryExecution.analyzed
+    assert(verifyMVDataMap(analyzed, "datamap1"))
+//    checkAnswer(df, sql("select l_returnflag, l_linestatus, sum(l_quantity) as sum_qty, sum(l_extendedprice) as sum_base_price, sum(l_extendedprice*(1-l_discount)) as sum_disc_price, sum(l_extendedprice*(1-l_discount)*(1+l_tax)) as sum_charge,count(*) as count_order from lineitem1 where l_shipdate <= date('1998-09-02') group by l_returnflag, l_linestatus order by l_returnflag, l_linestatus"))
+    sql(s"drop datamap datamap1")
+  }
+
+  test("test create datamap with tpch1 with order") {
+    sql(s"drop datamap if exists datamap2")
+    sql("create datamap datamap2 using 'mv' as select l_returnflag, l_linestatus,l_shipdate, sum(l_quantity) as sum_qty, sum(l_extendedprice) as sum_base_price, sum(l_extendedprice*(1-l_discount)) as sum_disc_price, sum(l_extendedprice*(1-l_discount)*(1+l_tax)) as sum_charge from lineitem group by l_returnflag, l_linestatus,l_shipdate order by l_returnflag, l_linestatus")
+    sql(s"rebuild datamap datamap2")
+    val df = sql("select l_returnflag, l_linestatus, sum(l_quantity) as sum_qty, sum(l_extendedprice) as sum_base_price, sum(l_extendedprice*(1-l_discount)) as sum_disc_price, sum(l_extendedprice*(1-l_discount)*(1+l_tax)) as sum_charge from lineitem where l_shipdate <= date('1998-09-02') group by l_returnflag, l_linestatus order by l_returnflag, l_linestatus")
+    val analyzed = df.queryExecution.analyzed
+    assert(verifyMVDataMap(analyzed, "datamap2"))
+//    checkAnswer(df, sql("select l_returnflag, l_linestatus, sum(l_quantity) as sum_qty, sum(l_extendedprice) as sum_base_price, sum(l_extendedprice*(1-l_discount)) as sum_disc_price, sum(l_extendedprice*(1-l_discount)*(1+l_tax)) as sum_charge from lineitem1 where l_shipdate <= date('1998-09-02') group by l_returnflag, l_linestatus order by l_returnflag, l_linestatus"))
+    sql(s"drop datamap datamap2")
+  }
+
+  test("test create datamap with tpch1 with sub group by") {
+    sql(s"drop datamap if exists datamap3")
+    sql("create datamap datamap3 using 'mv' as select l_returnflag, l_linestatus,l_shipdate, sum(l_quantity) as sum_qty, sum(l_extendedprice) as sum_base_price, sum(l_extendedprice*(1-l_discount)) as sum_disc_price, sum(l_extendedprice*(1-l_discount)*(1+l_tax)) as sum_charge from lineitem group by l_returnflag, l_linestatus,l_shipdate")
+    sql(s"rebuild datamap datamap3")
+    val df = sql("select l_returnflag, l_linestatus, sum(l_quantity) as sum_qty, sum(l_extendedprice) as sum_base_price, sum(l_extendedprice*(1-l_discount)) as sum_disc_price from lineitem where l_shipdate <= date('1998-09-02') group by l_returnflag, l_linestatus order by l_returnflag, l_linestatus")
+    val analyzed = df.queryExecution.analyzed
+    assert(verifyMVDataMap(analyzed, "datamap3"))
+//    checkAnswer(df, sql("select l_returnflag, l_linestatus, sum(l_quantity) as sum_qty, sum(l_extendedprice) as sum_base_price, sum(l_extendedprice*(1-l_discount)) as sum_disc_price from lineitem1 where l_shipdate <= date('1998-09-02') group by l_returnflag, l_linestatus order by l_returnflag, l_linestatus"))
+    sql(s"drop datamap datamap3")
+  }
+
+  ignore("test create datamap with tpch3") {
+    sql(s"drop datamap if exists datamap4")
+    sql("create datamap datamap4 using 'mv' as select l_orderkey, sum(l_extendedprice * (1 - l_discount)) as revenue, o_orderdate, o_shippriority from customer, orders, lineitem where c_mktsegment = 'BUILDING' and c_custkey = o_custkey and l_orderkey = o_orderkey and o_orderdate < date('1995-03-15') and l_shipdate > date('1995-03-15') group by l_orderkey, o_orderdate, o_shippriority")
+    sql(s"rebuild datamap datamap4")
+    val df = sql("select l_orderkey, sum(l_extendedprice * (1 - l_discount)) as revenue, o_orderdate, o_shippriority from customer, orders, lineitem where c_mktsegment = 'BUILDING' and c_custkey = o_custkey and l_orderkey = o_orderkey and o_orderdate < date('1995-03-15') and l_shipdate > date('1995-03-15') group by l_orderkey, o_orderdate, o_shippriority order by revenue desc, o_orderdate limit 10")
+    val analyzed = df.queryExecution.analyzed
+    assert(verifyMVDataMap(analyzed, "datamap4"))
+//    checkAnswer(df, sql("select l_orderkey, sum(l_extendedprice * (1 - l_discount)) as revenue, o_orderdate, o_shippriority from customer1, orders1, lineitem1 where c_mktsegment = 'BUILDING' and c_custkey = o_custkey and l_orderkey = o_orderkey and o_orderdate < date('1995-03-15') and l_shipdate > date('1995-03-15') group by l_orderkey, o_orderdate, o_shippriority order by revenue desc, o_orderdate limit 10"))
+    sql(s"drop datamap datamap4")
+  }
+
+  test("test create datamap with tpch3 with no filters on mv") {
+    sql(s"drop datamap if exists datamap5")
+    sql("create datamap datamap5 using 'mv' as select l_orderkey, sum(l_extendedprice * (1 - l_discount)) as revenue, o_orderdate, o_shippriority,c_mktsegment,l_shipdate from customer, orders, lineitem where c_custkey = o_custkey and l_orderkey = o_orderkey group by l_orderkey, o_orderdate, o_shippriority,c_mktsegment,l_shipdate")
+    sql(s"rebuild datamap datamap5")
+    val df = sql("select l_orderkey, sum(l_extendedprice * (1 - l_discount)) as revenue, o_orderdate, o_shippriority from customer, orders, lineitem where c_mktsegment = 'BUILDING' and c_custkey = o_custkey and l_orderkey = o_orderkey and o_orderdate < date('1995-03-15') and l_shipdate > date('1995-03-15') group by l_orderkey, o_orderdate, o_shippriority order by revenue desc, o_orderdate limit 10")
+    val analyzed = df.queryExecution.analyzed
+    assert(verifyMVDataMap(analyzed, "datamap5"))
+//    checkAnswer(df, sql("select l_orderkey, sum(l_extendedprice * (1 - l_discount)) as revenue, o_orderdate, o_shippriority from customer1, orders1, lineitem1 where c_mktsegment = 'BUILDING' and c_custkey = o_custkey and l_orderkey = o_orderkey and o_orderdate < date('1995-03-15') and l_shipdate > date('1995-03-15') group by l_orderkey, o_orderdate, o_shippriority order by revenue desc, o_orderdate limit 10"))
+    sql(s"drop datamap datamap5")
+  }
+
+  ignore("test create datamap with tpch3 with filters on mv and all filter columns on projection") {
+    sql(s"drop datamap if exists datamap5")
+    sql("create datamap datamap5 using 'mv' as select l_orderkey, sum(l_extendedprice * (1 - l_discount)) as revenue, o_orderdate, o_shippriority,c_mktsegment,l_shipdate from customer, orders, lineitem where c_mktsegment = 'BUILDING' and c_custkey = o_custkey and l_orderkey = o_orderkey and o_orderdate < date('1995-03-15') and l_shipdate > date('1995-03-15') group by l_orderkey, o_orderdate, o_shippriority,c_mktsegment,l_shipdate")
+    sql(s"rebuild datamap datamap5")
+    val df = sql("select l_orderkey, sum(l_extendedprice * (1 - l_discount)) as revenue, o_orderdate, o_shippriority from customer, orders, lineitem where c_mktsegment = 'BUILDING' and c_custkey = o_custkey and l_orderkey = o_orderkey and o_orderdate < date('1995-03-15') and l_shipdate > date('1995-03-15') group by l_orderkey, o_orderdate, o_shippriority order by revenue desc, o_orderdate limit 10")
+    val analyzed = df.queryExecution.analyzed
+    assert(verifyMVDataMap(analyzed, "datamap5"))
+//    checkAnswer(df, sql("select l_orderkey, sum(l_extendedprice * (1 - l_discount)) as revenue, o_orderdate, o_shippriority from customer1, orders1, lineitem1 where c_mktsegment = 'BUILDING' and c_custkey = o_custkey and l_orderkey = o_orderkey and o_orderdate < date('1995-03-15') and l_shipdate > date('1995-03-15') group by l_orderkey, o_orderdate, o_shippriority order by revenue desc, o_orderdate limit 10"))
+    sql(s"drop datamap datamap5")
+  }
+
+  ignore("test create datamap with tpch4 (core issue)") {
+    sql(s"drop datamap if exists datamap6")
+    sql("create datamap datamap6 using 'mv' as select o_orderpriority, count(*) as order_count from orders where o_orderdate >= date('1993-07-01') and o_orderdate < date('1993-10-01') and exists ( select * from lineitem where l_orderkey = o_orderkey and l_commitdate < l_receiptdate ) group by o_orderpriority order by o_orderpriority")
+    sql(s"rebuild datamap datamap6")
+    val df = sql("select o_orderpriority, count(*) as order_count from orders where o_orderdate >= date('1993-07-01') and o_orderdate < date('1993-10-01') and exists ( select * from lineitem where l_orderkey = o_orderkey and l_commitdate < l_receiptdate ) group by o_orderpriority order by o_orderpriority")
+    val analyzed = df.queryExecution.analyzed
+    assert(verifyMVDataMap(analyzed, "datamap6"))
+//    checkAnswer(df, sql("select o_orderpriority, count(*) as order_count from orders1 where o_orderdate >= date('1993-07-01') and o_orderdate < date('1993-10-01') and exists ( select * from lineitem1 where l_orderkey = o_orderkey and l_commitdate < l_receiptdate ) group by o_orderpriority order by o_orderpriority"))
+    sql(s"drop datamap datamap6")
+  }
+
+  ignore("test create datamap with tpch5") {
+    sql(s"drop datamap if exists datamap7")
+    sql("create datamap datamap7 using 'mv' as select n_name, sum(l_extendedprice * (1 - l_discount)) as revenue from customer, orders, lineitem, supplier, nation, region where c_custkey = o_custkey and l_orderkey = o_orderkey and l_suppkey = s_suppkey and c_nationkey = s_nationkey and s_nationkey = n_nationkey and n_regionkey = r_regionkey and r_name = 'ASIA' and o_orderdate >= date('1994-01-01') and o_orderdate < date('1995-01-01') group by n_name")
+    sql(s"rebuild datamap datamap7")
+    val df = sql("select n_name, sum(l_extendedprice * (1 - l_discount)) as revenue from customer, orders, lineitem, supplier, nation, region where c_custkey = o_custkey and l_orderkey = o_orderkey and l_suppkey = s_suppkey and c_nationkey = s_nationkey and s_nationkey = n_nationkey and n_regionkey = r_regionkey and r_name = 'ASIA' and o_orderdate >= date('1994-01-01') and o_orderdate < date('1995-01-01') group by n_name order by revenue desc")
+    val analyzed = df.queryExecution.analyzed
+    assert(verifyMVDataMap(analyzed, "datamap7"))
+//    checkAnswer(df, sql("select n_name, sum(l_extendedprice * (1 - l_discount)) as revenue from customer1, orders1, lineitem1, supplier1, nation1, region1 where c_custkey = o_custkey and l_orderkey = o_orderkey and l_suppkey = s_suppkey and c_nationkey = s_nationkey and s_nationkey = n_nationkey and n_regionkey = r_regionkey and r_name = 'ASIA' and o_orderdate >= date('1994-01-01') and o_orderdate < date('1995-01-01') group by n_name order by revenue desc"))
+    sql(s"drop datamap datamap7")
+  }
+
+  test("test create datamap with tpch5 with no filters on mv") {
+    sql(s"drop datamap if exists datamap8")
+    sql("create datamap datamap8 using 'mv' as select n_name,o_orderdate,r_name, sum(l_extendedprice * (1 - l_discount)) as revenue from customer, orders, lineitem, supplier, nation, region where c_custkey = o_custkey and l_orderkey = o_orderkey and l_suppkey = s_suppkey and c_nationkey = s_nationkey and s_nationkey = n_nationkey and n_regionkey = r_regionkey  group by n_name,o_orderdate,r_name")
+    sql(s"rebuild datamap datamap8")
+    val df = sql("select n_name, sum(l_extendedprice * (1 - l_discount)) as revenue from customer, orders, lineitem, supplier, nation, region where c_custkey = o_custkey and l_orderkey = o_orderkey and l_suppkey = s_suppkey and c_nationkey = s_nationkey and s_nationkey = n_nationkey and n_regionkey = r_regionkey and r_name = 'ASIA' and o_orderdate >= date('1994-01-01') and o_orderdate < date('1995-01-01') group by n_name order by revenue desc")
+    val analyzed = df.queryExecution.analyzed
+    assert(verifyMVDataMap(analyzed, "datamap8"))
+//    checkAnswer(df, sql("select n_name, sum(l_extendedprice * (1 - l_discount)) as revenue from customer1, orders1, lineitem1, supplier1, nation1, region1 where c_custkey = o_custkey and l_orderkey = o_orderkey and l_suppkey = s_suppkey and c_nationkey = s_nationkey and s_nationkey = n_nationkey and n_regionkey = r_regionkey and r_name = 'ASIA' and o_orderdate >= date('1994-01-01') and o_orderdate < date('1995-01-01') group by n_name order by revenue desc"))
+    sql(s"drop datamap datamap8")
+  }
+
+  test("test create datamap with tpch6") {
+    sql(s"drop datamap if exists datamap9")
+    sql("create datamap datamap9 using 'mv' as select sum(l_extendedprice * l_discount) as revenue from lineitem where l_shipdate >= date('1994-01-01') and l_shipdate < date('1995-01-01') and l_discount between 0.05 and 0.07 and l_quantity < 24")
+    sql(s"rebuild datamap datamap9")
+    val df = sql("select sum(l_extendedprice * l_discount) as revenue from lineitem where l_shipdate >= date('1994-01-01') and l_shipdate < date('1995-01-01') and l_discount between 0.05 and 0.07 and l_quantity < 24")
+    val analyzed = df.queryExecution.analyzed
+    assert(verifyMVDataMap(analyzed, "datamap9"))
+    assert(verifyAgg(analyzed))
+//    checkAnswer(df, sql("select sum(l_extendedprice * l_discount) as revenue from lineitem1 where l_shipdate >= date('1994-01-01') and l_shipdate < date('1995-01-01') and l_discount between 0.05 and 0.07 and l_quantity < 24"))
+    sql(s"drop datamap datamap9")
+  }
+
+  test("test create datamap with tpch6 with no filters on mv") {
+    sql(s"drop datamap if exists datamap10")
+    sql("create datamap datamap10 using 'mv' as select sum(l_extendedprice * l_discount) as revenue,l_shipdate,l_discount,l_quantity from lineitem group by l_shipdate,l_discount,l_quantity")
+    sql(s"rebuild datamap datamap10")
+    val df = sql("select sum(l_extendedprice * l_discount) as revenue from lineitem where l_shipdate >= date('1994-01-01') and l_shipdate < date('1995-01-01') and l_discount between 0.05 and 0.07 and l_quantity < 24")
+    val analyzed = df.queryExecution.analyzed
+    assert(verifyMVDataMap(analyzed, "datamap10"))
+    assert(verifyAgg(analyzed))
+//    checkAnswer(df, sql("select sum(l_extendedprice * l_discount) as revenue from lineitem1 where l_shipdate >= date('1994-01-01') and l_shipdate < date('1995-01-01') and l_discount between 0.05 and 0.07 and l_quantity < 24"))
+    sql(s"drop datamap datamap10")
+  }
+
+  test("test create datamap with tpch7 part of query1") {
+    sql(s"drop datamap if exists datamap11")
+    sql("create datamap datamap11 using 'mv' as select l_shipdate,n_name , l_extendedprice , l_discount from supplier,lineitem,orders,customer,nation n1 where s_suppkey = l_suppkey and o_orderkey = l_orderkey and c_custkey = o_custkey and s_nationkey = n1.n_nationkey and c_nationkey = n1.n_nationkey")
+    sql(s"rebuild datamap datamap11")
+    val df = sql("select year(l_shipdate) as l_year, l_extendedprice * (1 - l_discount) as volume from supplier,lineitem,orders,customer,nation n1 where s_suppkey = l_suppkey and o_orderkey = l_orderkey and c_custkey = o_custkey and s_nationkey = n1.n_nationkey and c_nationkey = n1.n_nationkey and ( (n1.n_name = 'FRANCE') or (n1.n_name = 'GERMANY') ) and l_shipdate between date('1995-01-01') and date('1996-12-31')")
+    val analyzed = df.queryExecution.analyzed
+    assert(verifyMVDataMap(analyzed, "datamap11"))
+//    checkAnswer(df, sql("select year(l_shipdate) as l_year, l_extendedprice * (1 - l_discount) as volume from supplier1,lineitem1,orders1,customer1,nation1 n1 where s_suppkey = l_suppkey and o_orderkey = l_orderkey and c_custkey = o_custkey and s_nationkey = n1.n_nationkey and c_nationkey = n1.n_nationkey and ( (n1.n_name = 'FRANCE') or (n1.n_name = 'GERMANY') ) and l_shipdate between date('1995-01-01') and date('1996-12-31')"))
+    sql(s"drop datamap datamap11")
+  }
+
+  test("test create datamap with tpch7 part of query2 (core issue)") {
+    sql(s"drop datamap if exists datamap12")
+    sql("create datamap datamap12 using 'mv' as select n1.n_name, l_shipdate, l_extendedprice ,l_discount from supplier,lineitem,orders,customer,nation n1 where s_suppkey = l_suppkey and o_orderkey = l_orderkey and c_custkey = o_custkey and s_nationkey = n1.n_nationkey and c_nationkey = n1.n_nationkey")
+    sql(s"rebuild datamap datamap12")
+    val df = sql("select supp_nation, l_year, sum(volume) as revenue from ( select n1.n_name as supp_nation, year(l_shipdate) as l_year, l_extendedprice * (1 - l_discount) as volume from supplier,lineitem,orders,customer,nation n1 where s_suppkey = l_suppkey and o_orderkey = l_orderkey and c_custkey = o_custkey and s_nationkey = n1.n_nationkey and c_nationkey = n1.n_nationkey and ( (n1.n_name = 'FRANCE' ) or (n1.n_name = 'GERMANY') ) and l_shipdate between date('1995-01-01') and date('1996-12-31') ) as shipping group by supp_nation, l_year order by supp_nation, l_year")
+    val analyzed = df.queryExecution.analyzed
+    assert(verifyMVDataMap(analyzed, "datamap12"))
+//    checkAnswer(df, sql("select supp_nation, l_year, sum(volume) as revenue from ( select n1.n_name as supp_nation, year(l_shipdate) as l_year, l_extendedprice * (1 - l_discount) as volume from supplier1,lineitem1,orders1,customer1,nation1 n1 where s_suppkey = l_suppkey and o_orderkey = l_orderkey and c_custkey = o_custkey and s_nationkey = n1.n_nationkey and c_nationkey = n1.n_nationkey and ( (n1.n_name = 'FRANCE' ) or (n1.n_name = 'GERMANY') ) and l_shipdate between date('1995-01-01') and date('1996-12-31') ) as shipping group by supp_nation, l_year order by supp_nation, l_year"))
+    sql(s"drop datamap datamap12")
+  }
+
+  ignore("test create datamap with tpch7 part of query3 (self join issue)") {
+    sql(s"drop datamap if exists datamap13")
+    sql("create datamap datamap13 using 'mv' as select n1.n_name as supp_nation, n2.n_name as cust_nation, l_shipdate, l_extendedprice * (1 - l_discount) as volume from supplier,lineitem,orders,customer,nation n1,nation n2 where s_suppkey = l_suppkey and o_orderkey = l_orderkey and c_custkey = o_custkey and s_nationkey = n1.n_nationkey and c_nationkey = n2.n_nationkey")
+    sql(s"rebuild datamap datamap13")
+    val df = sql("select supp_nation, cust_nation, l_year, sum(volume) as revenue from ( select n1.n_name as supp_nation, n2.n_name as cust_nation, year(l_shipdate) as l_year, l_extendedprice * (1 - l_discount) as volume from supplier,lineitem,orders,customer,nation n1,nation n2 where s_suppkey = l_suppkey and o_orderkey = l_orderkey and c_custkey = o_custkey and s_nationkey = n1.n_nationkey and c_nationkey = n2.n_nationkey and ( (n1.n_name = 'FRANCE' and n2.n_name = 'GERMANY') or (n1.n_name = 'GERMANY' and n2.n_name = 'FRANCE') ) and l_shipdate between date('1995-01-01') and date('1996-12-31') ) as shipping group by supp_nation, cust_nation, l_year order by supp_nation, cust_nation, l_year")
+    val analyzed = df.queryExecution.analyzed
+    assert(verifyMVDataMap(analyzed, "datamap13"))
+//    checkAnswer(df, sql("select supp_nation, cust_nation, l_year, sum(volume) as revenue from ( select n1.n_name as supp_nation, n2.n_name as cust_nation, year(l_shipdate) as l_year, l_extendedprice * (1 - l_discount) as volume from supplier,lineitem1,orders1,customer1,nation1 n1,nation1 n2 where s_suppkey = l_suppkey and o_orderkey = l_orderkey and c_custkey = o_custkey and s_nationkey = n1.n_nationkey and c_nationkey = n2.n_nationkey and ( (n1.n_name = 'FRANCE' and n2.n_name = 'GERMANY') or (n1.n_name = 'GERMANY' and n2.n_name = 'FRANCE') ) and l_shipdate between date('1995-01-01') and date('1996-12-31') ) as shipping group by supp_nation, cust_nation, l_year order by supp_nation, cust_nation, l_year"))
+    sql(s"drop datamap datamap13")
+  }
+
+
+  // Returns true if the analyzed plan reads the MV's backing table (<dataMapName>_table),
+  // i.e. the query was rewritten to use the datamap.
+  def verifyMVDataMap(logicalPlan: LogicalPlan, dataMapName: String): Boolean = {
+    val tables = logicalPlan collect {
+      case l: LogicalRelation => l.catalogTable.get
+    }
+    tables.exists(_.identifier.table.equalsIgnoreCase(dataMapName+"_table"))
+  }
+  // Returns true if the plan still contains at least one aggregate expression.
+  def verifyAgg(logicalPlan: LogicalPlan): Boolean = {
+    var aggExpExists = false
+    logicalPlan transformExpressions {
+      case a:AggregateExpression =>
+        aggExpExists = true
+        a
+    }
+    aggExpExists
+  }
+
+
+  def drop(): Unit = {
+    sql("drop table IF EXISTS LINEITEM")
+    sql("drop table IF EXISTS ORDERS")
+    sql("drop table IF EXISTS CUSTOMER")
+    sql("drop table IF EXISTS REGION")
+    sql("drop table IF EXISTS NATION")
+    sql("drop table IF EXISTS SUPPLIER")
+    sql("drop table IF EXISTS LINEITEM1")
+    sql("drop table IF EXISTS ORDERS1")
+    sql("drop table IF EXISTS CUSTOMER1")
+    sql("drop table IF EXISTS REGION1")
+    sql("drop table IF EXISTS NATION1")
+    sql("drop table IF EXISTS SUPPLIER1")
+  }
+
+  override def afterAll {
+//    drop()
+  }
+}
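
As a reference for the pattern every test above follows, here is a minimal sketch of the MV datamap lifecycle, assuming a CarbonSession-backed SparkSession and the lineitem table created by loadData(); the datamap name mv_sketch and the exact query are illustrative only:

import org.apache.spark.sql.SparkSession

def mvLifecycleSketch(spark: SparkSession): Unit = {
  // define the materialized view over lineitem
  spark.sql("create datamap mv_sketch using 'mv' as " +
    "select l_returnflag, l_linestatus, sum(l_quantity) as sum_qty " +
    "from lineitem group by l_returnflag, l_linestatus")
  // load data into the MV's backing table (mv_sketch_table)
  spark.sql("rebuild datamap mv_sketch")
  // groups by a subset of the MV's group-by columns, so the analyzed plan
  // should scan mv_sketch_table instead of lineitem (see verifyMVDataMap)
  spark.sql("select l_returnflag, sum(l_quantity) from lineitem group by l_returnflag").show()
  spark.sql("drop datamap mv_sketch")
}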

http://git-wip-us.apache.org/repos/asf/carbondata/blob/bf73e9fe/datamap/mv/core/src/test/scala/org/apache/carbondata/mv/rewrite/SelectSelectExactChildrenSuite.scala
----------------------------------------------------------------------
diff --git a/datamap/mv/core/src/test/scala/org/apache/carbondata/mv/rewrite/SelectSelectExactChildrenSuite.scala b/datamap/mv/core/src/test/scala/org/apache/carbondata/mv/rewrite/SelectSelectExactChildrenSuite.scala
new file mode 100644
index 0000000..0ee2475
--- /dev/null
+++ b/datamap/mv/core/src/test/scala/org/apache/carbondata/mv/rewrite/SelectSelectExactChildrenSuite.scala
@@ -0,0 +1,76 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.carbondata.mv.rewrite
+
+import org.apache.spark.sql.catalyst.dsl.expressions._
+import org.apache.spark.sql.catalyst.plans.logical._
+import org.apache.spark.sql.test.util.PlanTest
+
+class SelectSelectExactChildrenSuite extends PlanTest {
+  
+  object Match extends DefaultMatchMaker {
+    val patterns = SelectSelectNoChildDelta :: Nil
+  }
+  
+  val testRelation1 = LocalRelation('tid.int,'fpgid.int,'flid.int,'date.timestamp,'faid.int,'price.double,'qty.int,'disc.string)
+  val testRelation2 = LocalRelation('lid.int,'city.string,'state.string,'country.string)
+  
+//  test("pro-typical lower select") {
+//    val fact = testRelation1.subquery('fact)
+//    val dim = testRelation2.subquery('dim)
+//    
+//    val lowerSTSelect =
+//      fact
+//        .select('faid,'flid,Year('date) as 'year)
+//        .analyze
+//    val lowerUQSelect =
+//      fact.join(dim)
+//          .where("fact.flid".attr === "dim.lid".attr && "dim.country".attr === "USA")
+//          .select('faid,'flid,Year('date) as 'year, 'state)
+//          .analyze
+//          
+//    val matched = Match.execute(lowerSTSelect.model,lowerUQSelect.model,None).next 
+//    
+//    val correctAnswer = 
+//      lowerSTSelect.join(dim)
+//          .where("fact.flid".attr === "dim.lid".attr && "dim.country".attr === "USA") 
+//          .select('faid,'flid,Year('date) as 'year, 'state)
+//          .analyze.model
+//    
+//    comparePlans(matched, correctAnswer)
+//  }
+  
+//  val testSummaryDataset =
+//    s"""
+//       |SELECT faid, flid, year_proj(date) as year, count(*) as cnt
+//       |FROM Fact
+//       |GROUP BY faid, flid, year_proj(date)
+//    """.stripMargin.trim
+//      
+//  val testUserQuery = 
+//    s"""
+//       |SELECT faid, state, year_proj(date) as year, count(*) as cnt
+//       |FROM Fact
+//       |  INNER JOIN Dim 
+//       |  ON Fact.flid = Dim.lid AND Dim.country = "USA"
+//       |GROUP BY Fact.faid,Fact.state,year_proj(Fact.date)
+//       |HAVING count(*) > 2
+//    """.stripMargin.trim
+    
+  
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/carbondata/blob/bf73e9fe/datamap/mv/core/src/test/scala/org/apache/carbondata/mv/rewrite/Tpcds_1_4_Suite.scala
----------------------------------------------------------------------
diff --git a/datamap/mv/core/src/test/scala/org/apache/carbondata/mv/rewrite/Tpcds_1_4_Suite.scala b/datamap/mv/core/src/test/scala/org/apache/carbondata/mv/rewrite/Tpcds_1_4_Suite.scala
new file mode 100644
index 0000000..7fac508
--- /dev/null
+++ b/datamap/mv/core/src/test/scala/org/apache/carbondata/mv/rewrite/Tpcds_1_4_Suite.scala
@@ -0,0 +1,80 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.carbondata.mv.rewrite
+
+import org.apache.spark.sql.SparkSession
+import org.apache.spark.sql.test.util.PlanTest
+import org.scalatest.BeforeAndAfter
+
+import org.apache.carbondata.mv.testutil.Tpcds_1_4_Tables._
+
+class Tpcds_1_4_Suite extends PlanTest with BeforeAndAfter {
+
+//  test("test using tpc-ds queries") {
+//
+//    tpcds1_4Tables.foreach { create_table =>
+//      hiveClient.runSqlHive(create_table)
+//    }
+    
+//    val dest = "case_30"
+//    val dest = "case_32"
+    val dest = "case_3"
+    
+//    tpcds_1_4_testCases.foreach { testcase =>
+//      if (testcase._1 == dest) {
+//        val mqoSession = new MQOSession(testHive.sparkSession)
+//        val summaryDF = testHive.sparkSession.sql(testcase._2)
+//        mqoSession.sharedState.registerSummaryDataset(summaryDF)
+//
+//        Try(mqoSession.rewrite(testcase._3).withSummaryData) match {
+//          case Success(rewrittenPlan) =>
+//            println(s"""\n\n===== REWRITTEN MODULAR PLAN for ${testcase._1} =====\n\n$rewrittenPlan \n""")
+//
+//            Try(rewrittenPlan.asCompactSQL) match {
+//              case Success(s) =>
+//                println(s"\n\n===== CONVERTED SQL for ${testcase._1} =====\n\n${s}\n")
+//                if (!s.trim.equals(testcase._4)) {
+//                  println(
+//                      s"""
+//                      |=== FAIL: SQLs do not match ===
+//                      |${sideBySide(s, testcase._4).mkString("\n")}
+//                      """.stripMargin)
+//                      }
+//
+//              case Failure(e) => println(s"""\n\n===== CONVERTED SQL for ${testcase._1} failed =====\n\n${e.toString}""")
+//            }
+//
+//          case Failure(e) => println(s"""\n\n==== MODULARIZE the logical query plan for ${testcase._1} failed =====\n\n${e.toString}""")
+//        }
+        
+//        val rewrittenSQL = rewrittenPlan.asCompactSQL
+//        val rewrittenSQL = mqoSession.rewrite(testcase._3).toCompactSQL
+
+//        if (!rewrittenSQL.equals(testcase._4)) {
+//          fail(
+//              s"""
+//              |=== FAIL: SQLs do not match ===
+//              |${sideBySide(rewrittenSQL, testcase._4).mkString("\n")}
+//              """.stripMargin)
+//              }
+//        }
+//
+//    }
+//
+//  }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/carbondata/blob/bf73e9fe/datamap/mv/core/src/test/scala/org/apache/carbondata/mv/rewrite/matching/TestSQLBatch.scala
----------------------------------------------------------------------
diff --git a/datamap/mv/core/src/test/scala/org/apache/carbondata/mv/rewrite/matching/TestSQLBatch.scala b/datamap/mv/core/src/test/scala/org/apache/carbondata/mv/rewrite/matching/TestSQLBatch.scala
new file mode 100644
index 0000000..02bbff3
--- /dev/null
+++ b/datamap/mv/core/src/test/scala/org/apache/carbondata/mv/rewrite/matching/TestSQLBatch.scala
@@ -0,0 +1,214 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+package org.apache.carbondata.mv.rewrite.matching
+
+object TestSQLBatch {
+
+  // seq of (case name, summaryDataset(MV), testUserSQL(Q), correctRewrittenSQL)
+  val sampleTestCases = Seq(
+    ("case_1",
+     s"""
+        |SELECT i_item_id
+        |FROM Item
+        |WHERE i_item_sk = 1
+     """.stripMargin.trim,
+     s"""
+        |SELECT i_item_id, i_item_sk
+        |FROM Item
+        |WHERE i_item_sk = 1 and i_item_id > 0
+     """.stripMargin.trim,
+     s"""
+        |SELECT item.`i_item_id`, item.`i_item_sk` 
+        |FROM
+        |  item
+        |WHERE
+        |  (item.`i_item_sk` = 1) AND (item.`i_item_id` > 0)
+     """.stripMargin.trim),
+    ("case_2",
+     s"""
+        |SELECT i_item_id
+        |FROM Item
+        |WHERE i_item_sk = 1
+     """.stripMargin.trim,
+     s"""
+        |SELECT i_item_id, i_item_sk
+        |FROM Item
+        |WHERE i_item_sk = 1 or i_item_id > 0
+     """.stripMargin.trim,
+     s"""
+        |SELECT item.`i_item_id`, item.`i_item_sk` 
+        |FROM
+        |  item
+        |WHERE
+        |  ((item.`i_item_sk` = 1) OR (item.`i_item_id` > 0))
+     """.stripMargin.trim),
+    ("case_3",
+     s"""
+        |SELECT faid, flid, date
+        |FROM Fact
+     """.stripMargin.trim,
+     s"""
+        |SELECT faid, flid, year(date) as year
+        |FROM Fact
+     """.stripMargin.trim,
+     s"""
+        |SELECT gen_subsumer_0.`faid`, gen_subsumer_0.`flid`, year(CAST(gen_subsumer_0.`date` AS DATE)) AS `year` 
+        |FROM
+        |  (SELECT fact.`faid`, fact.`flid`, fact.`date` 
+        |  FROM
+        |    fact) gen_subsumer_0
+     """.stripMargin.trim),
+    ("case_4",
+     s"""
+        |SELECT faid, flid, date
+        |FROM Fact
+     """.stripMargin.trim,
+     s"""
+        |SELECT faid, flid
+        |FROM Fact
+        |WHERE year(date) = 2000
+     """.stripMargin.trim,
+     s"""
+        |SELECT gen_subsumer_0.`faid`, gen_subsumer_0.`flid` 
+        |FROM
+        |  (SELECT fact.`faid`, fact.`flid`, fact.`date` 
+        |  FROM
+        |    fact) gen_subsumer_0 
+        |WHERE
+        |  (year(CAST(gen_subsumer_0.`date` AS DATE)) = 2000)
+     """.stripMargin.trim),
+    ("case_5",
+     s"""
+        |SELECT faid, flid, date
+        |FROM Fact
+        |WHERE year(date) = 2000
+     """.stripMargin.trim,
+     s"""
+        |SELECT faid, flid
+        |FROM Fact
+        |WHERE year(date) = 2000
+     """.stripMargin.trim,
+     s"""
+        |SELECT gen_subsumer_0.`faid`, gen_subsumer_0.`flid` 
+        |FROM
+        |  (SELECT fact.`faid`, fact.`flid`, fact.`date` 
+        |  FROM
+        |    fact
+        |  WHERE
+        |    (year(CAST(fact.`date` AS DATE)) = 2000)) gen_subsumer_0 
+        |WHERE
+        |  (year(CAST(gen_subsumer_0.`date` AS DATE)) = 2000)
+     """.stripMargin.trim),
+    ("case_6",
+     s"""
+        |SELECT faid, flid, date
+        |FROM Fact
+        |WHERE year(date) in (2000,2001)
+     """.stripMargin.trim,
+     s"""
+        |SELECT faid, flid
+        |FROM Fact
+        |WHERE year(date) = 2000
+     """.stripMargin.trim,
+     s"""
+        |SELECT fact.`faid`, fact.`flid` 
+        |FROM
+        |  fact
+        |WHERE
+        |  (year(CAST(fact.`date` AS DATE)) = 2000)
+     """.stripMargin.trim),
+    ("case_7",
+     s"""
+        |SELECT faid, flid, year(date) as year, count(*) as cnt
+        |FROM Fact
+        |GROUP BY faid, flid, year(date)
+     """.stripMargin.trim,
+     s"""
+        |SELECT faid, year(date) as year, count(*) as cnt
+        |FROM Fact
+        |GROUP BY Fact.faid,year(Fact.date)
+        |HAVING count(*) > 2
+     """.stripMargin.trim,
+     s"""
+        |SELECT gen_subsumer_0.`faid`, gen_subsumer_0.`year` AS `year`, sum(gen_subsumer_0.`cnt`) AS `cnt` 
+        |FROM
+        |  (SELECT fact.`faid`, fact.`flid`, year(CAST(fact.`date` AS DATE)) AS `year`, count(1) AS `cnt` 
+        |  FROM
+        |    fact
+        |  GROUP BY fact.`faid`, fact.`flid`, year(CAST(fact.`date` AS DATE))) gen_subsumer_0 
+        |GROUP BY gen_subsumer_0.`faid`, gen_subsumer_0.`year`
+        |HAVING (sum(gen_subsumer_0.`cnt`) > 2L)
+     """.stripMargin.trim),
+    ("case_8",
+     s"""
+        |SELECT date
+        |FROM Fact
+     """.stripMargin.trim,
+     s"""
+        |SELECT year(date)
+        |FROM Fact
+     """.stripMargin.trim,
+     s"""
+        |SELECT year(CAST(gen_subsumer_0.`date` AS DATE)) AS `year(CAST(date AS DATE))` 
+        |FROM
+        |  (SELECT fact.`date` 
+        |  FROM
+        |    fact) gen_subsumer_0
+     """.stripMargin.trim),
+    ("case_9",
+     s"""
+        |SELECT faid, flid
+        |FROM Fact
+        |WHERE faid > 0
+     """.stripMargin.trim,
+     s"""
+        |SELECT faid
+        |FROM Fact
+        |WHERE faid > 0 AND flid > 0
+     """.stripMargin.trim,
+     s"""
+        |SELECT gen_subsumer_0.`faid` 
+        |FROM
+        |  (SELECT fact.`faid`, fact.`flid` 
+        |  FROM
+        |    fact
+        |  WHERE
+        |    (fact.`faid` > 0)) gen_subsumer_0 
+        |WHERE
+        |  (gen_subsumer_0.`faid` > 0) AND (gen_subsumer_0.`flid` > 0)
+     """.stripMargin.trim),
+    ("case_10",
+     s"""
+        |SELECT faid, flid
+        |FROM Fact
+        |WHERE faid > 0
+     """.stripMargin.trim,
+     s"""
+        |SELECT faid
+        |FROM Fact
+        |WHERE faid > 0 OR flid > 0
+     """.stripMargin.trim,
+     s"""
+        |SELECT fact.`faid` 
+        |FROM
+        |  fact
+        |WHERE
+        |  ((fact.`faid` > 0) OR (fact.`flid` > 0))
+     """.stripMargin.trim))
+}
\ No newline at end of file
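
Each entry of sampleTestCases above is a 4-tuple: a case name, the summary dataset (MV) SQL, the user query SQL, and the expected rewritten SQL. A hypothetical harness over these tuples could look like the sketch below; the rewrite stub stands in for the actual MV rewrite entry point and is not part of this file:

object TestSQLBatchHarnessSketch {
  // placeholder for the real MV rewrite entry point
  def rewrite(summaryDatasetSql: String, userQuerySql: String): String = ???

  def main(args: Array[String]): Unit = {
    org.apache.carbondata.mv.rewrite.matching.TestSQLBatch.sampleTestCases.foreach {
      case (name, mvSql, userSql, expectedSql) =>
        val rewritten = rewrite(mvSql, userSql)
        assert(rewritten.trim == expectedSql,
          s"case $name: rewritten SQL does not match the expected SQL")
    }
  }
}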


[07/50] [abbrv] carbondata git commit: [CARBONDATA-2441][Datamap] Implement distribute interface for bloom datamap

Posted by ma...@apache.org.
[CARBONDATA-2441][Datamap] Implement distribute interface for bloom datamap

Implement distribute interface for bloom datamap

This closes #2272


Project: http://git-wip-us.apache.org/repos/asf/carbondata/repo
Commit: http://git-wip-us.apache.org/repos/asf/carbondata/commit/452c42b9
Tree: http://git-wip-us.apache.org/repos/asf/carbondata/tree/452c42b9
Diff: http://git-wip-us.apache.org/repos/asf/carbondata/diff/452c42b9

Branch: refs/heads/spark-2.3
Commit: 452c42b994e6b4c0056b3cf097ebdb579b8d1e70
Parents: 6b94971
Author: xuchuanyin <xu...@hust.edu.cn>
Authored: Sat May 5 13:51:34 2018 +0800
Committer: Jacky Li <ja...@qq.com>
Committed: Wed May 9 10:27:24 2018 +0800

----------------------------------------------------------------------
 .../bloom/BloomCoarseGrainDataMapFactory.java   | 60 +++++++++++++++++++-
 .../bloom/BloomDataMapDistributable.java        | 37 ++++++++++++
 2 files changed, 95 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/carbondata/blob/452c42b9/datamap/bloom/src/main/java/org/apache/carbondata/datamap/bloom/BloomCoarseGrainDataMapFactory.java
----------------------------------------------------------------------
diff --git a/datamap/bloom/src/main/java/org/apache/carbondata/datamap/bloom/BloomCoarseGrainDataMapFactory.java b/datamap/bloom/src/main/java/org/apache/carbondata/datamap/bloom/BloomCoarseGrainDataMapFactory.java
index 4e62526..1d6eab7 100644
--- a/datamap/bloom/src/main/java/org/apache/carbondata/datamap/bloom/BloomCoarseGrainDataMapFactory.java
+++ b/datamap/bloom/src/main/java/org/apache/carbondata/datamap/bloom/BloomCoarseGrainDataMapFactory.java
@@ -19,6 +19,7 @@ package org.apache.carbondata.datamap.bloom;
 import java.io.File;
 import java.io.IOException;
 import java.util.ArrayList;
+import java.util.Arrays;
 import java.util.List;
 import java.util.Objects;
 
@@ -29,13 +30,16 @@ import org.apache.carbondata.common.logging.LogServiceFactory;
 import org.apache.carbondata.core.datamap.DataMapDistributable;
 import org.apache.carbondata.core.datamap.DataMapLevel;
 import org.apache.carbondata.core.datamap.DataMapMeta;
+import org.apache.carbondata.core.datamap.DataMapStoreManager;
 import org.apache.carbondata.core.datamap.Segment;
+import org.apache.carbondata.core.datamap.TableDataMap;
 import org.apache.carbondata.core.datamap.dev.DataMapFactory;
 import org.apache.carbondata.core.datamap.dev.DataMapModel;
 import org.apache.carbondata.core.datamap.dev.DataMapRefresher;
 import org.apache.carbondata.core.datamap.dev.DataMapWriter;
 import org.apache.carbondata.core.datamap.dev.cgdatamap.CoarseGrainDataMap;
 import org.apache.carbondata.core.datastore.filesystem.CarbonFile;
+import org.apache.carbondata.core.datastore.filesystem.CarbonFileFilter;
 import org.apache.carbondata.core.datastore.impl.FileFactory;
 import org.apache.carbondata.core.features.TableOperation;
 import org.apache.carbondata.core.metadata.schema.table.CarbonTable;
@@ -196,12 +200,64 @@ public class BloomCoarseGrainDataMapFactory extends DataMapFactory<CoarseGrainDa
   @Override
   public List<CoarseGrainDataMap> getDataMaps(DataMapDistributable distributable)
       throws IOException {
-    return null;
+    List<CoarseGrainDataMap> coarseGrainDataMaps = new ArrayList<>();
+    BloomCoarseGrainDataMap bloomCoarseGrainDataMap = new BloomCoarseGrainDataMap();
+    String indexPath = ((BloomDataMapDistributable) distributable).getIndexPath();
+    bloomCoarseGrainDataMap.init(new DataMapModel(indexPath));
+    coarseGrainDataMaps.add(bloomCoarseGrainDataMap);
+    return coarseGrainDataMaps;
+  }
+
+  /**
+   * Returns all the index directories of the bloom datamaps on this table for the query.
+   * Note: copied from LuceneDataMapFactory; will be extracted to a common interface.
+   */
+  private CarbonFile[] getAllIndexDirs(String tablePath, String segmentId) {
+    List<CarbonFile> indexDirs = new ArrayList<>();
+    List<TableDataMap> dataMaps;
+    try {
+      // there can be multiple bloom datamaps present on a table, so get all datamaps and form
+      // the path till the index file directories in all datamaps folders present in each segment
+      dataMaps = DataMapStoreManager.getInstance().getAllDataMap(getCarbonTable());
+    } catch (IOException ex) {
+      LOGGER.error(ex, String.format("failed to get datamaps for tablePath %s, segmentId %s",
+          tablePath, segmentId));
+      throw new RuntimeException(ex);
+    }
+    if (dataMaps.size() > 0) {
+      for (TableDataMap dataMap : dataMaps) {
+        List<CarbonFile> indexFiles;
+        String dmPath = CarbonTablePath.getSegmentPath(tablePath, segmentId) + File.separator
+            + dataMap.getDataMapSchema().getDataMapName();
+        FileFactory.FileType fileType = FileFactory.getFileType(dmPath);
+        final CarbonFile dirPath = FileFactory.getCarbonFile(dmPath, fileType);
+        indexFiles = Arrays.asList(dirPath.listFiles(new CarbonFileFilter() {
+          @Override
+          public boolean accept(CarbonFile file) {
+            return file.isDirectory();
+          }
+        }));
+        indexDirs.addAll(indexFiles);
+      }
+    }
+    return indexDirs.toArray(new CarbonFile[0]);
   }
 
   @Override
   public List<DataMapDistributable> toDistributable(Segment segment) {
-    return null;
+    List<DataMapDistributable> dataMapDistributableList = new ArrayList<>();
+    CarbonFile[] indexDirs =
+        getAllIndexDirs(getCarbonTable().getTablePath(), segment.getSegmentNo());
+    for (CarbonFile indexDir : indexDirs) {
+      // Filter out the tasks which are filtered through CG datamap.
+      if (!segment.getFilteredIndexShardNames().contains(indexDir.getName())) {
+        continue;
+      }
+      DataMapDistributable bloomDataMapDistributable = new BloomDataMapDistributable(
+          indexDir.getAbsolutePath());
+      dataMapDistributableList.add(bloomDataMapDistributable);
+    }
+    return dataMapDistributableList;
   }
 
   @Override

http://git-wip-us.apache.org/repos/asf/carbondata/blob/452c42b9/datamap/bloom/src/main/java/org/apache/carbondata/datamap/bloom/BloomDataMapDistributable.java
----------------------------------------------------------------------
diff --git a/datamap/bloom/src/main/java/org/apache/carbondata/datamap/bloom/BloomDataMapDistributable.java b/datamap/bloom/src/main/java/org/apache/carbondata/datamap/bloom/BloomDataMapDistributable.java
new file mode 100644
index 0000000..86d6932
--- /dev/null
+++ b/datamap/bloom/src/main/java/org/apache/carbondata/datamap/bloom/BloomDataMapDistributable.java
@@ -0,0 +1,37 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.carbondata.datamap.bloom;
+
+import org.apache.carbondata.common.annotations.InterfaceAudience;
+import org.apache.carbondata.core.datamap.DataMapDistributable;
+
+@InterfaceAudience.Internal
+class BloomDataMapDistributable extends DataMapDistributable {
+  /**
+   * parent folder of the bloomindex file
+   */
+  private String indexPath;
+
+  BloomDataMapDistributable(String indexPath) {
+    this.indexPath = indexPath;
+  }
+
+  public String getIndexPath() {
+    return indexPath;
+  }
+}
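
Taken together, the factory changes above split bloom pruning work per index directory: toDistributable(segment) emits one BloomDataMapDistributable for every index directory that survives the filtered-shard check, and getDataMaps(distributable) later rebuilds a BloomCoarseGrainDataMap from that distributable's index path. A rough sketch of that flow, where pruneWithBloom is a hypothetical helper and only the factory calls come from this patch:

import scala.collection.JavaConverters._

import org.apache.carbondata.core.datamap.Segment
import org.apache.carbondata.datamap.bloom.BloomCoarseGrainDataMapFactory

// hypothetical: fan out one unit of work per bloom index directory of a segment
def pruneWithBloom(factory: BloomCoarseGrainDataMapFactory, segment: Segment): Unit = {
  val distributables = factory.toDistributable(segment).asScala
  distributables.foreach { distributable =>
    // on the executor side this re-creates the datamap from the distributable's index path
    val dataMaps = factory.getDataMaps(distributable).asScala
    // ... prune blocklets with the returned datamaps here ...
  }
}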


[47/50] [abbrv] carbondata git commit: [CARBONDATA-2486][DOC] Update set search mode information in the documentation

Posted by ma...@apache.org.
[CARBONDATA-2486][DOC] Update set search mode information in the documentation

Update set search mode information in the documentation

This closes #2312


Project: http://git-wip-us.apache.org/repos/asf/carbondata/repo
Commit: http://git-wip-us.apache.org/repos/asf/carbondata/commit/1d302a8b
Tree: http://git-wip-us.apache.org/repos/asf/carbondata/tree/1d302a8b
Diff: http://git-wip-us.apache.org/repos/asf/carbondata/diff/1d302a8b

Branch: refs/heads/spark-2.3
Commit: 1d302a8b563c80e762985e10ed2bf9c9917e812d
Parents: fc4b7f9
Author: xubo245 <60...@qq.com>
Authored: Wed May 16 20:43:59 2018 +0800
Committer: chenliang613 <ch...@huawei.com>
Committed: Thu May 17 11:55:19 2018 +0800

----------------------------------------------------------------------
 docs/configuration-parameters.md | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/carbondata/blob/1d302a8b/docs/configuration-parameters.md
----------------------------------------------------------------------
diff --git a/docs/configuration-parameters.md b/docs/configuration-parameters.md
index e39d61b..11cc6ea 100644
--- a/docs/configuration-parameters.md
+++ b/docs/configuration-parameters.md
@@ -133,7 +133,8 @@ This section provides the details of all the configurations required for CarbonD
 | carbon.enableMinMax | true | Min max is feature added to enhance query performance. To disable this feature, set it false. |
 | carbon.dynamicallocation.schedulertimeout | 5 | Specifies the maximum time (unit in seconds) the scheduler can wait for executor to be active. Minimum value is 5 sec and maximum value is 15 sec. |
 | carbon.scheduler.minregisteredresourcesratio | 0.8 | Specifies the minimum resource (executor) ratio needed for starting the block distribution. The default value is 0.8, which indicates 80% of the requested resource is allocated for starting block distribution.  The minimum value is 0.1 min and the maximum value is 1.0. | 
-  
+| carbon.search.enabled | false | If set to true, it will use CarbonReader to do distributed scan directly instead of using compute framework like spark, thus avoiding limitation of compute framework like SQL optimizer and task scheduling overhead. |
+
 * **Global Dictionary Configurations**
   
 | Parameter | Default Value | Description |
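
For reference, a minimal sketch of enabling this option programmatically, assuming the same CarbonProperties mechanism used for the other carbon.* options in this table (the property key is the one documented above):

import org.apache.carbondata.core.util.CarbonProperties

// switch subsequent queries to the CarbonReader-based distributed scan ("search mode")
CarbonProperties.getInstance().addProperty("carbon.search.enabled", "true")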


[48/50] [abbrv] carbondata git commit: [CARBONDATA-2479] Multiple issue fixes in SDK writer and external table flow

Posted by ma...@apache.org.
[CARBONDATA-2479] Multiple issue fixes in SDK writer and external table flow

[CARBONDATA-2479] Multiple issues:
fixed external table path display in describe formatted
fixed default value handling for Avro array type
fixed NPE when the table folder is deleted before a second select query
fixed Avro float value precision change during conversion

This closes #2306


Project: http://git-wip-us.apache.org/repos/asf/carbondata/repo
Commit: http://git-wip-us.apache.org/repos/asf/carbondata/commit/cf1b50bc
Tree: http://git-wip-us.apache.org/repos/asf/carbondata/tree/cf1b50bc
Diff: http://git-wip-us.apache.org/repos/asf/carbondata/diff/cf1b50bc

Branch: refs/heads/spark-2.3
Commit: cf1b50bcc697be5353be469737a7dacdc57b1d7e
Parents: 1d302a8
Author: ajantha-bhat <aj...@gmail.com>
Authored: Mon May 14 15:28:23 2018 +0530
Committer: ravipesala <ra...@gmail.com>
Committed: Thu May 17 18:47:04 2018 +0530

----------------------------------------------------------------------
 .../LatestFilesReadCommittedScope.java          |  2 +-
 .../TestNonTransactionalCarbonTable.scala       | 82 ++++++++++++++++++--
 .../table/CarbonDescribeFormattedCommand.scala  |  7 +-
 .../carbondata/sdk/file/AvroCarbonWriter.java   | 12 +--
 .../carbondata/sdk/file/CarbonReaderTest.java   |  2 +-
 5 files changed, 85 insertions(+), 20 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/carbondata/blob/cf1b50bc/core/src/main/java/org/apache/carbondata/core/readcommitter/LatestFilesReadCommittedScope.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/readcommitter/LatestFilesReadCommittedScope.java b/core/src/main/java/org/apache/carbondata/core/readcommitter/LatestFilesReadCommittedScope.java
index 2306330..6106174 100644
--- a/core/src/main/java/org/apache/carbondata/core/readcommitter/LatestFilesReadCommittedScope.java
+++ b/core/src/main/java/org/apache/carbondata/core/readcommitter/LatestFilesReadCommittedScope.java
@@ -138,7 +138,7 @@ public class LatestFilesReadCommittedScope implements ReadCommittedScope {
   @Override public void takeCarbonIndexFileSnapShot() throws IOException {
     // Read the current file Path get the list of indexes from the path.
     CarbonFile file = FileFactory.getCarbonFile(carbonFilePath);
-    if (file == null) {
+    if (file.listFiles().length == 0) {
       // For nonTransactional table, files can be removed at any point of time.
       // So cannot assume files will be present
       throw new IOException("No files are present in the table location :" + carbonFilePath);

http://git-wip-us.apache.org/repos/asf/carbondata/blob/cf1b50bc/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/createTable/TestNonTransactionalCarbonTable.scala
----------------------------------------------------------------------
diff --git a/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/createTable/TestNonTransactionalCarbonTable.scala b/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/createTable/TestNonTransactionalCarbonTable.scala
index 5ab1c60..cc3cbb5 100644
--- a/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/createTable/TestNonTransactionalCarbonTable.scala
+++ b/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/createTable/TestNonTransactionalCarbonTable.scala
@@ -1050,7 +1050,7 @@ class TestNonTransactionalCarbonTable extends QueryTest with BeforeAndAfterAll {
         | "type": "record",
         | "fields": [
         |  { "name": "name", "type": "string"},
-        |  { "name": "age", "type": "int"},
+        |  { "name": "age", "type": "float"},
         |  { "name": "address",  "type": {
         |    "type" : "record",  "name" : "my_address",
         |        "fields" : [
@@ -1059,11 +1059,11 @@ class TestNonTransactionalCarbonTable extends QueryTest with BeforeAndAfterAll {
         |]}
       """.stripMargin
 
-    val json = """ {"name":"bob", "age":10, "address" : {"street":"abc", "city":"bang"}} """
+    val json = """ {"name":"bob", "age":10.24, "address" : {"street":"abc", "city":"bang"}} """
 
     val fields = new Array[Field](3)
     fields(0) = new Field("name", DataTypes.STRING)
-    fields(1) = new Field("age", DataTypes.INT)
+    fields(1) = new Field("age", DataTypes.DOUBLE)
     val fld = new util.ArrayList[StructField]
     fld.add(new StructField("street", DataTypes.STRING))
     fld.add(new StructField("city", DataTypes.STRING))
@@ -1340,11 +1340,10 @@ class TestNonTransactionalCarbonTable extends QueryTest with BeforeAndAfterAll {
       s"""CREATE EXTERNAL TABLE sdkOutputTable STORED BY 'carbondata' LOCATION
          |'$writerPath' """.stripMargin)
 
-
     checkAnswer(sql("select * from sdkOutputTable"), Seq(
-      Row("bob", 10, Row("abc","bang")),
-      Row("bob", 10, Row("abc","bang")),
-      Row("bob", 10, Row("abc","bang"))))
+      Row("bob", 10.24, Row("abc","bang")),
+      Row("bob", 10.24, Row("abc","bang")),
+      Row("bob", 10.24, Row("abc","bang"))))
 
     sql("DROP TABLE sdkOutputTable")
     // drop table should not delete the files
@@ -1372,6 +1371,75 @@ class TestNonTransactionalCarbonTable extends QueryTest with BeforeAndAfterAll {
     cleanTestData()
   }
 
+  // array type Default value test
+  def buildAvroTestDataArrayDefaultType(rows: Int, options: util.Map[String, String]): Any = {
+    FileUtils.deleteDirectory(new File(writerPath))
+
+    val mySchema = """ {
+                     |      "name": "address",
+                     |      "type": "record",
+                     |      "fields": [
+                     |      {
+                     |      "name": "name",
+                     |      "type": "string"
+                     |      },
+                     |      {
+                     |      "name": "age",
+                     |      "type": "int"
+                     |      },
+                     |      {
+                     |      "name": "address",
+                     |      "type": {
+                     |      "type": "array",
+                     |      "items": "string"
+                     |      },
+                     |      "default": ["sc","ab"]
+                     |      }
+                     |      ]
+                     |  }
+                   """.stripMargin
+
+    // skip giving array value to take default values
+    val json: String = "{\"name\": \"bob\",\"age\": 10}"
+
+    val fields = new Array[Field](3)
+    fields(0) = new Field("name", DataTypes.STRING)
+    fields(1) = new Field("age", DataTypes.INT)
+    // fields[1] = new Field("age", DataTypes.INT);
+    val fld = new util.ArrayList[StructField]
+    fld.add(new StructField("street", DataTypes.STRING))
+    fields(2) = new Field("address", "array", fld)
+
+    WriteFilesWithAvroWriter(rows, mySchema, json, fields)
+  }
+
+  def buildAvroTestDataSingleFileArrayDefaultType(): Any = {
+    FileUtils.deleteDirectory(new File(writerPath))
+    buildAvroTestDataArrayDefaultType(3, null)
+  }
+
+  test("Read sdk writer Avro output Array Type with Default value") {
+    buildAvroTestDataSingleFileArrayDefaultType()
+    assert(new File(writerPath).exists())
+    sql("DROP TABLE IF EXISTS sdkOutputTable")
+    sql(
+      s"""CREATE EXTERNAL TABLE sdkOutputTable STORED BY 'carbondata' LOCATION
+         |'$writerPath' """.stripMargin)
+
+    sql("select * from sdkOutputTable").show(200,false)
+
+    checkAnswer(sql("select * from sdkOutputTable"), Seq(
+      Row("bob", 10, new mutable.WrappedArray.ofRef[String](Array("sc", "ab"))),
+      Row("bob", 10, new mutable.WrappedArray.ofRef[String](Array("sc", "ab"))),
+      Row("bob", 10, new mutable.WrappedArray.ofRef[String](Array("sc", "ab")))))
+
+    sql("DROP TABLE sdkOutputTable")
+    // drop table should not delete the files
+    assert(new File(writerPath).listFiles().length > 0)
+    cleanTestData()
+  }
+
+
   test("Read sdk writer Avro output with both Array and Struct Type") {
     buildAvroTestDataBothStructArrayType()
     assert(new File(writerPath).exists())

http://git-wip-us.apache.org/repos/asf/carbondata/blob/cf1b50bc/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/table/CarbonDescribeFormattedCommand.scala
----------------------------------------------------------------------
diff --git a/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/table/CarbonDescribeFormattedCommand.scala b/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/table/CarbonDescribeFormattedCommand.scala
index 375c413..ce03959 100644
--- a/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/table/CarbonDescribeFormattedCommand.scala
+++ b/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/table/CarbonDescribeFormattedCommand.scala
@@ -86,12 +86,7 @@ private[sql] case class CarbonDescribeFormattedCommand(
     results ++= Seq(("Database Name", relation.carbonTable.getDatabaseName, "")
     )
     results ++= Seq(("Table Name", relation.carbonTable.getTableName, ""))
-    if (!carbonTable.isExternalTable) {
-      results ++= Seq(("CARBON Store Path ", CarbonProperties.getStorePath, ""))
-    } else {
-      // for external table should show files path.
-      results ++= Seq(("CARBON Store Path ", carbonTable.getTablePath, ""))
-    }
+    results ++= Seq(("CARBON Store Path ", carbonTable.getTablePath, ""))
 
     val tblProps = carbonTable.getTableInfo.getFactTable.getTableProperties
 

http://git-wip-us.apache.org/repos/asf/carbondata/blob/cf1b50bc/store/sdk/src/main/java/org/apache/carbondata/sdk/file/AvroCarbonWriter.java
----------------------------------------------------------------------
diff --git a/store/sdk/src/main/java/org/apache/carbondata/sdk/file/AvroCarbonWriter.java b/store/sdk/src/main/java/org/apache/carbondata/sdk/file/AvroCarbonWriter.java
index 137e3f4..9f2f295 100644
--- a/store/sdk/src/main/java/org/apache/carbondata/sdk/file/AvroCarbonWriter.java
+++ b/store/sdk/src/main/java/org/apache/carbondata/sdk/file/AvroCarbonWriter.java
@@ -97,8 +97,9 @@ public class AvroCarbonWriter extends CarbonWriter {
         out = fieldValue;
         break;
       case FLOAT:
-        Float f = (Float) fieldValue;
-        out = f.doubleValue();
+        // direct conversion will change precision. So parse from string.
+        // also carbon internally needs float as double
+        out = Double.parseDouble(fieldValue.toString());
         break;
       case RECORD:
         List<Schema.Field> fields = avroField.schema().getFields();
@@ -121,7 +122,8 @@ public class AvroCarbonWriter extends CarbonWriter {
           arrayChildObjects = new Object[size];
           for (int i = 0; i < size; i++) {
             Object childObject = avroFieldToObject(
-                new Schema.Field(avroField.name(), avroField.schema().getElementType(), null, true),
+                new Schema.Field(avroField.name(), avroField.schema().getElementType(),
+                    avroField.doc(), avroField.defaultVal()),
                 ((GenericData.Array) fieldValue).get(i));
             if (childObject != null) {
               arrayChildObjects[i] = childObject;
@@ -132,8 +134,8 @@ public class AvroCarbonWriter extends CarbonWriter {
           arrayChildObjects = new Object[size];
           for (int i = 0; i < size; i++) {
             Object childObject = avroFieldToObject(
-                new Schema.Field(avroField.name(), avroField.schema().getElementType(), null, true),
-                ((ArrayList) fieldValue).get(i));
+                new Schema.Field(avroField.name(), avroField.schema().getElementType(),
+                    avroField.doc(), avroField.defaultVal()), ((ArrayList) fieldValue).get(i));
             if (childObject != null) {
               arrayChildObjects[i] = childObject;
             }
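
Aside: the FLOAT change above can be illustrated with a tiny standalone snippet (hypothetical, not taken from the patch). Widening a Float to double keeps the binary float value, while parsing its decimal string keeps the value as written:

    public class FloatToDoubleSketch {
      public static void main(String[] args) {
        Float fieldValue = 1.1f;
        // widening keeps the float's binary value: 1.100000023841858
        double widened = fieldValue.doubleValue();
        // parsing the decimal string keeps the value as written: 1.1
        double parsed = Double.parseDouble(fieldValue.toString());
        System.out.println(widened + " vs " + parsed);
      }
    }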

http://git-wip-us.apache.org/repos/asf/carbondata/blob/cf1b50bc/store/sdk/src/test/java/org/apache/carbondata/sdk/file/CarbonReaderTest.java
----------------------------------------------------------------------
diff --git a/store/sdk/src/test/java/org/apache/carbondata/sdk/file/CarbonReaderTest.java b/store/sdk/src/test/java/org/apache/carbondata/sdk/file/CarbonReaderTest.java
index 08d6e52..f2c6d45 100644
--- a/store/sdk/src/test/java/org/apache/carbondata/sdk/file/CarbonReaderTest.java
+++ b/store/sdk/src/test/java/org/apache/carbondata/sdk/file/CarbonReaderTest.java
@@ -162,7 +162,7 @@ public class CarbonReaderTest {
       i++;
     }
     Assert.assertEquals(i, 100);
-
+    reader.close();
     FileUtils.deleteDirectory(new File(path));
   }
 }


[12/50] [abbrv] carbondata git commit: [HOTFIX]Add new modules to coverage report and make package name consistent with directory structure

Posted by ma...@apache.org.
[HOTFIX]Add new modules to coverage report and make package name consistent with directory structure

This closes #2285


Project: http://git-wip-us.apache.org/repos/asf/carbondata/repo
Commit: http://git-wip-us.apache.org/repos/asf/carbondata/commit/e70b6b44
Tree: http://git-wip-us.apache.org/repos/asf/carbondata/tree/e70b6b44
Diff: http://git-wip-us.apache.org/repos/asf/carbondata/diff/e70b6b44

Branch: refs/heads/spark-2.3
Commit: e70b6b446c829abea3954fd852944957ba13522e
Parents: cf3e919
Author: sraghunandan <ca...@gmail.com>
Authored: Wed May 9 12:36:31 2018 +0530
Committer: ravipesala <ra...@gmail.com>
Committed: Wed May 9 18:12:50 2018 +0530

----------------------------------------------------------------------
 integration/spark-common-test/pom.xml           | 50 +++++++++++++++++++-
 .../datasources/SparkCarbonFileFormat.scala     |  3 +-
 ....apache.spark.sql.sources.DataSourceRegister |  2 +-
 pom.xml                                         | 10 ++++
 4 files changed, 62 insertions(+), 3 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/carbondata/blob/e70b6b44/integration/spark-common-test/pom.xml
----------------------------------------------------------------------
diff --git a/integration/spark-common-test/pom.xml b/integration/spark-common-test/pom.xml
index 9f184e6..ae78523 100644
--- a/integration/spark-common-test/pom.xml
+++ b/integration/spark-common-test/pom.xml
@@ -42,6 +42,11 @@
     <build.directory.projectSparkCommonTest>../../integration/spark-common-test/target</build.directory.projectSparkCommonTest>
     <build.directory.projectHive>../../integration/hive/target</build.directory.projectHive>
     <build.directory.projectPresto>../../integration/presto/target</build.directory.projectPresto>
+    <build.directory.projectStoreSdk>../../store/sdk/target</build.directory.projectStoreSdk>
+    <build.directory.projectStoreSearch>../../store/search/target</build.directory.projectStoreSearch>
+    <build.directory.projectStreaming>../../streaming/target</build.directory.projectStreaming>
+    <build.directory.projectBloom>../../datamap/bloom/target</build.directory.projectBloom>
+    <build.directory.projectLucene>../../datamap/lucene/target</build.directory.projectLucene>
 
     <classes.directory.projectCommon>../../common/target/classes</classes.directory.projectCommon>
     <classes.directory.projectCore>../../core/target/classes</classes.directory.projectCore>
@@ -54,6 +59,11 @@
     <classes.directory.projectSparkCommonTest>../../integration/spark-common-test/target/classes</classes.directory.projectSparkCommonTest>
     <classes.directory.projectHive>../../integration/hive/target/classes</classes.directory.projectHive>
     <classes.directory.projectPresto>../../integration/presto/target/classes</classes.directory.projectPresto>
+    <classes.directory.projectStoreSdk>../../store/sdk/target/classes</classes.directory.projectStoreSdk>
+    <classes.directory.projectStoreSearch>../../store/search/target/classes</classes.directory.projectStoreSearch>
+    <classes.directory.projectStreaming>../../streaming/target/classes</classes.directory.projectStreaming>
+    <classes.directory.projectBloom>../../datamap/bloom/target/classes</classes.directory.projectBloom>
+    <classes.directory.projectLucene>../../datamap/lucene/target/classes</classes.directory.projectLucene>
 
     <sources.directory.projectCommon>../../common/src/main/java</sources.directory.projectCommon>
     <sources.directory.projectCore>../../core/src/main/java</sources.directory.projectCore>
@@ -70,6 +80,13 @@
     <sources.directory.projectHive>../../integration/hive/src/main/scala</sources.directory.projectHive>
     <sources.directory.projectPresto>../../integration/presto/src/main/java</sources.directory.projectPresto>
     <sources.directory.projectPresto>../../integration/presto/src/main/scala</sources.directory.projectPresto>
+    <sources.directory.projectStoreSdk>../../store/sdk/src/main/java</sources.directory.projectStoreSdk>
+    <sources.directory.projectStoreSearch>../../store/search/src/main/java</sources.directory.projectStoreSearch>
+    <sources.directory.projectStoreSearch>../../store/search/src/main/scala</sources.directory.projectStoreSearch>
+    <sources.directory.projectStreaming>../../streaming/src/main/java</sources.directory.projectStreaming>
+    <sources.directory.projectStreaming>../../streaming/src/main/scala</sources.directory.projectStreaming>
+    <sources.directory.projectBloom>../../datamap/bloom/src/main/java</sources.directory.projectBloom>
+    <sources.directory.projectLucene>../../datamap/lucene/src/main/java</sources.directory.projectLucene>
 
     <generated-sources.directory.projectCommon>../../common/target/generated-sources/annotations</generated-sources.directory.projectCommon>
     <generated-sources.directory.projectCore>../../core/target/generated-sources/annotations</generated-sources.directory.projectCore>
@@ -82,6 +99,12 @@
     <generated-sources.directory.projectSparkCommonTest>../../integration/spark-common-test/target/generated-sources/annotations</generated-sources.directory.projectSparkCommonTest>
     <generated-sources.directory.projectHive>../../integration/hive/target/generated-sources/annotations</generated-sources.directory.projectHive>
     <generated-sources.directory.projectPresto>../../integration/presto/target/generated-sources/annotations</generated-sources.directory.projectPresto>
+    <generated-sources.directory.projectStoreSdk>../../store/sdk/target/generated-sources/annotations</generated-sources.directory.projectStoreSdk>
+    <generated-sources.directory.projectStoreSearch>../../store/search/target/generated-sources/annotations</generated-sources.directory.projectStoreSearch>
+    <generated-sources.directory.projectStreaming>../../streaming/target/generated-sources/annotations</generated-sources.directory.projectStreaming>
+    <generated-sources.directory.projectBloom>../../datamap/bloom/target/generated-sources/annotations</generated-sources.directory.projectBloom>
+    <generated-sources.directory.projectLucene>../../datamap/lucene/target/generated-sources/annotations</generated-sources.directory.projectLucene>
+
   </properties>
 
 
@@ -313,6 +336,21 @@
                     <fileset dir="${build.directory.projectPresto}" erroronmissingdir="false">
                       <include name="jacoco.exec" />
                     </fileset>
+                    <fileset dir="${build.directory.projectStoreSdk}" erroronmissingdir="false">
+                      <include name="jacoco.exec" />
+                    </fileset>
+                    <fileset dir="${build.directory.projectStoreSearch}" erroronmissingdir="false">
+                      <include name="jacoco.exec" />
+                    </fileset>
+                    <fileset dir="${build.directory.projectStreaming}" erroronmissingdir="false">
+                      <include name="jacoco.exec" />
+                    </fileset>
+                    <fileset dir="${build.directory.projectBloom}" erroronmissingdir="false">
+                      <include name="jacoco.exec" />
+                    </fileset>
+                    <fileset dir="${build.directory.projectLucene}" erroronmissingdir="false">
+                      <include name="jacoco.exec" />
+                    </fileset>
 
                   </executiondata>
                   <structure name="jacoco-CarbonData Coverage Project">
@@ -329,7 +367,11 @@
                         <fileset dir="${classes.directory.projectSparkCommonTest}" />
                         <fileset dir="${classes.directory.projectHive}" erroronmissingdir="false" />
                         <fileset dir="${classes.directory.projectPresto}" erroronmissingdir="false" />
-
+                        <fileset dir="${classes.directory.projectStoreSdk}" erroronmissingdir="false" />
+                        <fileset dir="${classes.directory.projectStoreSearch}" erroronmissingdir="false" />
+                        <fileset dir="${classes.directory.projectStreaming}" erroronmissingdir="false" />
+                        <fileset dir="${classes.directory.projectBloom}" erroronmissingdir="false" />
+                        <fileset dir="${classes.directory.projectLucene}" erroronmissingdir="false" />
                       </classfiles>
                       <sourcefiles encoding="UTF-8">
                         <fileset dir="${sources.directory.projectCommon}" />
@@ -342,6 +384,12 @@
                         <fileset dir="${sources.directory.projectSparkCommon}" />
                         <fileset dir="${sources.directory.projectHive}" erroronmissingdir="false" />
                         <fileset dir="${sources.directory.projectPresto}" erroronmissingdir="false" />
+                        <fileset dir="${sources.directory.projectStoreSdk}" erroronmissingdir="false" />
+                        <fileset dir="${sources.directory.projectStoreSearch}" erroronmissingdir="false" />
+                        <fileset dir="${sources.directory.projectStreaming}" erroronmissingdir="false" />
+                        <fileset dir="${sources.directory.projectBloom}" erroronmissingdir="false" />
+                        <fileset dir="${sources.directory.projectLucene}" erroronmissingdir="false" />
+
                       </sourcefiles>
                     </group>
                   </structure>

http://git-wip-us.apache.org/repos/asf/carbondata/blob/e70b6b44/integration/spark2/src/main/scala/org/apache/spark/sql/execution/datasources/SparkCarbonFileFormat.scala
----------------------------------------------------------------------
diff --git a/integration/spark2/src/main/scala/org/apache/spark/sql/execution/datasources/SparkCarbonFileFormat.scala b/integration/spark2/src/main/scala/org/apache/spark/sql/execution/datasources/SparkCarbonFileFormat.scala
index 4f60297..35db1f5 100644
--- a/integration/spark2/src/main/scala/org/apache/spark/sql/execution/datasources/SparkCarbonFileFormat.scala
+++ b/integration/spark2/src/main/scala/org/apache/spark/sql/execution/datasources/SparkCarbonFileFormat.scala
@@ -15,7 +15,7 @@
  * limitations under the License.
  */
 
-package org.apache.spark.sql
+package org.apache.spark.sql.execution.datasources
 
 import java.net.URI
 
@@ -34,6 +34,7 @@ import org.apache.spark.sql.catalyst.InternalRow
 import org.apache.spark.sql.execution.datasources._
 import org.apache.spark.sql.execution.datasources.text.TextOutputWriter
 import org.apache.spark.sql.optimizer.CarbonFilters
+import org.apache.spark.sql.SparkSession
 import org.apache.spark.sql.sources.{DataSourceRegister, Filter}
 import org.apache.spark.sql.types.{AtomicType, StructField, StructType}
 

http://git-wip-us.apache.org/repos/asf/carbondata/blob/e70b6b44/integration/spark2/src/resources/META-INF/services/org.apache.spark.sql.sources.DataSourceRegister
----------------------------------------------------------------------
diff --git a/integration/spark2/src/resources/META-INF/services/org.apache.spark.sql.sources.DataSourceRegister b/integration/spark2/src/resources/META-INF/services/org.apache.spark.sql.sources.DataSourceRegister
index 5831f3e..4958a75 100644
--- a/integration/spark2/src/resources/META-INF/services/org.apache.spark.sql.sources.DataSourceRegister
+++ b/integration/spark2/src/resources/META-INF/services/org.apache.spark.sql.sources.DataSourceRegister
@@ -15,4 +15,4 @@
 ## limitations under the License.
 ## ------------------------------------------------------------------------
 org.apache.spark.sql.CarbonSource
-org.apache.spark.sql.SparkCarbonFileFormat
\ No newline at end of file
+org.apache.spark.sql.execution.datasources.SparkCarbonFileFormat
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/carbondata/blob/e70b6b44/pom.xml
----------------------------------------------------------------------
diff --git a/pom.xml b/pom.xml
index 9e629f3..7c460e2 100644
--- a/pom.xml
+++ b/pom.xml
@@ -516,6 +516,11 @@
                 <sourceDirectory>${basedir}/integration/presto/src/main/java</sourceDirectory>
                 <sourceDirectory>${basedir}/streaming/src/main/java</sourceDirectory>
                 <sourceDirectory>${basedir}/streaming/src/main/scala</sourceDirectory>
+                <sourceDirectory>${basedir}/store/sdk/src/main/java</sourceDirectory>
+                <sourceDirectory>${basedir}/store/search/src/main/scala</sourceDirectory>
+                <sourceDirectory>${basedir}/store/search/src/main/java</sourceDirectory>
+                <sourceDirectory>${basedir}/datamap/bloom/src/main/java</sourceDirectory>
+                <sourceDirectory>${basedir}/datamap/lucene/src/main/java</sourceDirectory>
               </sourceDirectories>
             </configuration>
           </plugin>
@@ -572,6 +577,11 @@
                 <sourceDirectory>${basedir}/integration/presto/src/main/java</sourceDirectory>
                 <sourceDirectory>${basedir}/streaming/src/main/java</sourceDirectory>
                 <sourceDirectory>${basedir}/streaming/src/main/scala</sourceDirectory>
+                <sourceDirectory>${basedir}/store/sdk/src/main/java</sourceDirectory>
+                <sourceDirectory>${basedir}/store/search/src/main/scala</sourceDirectory>
+                <sourceDirectory>${basedir}/store/search/src/main/java</sourceDirectory>
+                <sourceDirectory>${basedir}/datamap/bloom/src/main/java</sourceDirectory>
+                <sourceDirectory>${basedir}/datamap/lucene/src/main/java</sourceDirectory>
               </sourceDirectories>
             </configuration>
           </plugin>


[14/50] [abbrv] carbondata git commit: [CARBONDATA-2464]Fixed OOM issue in case of Complex type

Posted by ma...@apache.org.
[CARBONDATA-2464]Fixed OOM issue in case of Complex type

Problem: Queries on complex type columns fail with OOM

Root Cause: Complex type child column (no-dictionary) values are written in LV (length-value) format; while reading, the length is read first and the data is then read based on that length. Converting the length bytes to an int yields a wrong value, so the reader tries to allocate a huge memory chunk, and since that much unsafe memory is not available the query fails with OOM.

Code issue: While converting a byte array to an int, the individual bytes are not masked, so sign extension produces a wrong integer value.

Solution: Mask each byte with 0xff and then left-shift the bits
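
A minimal standalone sketch of the masking fix (illustrative only; the class name and the value 968 from the new unit test are just examples, this is not code from the patch):

    public class ToIntSketch {
      public static void main(String[] args) {
        byte[] data = {0x00, 0x00, 0x03, (byte) 0xC8};      // big-endian encoding of 968
        // old behaviour: bytes are sign-extended before shifting, so 0xC8 becomes -56
        int unmasked = (((int) data[0]) << 24) + (((int) data[1]) << 16)
            + (((int) data[2]) << 8) + data[3];              // 712, the wrong length
        // fixed behaviour: mask each byte with 0xff before shifting
        int masked = ((data[0] & 0xff) << 24) + ((data[1] & 0xff) << 16)
            + ((data[2] & 0xff) << 8) + (data[3] & 0xff);    // 968, the correct length
        System.out.println(unmasked + " vs " + masked);
      }
    }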

This closes #2288


Project: http://git-wip-us.apache.org/repos/asf/carbondata/repo
Commit: http://git-wip-us.apache.org/repos/asf/carbondata/commit/ceb7c8dd
Tree: http://git-wip-us.apache.org/repos/asf/carbondata/tree/ceb7c8dd
Diff: http://git-wip-us.apache.org/repos/asf/carbondata/diff/ceb7c8dd

Branch: refs/heads/spark-2.3
Commit: ceb7c8dd1ced457c7ce34f016abf30102e4931a9
Parents: cc0cbba
Author: kumarvishal09 <ku...@gmail.com>
Authored: Wed May 9 17:04:21 2018 +0530
Committer: Jacky Li <ja...@qq.com>
Committed: Thu May 10 15:27:32 2018 +0800

----------------------------------------------------------------------
 .../main/java/org/apache/carbondata/core/util/ByteUtil.java    | 4 ++--
 .../java/org/apache/carbondata/core/util/ByteUtilTest.java     | 6 ++++++
 2 files changed, 8 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/carbondata/blob/ceb7c8dd/core/src/main/java/org/apache/carbondata/core/util/ByteUtil.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/util/ByteUtil.java b/core/src/main/java/org/apache/carbondata/core/util/ByteUtil.java
index d1c16bb..52fc3c3 100644
--- a/core/src/main/java/org/apache/carbondata/core/util/ByteUtil.java
+++ b/core/src/main/java/org/apache/carbondata/core/util/ByteUtil.java
@@ -517,8 +517,8 @@ public final class ByteUtil {
   }
 
   public static int toInt(byte[] bytes, int offset) {
-    return (((int)bytes[offset]) << 24) + (((int)bytes[offset + 1]) << 16) +
-        (((int)bytes[offset + 2]) << 8) + bytes[offset + 3];
+    return (((int)bytes[offset] & 0xff) << 24) + (((int)bytes[offset + 1] & 0xff) << 16) +
+        (((int)bytes[offset + 2] & 0xff) << 8) + ((int)bytes[offset + 3] & 0xff);
   }
 
   public static void setInt(byte[] data, int offset, int value) {

http://git-wip-us.apache.org/repos/asf/carbondata/blob/ceb7c8dd/core/src/test/java/org/apache/carbondata/core/util/ByteUtilTest.java
----------------------------------------------------------------------
diff --git a/core/src/test/java/org/apache/carbondata/core/util/ByteUtilTest.java b/core/src/test/java/org/apache/carbondata/core/util/ByteUtilTest.java
index d51e184..d93aa49 100644
--- a/core/src/test/java/org/apache/carbondata/core/util/ByteUtilTest.java
+++ b/core/src/test/java/org/apache/carbondata/core/util/ByteUtilTest.java
@@ -113,6 +113,12 @@ public class ByteUtilTest extends TestCase {
         prepareBuffers();
         assertFalse(UnsafeComparer.INSTANCE.compareTo(buff1, buff2) < 0);
     }
+    @Test
+    public void testIntConversion() {
+        byte[] data = new byte[4];
+        ByteUtil.setInt(data, 0, 968);
+        assertEquals(ByteUtil.toInt(data, 0), 968);
+    }
 
     @Test
     public void testEqualToCase() {


[33/50] [abbrv] carbondata git commit: [CARBONDATA-2475] Support Modular Core for Materialized View DataMap for query matching and rewriting

Posted by ma...@apache.org.
http://git-wip-us.apache.org/repos/asf/carbondata/blob/bf73e9fe/datamap/mv/core/src/main/scala/org/apache/carbondata/mv/rewrite/Utils.scala
----------------------------------------------------------------------
diff --git a/datamap/mv/core/src/main/scala/org/apache/carbondata/mv/rewrite/Utils.scala b/datamap/mv/core/src/main/scala/org/apache/carbondata/mv/rewrite/Utils.scala
new file mode 100644
index 0000000..074d369
--- /dev/null
+++ b/datamap/mv/core/src/main/scala/org/apache/carbondata/mv/rewrite/Utils.scala
@@ -0,0 +1,358 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.carbondata.mv.rewrite
+
+import org.apache.spark.sql.catalyst.expressions.{Alias, Attribute, AttributeMap, Expression, PredicateHelper}
+import org.apache.spark.sql.catalyst.expressions.aggregate._
+
+import org.apache.carbondata.mv.plans.modular
+import org.apache.carbondata.mv.plans.modular.ModularPlan
+
+/**
+ * Utility functions used by mqo matcher to convert our plan to new aggregation code path
+ */
+private[rewrite] object Utils extends PredicateHelper {
+
+  // use for match qb_2a, qb_2q and sel_3a, sel_3q
+  private def doMatch(
+      operator_a: modular.Matchable,
+      operator_q: modular.Matchable,
+      alias_m: AttributeMap[Alias]): Option[modular.Matchable] = {
+    var matchable = true
+    val matched = operator_q.transformExpressions {
+      case cnt_q@AggregateExpression(Count(exprs_q), _, false, _) =>
+        operator_a.outputList.find {
+          case alias: Alias if alias_m.contains(alias.toAttribute) &&
+                               alias_m(alias.toAttribute).child.isInstanceOf[AggregateExpression] &&
+                               alias_m(alias.toAttribute).child.asInstanceOf[AggregateExpression]
+                                 .aggregateFunction.isInstanceOf[Count] =>
+            // case for groupby
+            val cnt_a = alias_m(alias.toAttribute).child.asInstanceOf[AggregateExpression]
+            val exprs_a = cnt_a.aggregateFunction.asInstanceOf[Count].children
+            if (cnt_a.isDistinct != cnt_q.isDistinct || exprs_q.length != exprs_a.length) {
+              false
+            } else {
+              exprs_a.sortBy(_.hashCode()).zip(exprs_q.sortBy(_.hashCode()))
+                .forall(p => p._1.semanticEquals(p._2))
+            }
+
+          case attr: Attribute if alias_m.contains(attr) &&
+                                  alias_m(attr).child.isInstanceOf[AggregateExpression] &&
+                                  alias_m(attr).child.asInstanceOf[AggregateExpression]
+                                    .aggregateFunction.isInstanceOf[Count] =>
+            val cnt_a = alias_m(attr).child.asInstanceOf[AggregateExpression]
+            val exprs_a = cnt_a.aggregateFunction.asInstanceOf[Count].children
+            if (cnt_a.isDistinct != cnt_q.isDistinct || exprs_q.length != exprs_a.length) {
+              false
+            } else {
+              exprs_a.sortBy(_.hashCode()).zip(exprs_q.sortBy(_.hashCode()))
+                .forall(p => p._1.semanticEquals(p._2))
+            }
+
+          case _ => false
+        }.map { cnt => AggregateExpression(
+            Sum(cnt.toAttribute),
+            cnt_q.mode,
+            isDistinct = false,
+            cnt_q.resultId)
+        }.getOrElse { matchable = false; cnt_q }
+
+      case sum_q@AggregateExpression(Sum(expr_q), _, false, _) =>
+        operator_a.outputList.find {
+          case alias: Alias if alias_m.contains(alias.toAttribute) &&
+                               alias_m(alias.toAttribute).child.isInstanceOf[AggregateExpression] &&
+                               alias_m(alias.toAttribute).child.asInstanceOf[AggregateExpression]
+                                 .aggregateFunction.isInstanceOf[Sum] =>
+            val sum_a = alias_m(alias.toAttribute).child.asInstanceOf[AggregateExpression]
+            val expr_a = sum_a.aggregateFunction.asInstanceOf[Sum].child
+            if (sum_a.isDistinct != sum_q.isDistinct) {
+              false
+            } else {
+              expr_a.semanticEquals(expr_q)
+            }
+
+          case attr: Attribute if alias_m.contains(attr) &&
+                                  alias_m(attr).child.isInstanceOf[AggregateExpression] &&
+                                  alias_m(attr).child.asInstanceOf[AggregateExpression]
+                                    .aggregateFunction.isInstanceOf[Sum] =>
+            val sum_a = alias_m(attr).child.asInstanceOf[AggregateExpression]
+            val expr_a = sum_a.aggregateFunction.asInstanceOf[Sum].child
+            if (sum_a.isDistinct != sum_q.isDistinct) {
+              false
+            } else {
+              expr_a.semanticEquals(expr_q)
+            }
+
+          case _ => false
+        }.map { sum => AggregateExpression(
+            Sum(sum.toAttribute),
+            sum_q.mode,
+            isDistinct = false,
+            sum_q.resultId)
+        }.getOrElse { matchable = false; sum_q }
+
+      case max_q@AggregateExpression(Max(expr_q), _, false, _) =>
+        operator_a.outputList.find {
+          case alias: Alias if alias_m.contains(alias.toAttribute) &&
+                               alias_m(alias.toAttribute).child.isInstanceOf[AggregateExpression] &&
+                               alias_m(alias.toAttribute).child.asInstanceOf[AggregateExpression]
+                                 .aggregateFunction.isInstanceOf[Max] =>
+            val max_a = alias_m(alias.toAttribute).child.asInstanceOf[AggregateExpression]
+            val expr_a = max_a.aggregateFunction.asInstanceOf[Max].child
+            if (max_a.isDistinct != max_q.isDistinct) {
+              false
+            } else {
+              expr_a.semanticEquals(expr_q)
+            }
+
+          case attr: Attribute if alias_m.contains(attr) &&
+                                  alias_m(attr).child.isInstanceOf[AggregateExpression] &&
+                                  alias_m(attr).child.asInstanceOf[AggregateExpression]
+                                    .aggregateFunction.isInstanceOf[Max] =>
+            val max_a = alias_m(attr).child.asInstanceOf[AggregateExpression]
+            val expr_a = max_a.aggregateFunction.asInstanceOf[Max].child
+            if (max_a.isDistinct != max_q.isDistinct) {
+              false
+            } else {
+              expr_a.semanticEquals(expr_q)
+            }
+
+          case _ => false
+        }.map { max => AggregateExpression(
+            Max(max.toAttribute),
+            max_q.mode,
+            isDistinct = false,
+            max_q.resultId)
+        }.getOrElse { matchable = false; max_q }
+
+      case min_q@AggregateExpression(Min(expr_q), _, false, _) =>
+        operator_a.outputList.find {
+          case alias: Alias if alias_m.contains(alias.toAttribute) &&
+                               alias_m(alias.toAttribute).child.isInstanceOf[AggregateExpression] &&
+                               alias_m(alias.toAttribute).child.asInstanceOf[AggregateExpression]
+                                 .aggregateFunction.isInstanceOf[Min] => {
+            val min_a = alias_m(alias.toAttribute).child.asInstanceOf[AggregateExpression]
+            val expr_a = min_a.aggregateFunction.asInstanceOf[Min].child
+            if (min_a.isDistinct != min_q.isDistinct) {
+              false
+            } else {
+              expr_a.semanticEquals(expr_q)
+            }
+          }
+          case attr: Attribute if alias_m.contains(attr) &&
+                                  alias_m(attr).child.isInstanceOf[AggregateExpression] &&
+                                  alias_m(attr).child.asInstanceOf[AggregateExpression]
+                                    .aggregateFunction.isInstanceOf[Min] => {
+            val min_a = alias_m(attr).child.asInstanceOf[AggregateExpression]
+            val expr_a = min_a.aggregateFunction.asInstanceOf[Min].child
+            if (min_a.isDistinct != min_q.isDistinct) {
+              false
+            } else {
+              expr_a.semanticEquals(expr_q)
+            }
+          }
+          case _ => false
+        }.map { min => AggregateExpression(
+            Min(min.toAttribute),
+            min_q.mode,
+            isDistinct = false,
+            min_q.resultId)
+        }.getOrElse { matchable = false; min_q }
+
+      case other: AggregateExpression =>
+        matchable = false
+        other
+
+      case expr: Expression if !expr.isInstanceOf[AggregateFunction] =>
+        operator_a.outputList.find {
+          case alias: Alias if alias_m.contains(alias.toAttribute) &&
+                               alias_m(alias.toAttribute).child.semanticEquals(expr) &&
+                               !alias_m(alias.toAttribute).child
+                                 .isInstanceOf[AggregateExpression] => true
+          case attr: Attribute if alias_m.contains(attr) &&
+                                  alias_m(attr).child.semanticEquals(expr) &&
+                                  !alias_m(attr).child.isInstanceOf[AggregateExpression] => true
+          case _ => false
+        }.map(_.toAttribute)
+         .getOrElse { expr }
+    }
+
+    if (matchable) {
+      Some(matched)
+    } else {
+      None
+    }
+  }
+
+  def tryMatch(a: modular.Matchable,
+      q: modular.Matchable,
+      m: AttributeMap[Alias]): Option[modular.Matchable] = {
+    if (a.getClass == q.getClass) {
+      doMatch(a, q, m)
+    } else {
+      None
+    }
+  }
+
+  /**
+   * (Subsumee) expression translation:
+   *
+   * The translation begins by creating a copy of the whole expression (step 1).  Then each input
+   * column is translated in turn.
+   * To translate an input column, we first find the child block that produces the input column
+   * and replace the input column with the
+   * associated output column expression (step 2).  The next step is to replace the translated
+   * expression with its equivalent output
+   * expression at the top of the child compensation (step 3).  Then, We recursively translate
+   * each new input column(except input
+   * columns produced by rejoin children) until we reach the bottom of the child compensation
+   * (step 4).  Finally, we find an
+   * equivalent output expression in subsumer (step 5).
+   *
+   * So given a subsumee expr, the translation follows the following path:
+   *
+   * top of subsumee --> child of subsumee --> top of compensation --> bottom of compensation -->
+   * top of subsumer
+   *
+   * To simplify this we assume in subsumer outputList of top select 1-1 corresponds to the
+   * outputList of groupby
+   * note that subsumer outputList is list of attributes and that of groupby is list of aliases
+   *
+   */
+  private def doTopSelectTranslation(exprE: Expression,
+      exprListR: Seq[Expression],
+      subsumee: ModularPlan,
+      subsumer: ModularPlan,
+      compensation: Option[ModularPlan]): Option[Expression] = {
+    (subsumer, subsumee, compensation) match {
+      // top selects whose children do not match exactly
+      // for simplicity, we assume outputList of subsumer is 1-1 corresponding to that of its
+      // immediately groupby child
+      case (
+        sel_3a@modular.Select(
+          _, _, _, _, _,
+          Seq(gb_2a@modular.GroupBy(
+            _, _, _, _, sel_2a@modular.Select(_, _, _, _, _, _, _, _, _, _), _, _, _)),
+          _, _, _, _),
+        sel_3q@modular.Select(
+          _, _, _, _, _, Seq(gb_2q@modular.GroupBy(_, _, _, _, _, _, _, _)), _, _, _, _),
+        Some(gb_2c@modular.GroupBy(
+          _, _, _, _, sel_2c@modular.Select(_, _, _, _, _, _, _, _, _, _), _, _, _))
+        ) =>
+        if (sel_3q.predicateList.contains(exprE)) {
+          val expr1E = exprE.transform {
+            case attr: Attribute =>
+              gb_2c.outputList.lift(
+                gb_2q.outputList.indexWhere {
+                  case alias: Alias if alias.toAttribute.semanticEquals(attr) => true;
+                  case other => false
+                  }).getOrElse { attr }
+          }
+          if (expr1E.eq(exprE)) {
+            None
+          } else {
+            Some(expr1E)
+          }
+        }
+        else if (sel_3q.outputList.contains(exprE)) {
+          exprE match {
+            case attr: Attribute => // this subexpression must in subsumee select output list
+              gb_2c.outputList.lift(
+                gb_2q.outputList.indexWhere {
+                  case a if a.toAttribute.semanticEquals(attr) => true;
+                  case other => false
+                  })
+
+            case alias: Alias =>
+              gb_2c.outputList.lift(
+                gb_2q.outputList.indexWhere {
+                  case a if a.toAttribute.semanticEquals(alias.toAttribute) => true;
+                  case other => false
+                  })
+
+            case _ => None
+          }
+        } else if (sel_2c.predicateList.contains(exprE)) {
+          if (sel_2a.predicateList.exists(_.semanticEquals(exprE)) ||
+              canEvaluate(exprE, subsumer)) {
+            Some(exprE)
+          } else {
+            None
+          }
+        } else if (gb_2c.predicateList.contains(exprE)) {
+          if (gb_2a.outputList.exists {
+                case a: Alias if a.child.semanticEquals(exprE) => true;
+                case _ => false
+              } || canEvaluate(exprE, subsumer)) {
+            Some(exprE)
+          } else {
+            None
+          }
+        } else if (sel_2a.predicateList.exists(_.semanticEquals(exprE)) ||
+                   canEvaluate(exprE, subsumer)) {
+          Some(exprE)
+        } else {
+          None
+        }
+
+      case _ => None // TODO: implement this
+    }
+  }
+
+  private def isSemanticEquivalent(translatedExpr: Expression, subsumer: ModularPlan) = {
+    subsumer match {
+      // if subsumer has where clause, even if expr can be translated into new expr based on
+      // subsumer, the two may not be semantic equivalent
+      // TODO: refine this
+      case modular.Select(
+        _, _, predicateList, _, _,
+        Seq(modular.GroupBy(_, _, _, _, _, _, _, _)), _, _, _, _)
+        if predicateList.nonEmpty => false
+      case _ => true
+    }
+  }
+
+  /**
+   * derivable = translatable + semantic equivalent
+   *
+   * The translation method described above is also the first step in deriving a subsumee
+   * expression Eexp from the subsumer's output columns.  After translating
+   * Eexp to E'exp, derivability can be established by making sure that the subsumer
+   * computes at its output certain necessary subexpressions of E'exp (or even the entire
+   * E'exp).  The problem that arises, however, is to determine the parts of E'exp that
+   * can/should be computed by the subsumer.
+   *
+   * In general, translation causes an expression to expand by replacing individual input
+   * columns with equivalent subexpressions.  Derivation is the reverse operation, where
+   * pieces of the translated expression are collapsed as they are computed along the
+   * derivation path.
+   */
+
+  def isDerivable(exprE: Expression,
+      exprListR: Seq[Expression],
+      subsumee: ModularPlan,
+      subsumer: ModularPlan,
+      compensation: Option[ModularPlan]): Boolean = {
+    val exprE1 = doTopSelectTranslation(exprE, exprListR, subsumee, subsumer, compensation)
+    exprE1 match {
+      case Some(e) => isSemanticEquivalent(e, subsumer)
+      case _ => false
+    }
+  }
+
+}
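
Aside: a hedged, dependency-free illustration of the Count-to-Sum rewrite that doMatch performs above (plain Java, entirely independent of the Catalyst/ModularPlan machinery; all names are hypothetical). Once a materialized view stores a per-group count, a COUNT over the fact table can be answered by SUMming that stored column:

    import java.util.HashMap;
    import java.util.Map;

    public class CountRollupSketch {
      public static void main(String[] args) {
        // pre-aggregated view: empname -> count(*) captured when the datamap was built
        Map<String, Long> mvCount = new HashMap<>();
        mvCount.put("bob", 3L);
        mvCount.put("alice", 2L);
        // "select count(*) from fact_table" is rewritten to "select sum(cnt) from mv":
        // summing the stored per-group counts gives the same total
        long total = mvCount.values().stream().mapToLong(Long::longValue).sum();
        System.out.println(total);   // 5
      }
    }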

http://git-wip-us.apache.org/repos/asf/carbondata/blob/bf73e9fe/datamap/mv/core/src/test/scala/org/apache/carbondata/mv/rewrite/MVCreateTestCase.scala
----------------------------------------------------------------------
diff --git a/datamap/mv/core/src/test/scala/org/apache/carbondata/mv/rewrite/MVCreateTestCase.scala b/datamap/mv/core/src/test/scala/org/apache/carbondata/mv/rewrite/MVCreateTestCase.scala
new file mode 100644
index 0000000..184fdc1
--- /dev/null
+++ b/datamap/mv/core/src/test/scala/org/apache/carbondata/mv/rewrite/MVCreateTestCase.scala
@@ -0,0 +1,676 @@
+package org.apache.carbondata.mv.rewrite
+
+import java.io.File
+
+import org.apache.spark.sql.catalyst.plans.logical.LogicalPlan
+import org.apache.spark.sql.execution.datasources.LogicalRelation
+import org.apache.spark.sql.test.util.QueryTest
+import org.scalatest.BeforeAndAfterAll
+
+import org.apache.carbondata.core.constants.CarbonCommonConstants
+import org.apache.carbondata.core.util.CarbonProperties
+
+class MVCreateTestCase extends QueryTest with BeforeAndAfterAll {
+
+  override def beforeAll {
+    drop()
+    CarbonProperties.getInstance()
+      .addProperty(CarbonCommonConstants.CARBON_TIMESTAMP_FORMAT, "yyyy/MM/dd")
+    val projectPath = new File(this.getClass.getResource("/").getPath + "../../../../../")
+      .getCanonicalPath.replaceAll("\\\\", "/")
+    val integrationPath = s"$projectPath/integration"
+    val resourcesPath = s"$integrationPath/spark-common-test/src/test/resources"
+    sql(
+      """
+        | CREATE TABLE fact_table1 (empname String, designation String, doj Timestamp,
+        |  workgroupcategory int, workgroupcategoryname String, deptno int, deptname String,
+        |  projectcode int, projectjoindate Timestamp, projectenddate Timestamp,attendance int,
+        |  utilization int,salary int)
+        | STORED BY 'org.apache.carbondata.format'
+      """.stripMargin)
+    sql(s"""LOAD DATA local inpath '$resourcesPath/data_big.csv' INTO TABLE fact_table1 OPTIONS('DELIMITER'= ',', 'QUOTECHAR'= '"')""")
+    sql(s"""LOAD DATA local inpath '$resourcesPath/data_big.csv' INTO TABLE fact_table1 OPTIONS('DELIMITER'= ',', 'QUOTECHAR'= '"')""")
+    sql(
+      """
+        | CREATE TABLE fact_table2 (empname String, designation String, doj Timestamp,
+        |  workgroupcategory int, workgroupcategoryname String, deptno int, deptname String,
+        |  projectcode int, projectjoindate Timestamp, projectenddate Timestamp,attendance int,
+        |  utilization int,salary int)
+        | STORED BY 'org.apache.carbondata.format'
+      """.stripMargin)
+    sql(s"""LOAD DATA local inpath '$resourcesPath/data_big.csv' INTO TABLE fact_table2 OPTIONS('DELIMITER'= ',', 'QUOTECHAR'= '"')""")
+    sql(s"""LOAD DATA local inpath '$resourcesPath/data_big.csv' INTO TABLE fact_table2 OPTIONS('DELIMITER'= ',', 'QUOTECHAR'= '"')""")
+
+    sql(
+      """
+        | CREATE TABLE fact_table3 (empname String, designation String, doj Timestamp,
+        |  workgroupcategory int, workgroupcategoryname String, deptno int, deptname String,
+        |  projectcode int, projectjoindate Timestamp, projectenddate Timestamp,attendance int,
+        |  utilization int,salary int)
+        | STORED BY 'org.apache.carbondata.format'
+      """.stripMargin)
+    sql(s"""LOAD DATA local inpath '$resourcesPath/data_big.csv' INTO TABLE fact_table3 OPTIONS('DELIMITER'= ',', 'QUOTECHAR'= '"')""")
+    sql(s"""LOAD DATA local inpath '$resourcesPath/data_big.csv' INTO TABLE fact_table3 OPTIONS('DELIMITER'= ',', 'QUOTECHAR'= '"')""")
+
+    sql(
+      """
+        | CREATE TABLE fact_table4 (empname String, designation String, doj Timestamp,
+        |  workgroupcategory int, workgroupcategoryname String, deptno int, deptname String,
+        |  projectcode int, projectjoindate Timestamp, projectenddate Timestamp,attendance int,
+        |  utilization int,salary int)
+        | STORED BY 'org.apache.carbondata.format'
+      """.stripMargin)
+    sql(s"""LOAD DATA local inpath '$resourcesPath/data_big.csv' INTO TABLE fact_table4 OPTIONS('DELIMITER'= ',', 'QUOTECHAR'= '"')""")
+    sql(s"""LOAD DATA local inpath '$resourcesPath/data_big.csv' INTO TABLE fact_table4 OPTIONS('DELIMITER'= ',', 'QUOTECHAR'= '"')""")
+    sql(
+      """
+        | CREATE TABLE fact_table5 (empname String, designation String, doj Timestamp,
+        |  workgroupcategory int, workgroupcategoryname String, deptno int, deptname String,
+        |  projectcode int, projectjoindate Timestamp, projectenddate Timestamp,attendance int,
+        |  utilization int,salary int)
+        | STORED BY 'org.apache.carbondata.format'
+      """.stripMargin)
+    sql(s"""LOAD DATA local inpath '$resourcesPath/data_big.csv' INTO TABLE fact_table5 OPTIONS('DELIMITER'= ',', 'QUOTECHAR'= '"')""")
+    sql(s"""LOAD DATA local inpath '$resourcesPath/data_big.csv' INTO TABLE fact_table5 OPTIONS('DELIMITER'= ',', 'QUOTECHAR'= '"')""")
+
+    sql(
+      """
+        | CREATE TABLE fact_table6 (empname String, designation String, doj Timestamp,
+        |  workgroupcategory int, workgroupcategoryname String, deptno int, deptname String,
+        |  projectcode int, projectjoindate Timestamp, projectenddate Timestamp,attendance int,
+        |  utilization int,salary int)
+        | STORED BY 'org.apache.carbondata.format'
+      """.stripMargin)
+    sql(s"""LOAD DATA local inpath '$resourcesPath/data_big.csv' INTO TABLE fact_table6 OPTIONS('DELIMITER'= ',', 'QUOTECHAR'= '"')""")
+    sql(s"""LOAD DATA local inpath '$resourcesPath/data_big.csv' INTO TABLE fact_table6 OPTIONS('DELIMITER'= ',', 'QUOTECHAR'= '"')""")
+  }
+
+  test("test create datamap with simple and same projection") {
+    sql("drop datamap if exists datamap1")
+    sql("create datamap datamap1 using 'mv' as select empname, designation from fact_table1")
+    sql(s"rebuild datamap datamap1")
+    val df = sql("select empname,designation from fact_table1")
+    val analyzed = df.queryExecution.analyzed
+    assert(verifyMVDataMap(analyzed, "datamap1"))
+    checkAnswer(df, sql("select empname,designation from fact_table2"))
+    sql(s"drop datamap datamap1")
+  }
+
+  test("test create datamap with simple and sub projection") {
+    sql("drop datamap if exists datamap2")
+    sql("create datamap datamap2 using 'mv' as select empname, designation from fact_table1")
+    sql(s"rebuild datamap datamap2")
+    val df = sql("select empname from fact_table1")
+    val analyzed = df.queryExecution.analyzed
+    assert(verifyMVDataMap(analyzed, "datamap2"))
+    checkAnswer(df, sql("select empname from fact_table2"))
+    sql(s"drop datamap datamap2")
+  }
+
+  test("test create datamap with simple and same projection with projection filter") {
+    sql("drop datamap if exists datamap3")
+    sql("create datamap datamap3 using 'mv' as select empname, designation from fact_table1")
+    sql(s"rebuild datamap datamap3")
+    val frame = sql("select empname, designation from fact_table1 where empname='shivani'")
+    val analyzed = frame.queryExecution.analyzed
+    assert(verifyMVDataMap(analyzed, "datamap3"))
+
+    checkAnswer(frame, sql("select empname, designation from fact_table2 where empname='shivani'"))
+    sql(s"drop datamap datamap3")
+  }
+
+  test("test create datamap with simple and sub projection with non projection filter") {
+    sql("create datamap datamap4 using 'mv' as select empname, designation from fact_table1")
+    sql(s"rebuild datamap datamap4")
+    val frame = sql("select designation from fact_table1 where empname='shivani'")
+    val analyzed = frame.queryExecution.analyzed
+    assert(verifyMVDataMap(analyzed, "datamap4"))
+    checkAnswer(frame, sql("select designation from fact_table2 where empname='shivani'"))
+    sql(s"drop datamap datamap4")
+  }
+
+  test("test create datamap with simple and sub projection with datamap filter") {
+    sql("create datamap datamap5 using 'mv' as select empname, designation from fact_table1 where empname='shivani'")
+    sql(s"rebuild datamap datamap5")
+    val frame = sql("select designation from fact_table1 where empname='shivani'")
+    val analyzed = frame.queryExecution.analyzed
+    assert(verifyMVDataMap(analyzed, "datamap5"))
+    checkAnswer(frame, sql("select designation from fact_table2 where empname='shivani'"))
+    sql(s"drop datamap datamap5")
+  }
+
+  test("test create datamap with simple and same projection with datamap filter ") {
+    sql("create datamap datamap6 using 'mv' as select empname, designation from fact_table1 where empname='shivani'")
+    sql(s"rebuild datamap datamap6")
+    val frame = sql("select empname,designation from fact_table1 where empname='shivani'")
+    val analyzed = frame.queryExecution.analyzed
+    assert(verifyMVDataMap(analyzed, "datamap6"))
+    checkAnswer(frame, sql("select empname,designation from fact_table2 where empname='shivani'"))
+    sql(s"drop datamap datamap6")
+  }
+
+  test("test create datamap with simple and same projection with datamap filter and extra query column filter") {
+    sql("create datamap datamap7 using 'mv' as select empname, designation from fact_table1 where empname='shivani'")
+    sql(s"rebuild datamap datamap7")
+    val frame = sql(
+      "select empname,designation from fact_table1 where empname='shivani' and designation='SA'")
+    val analyzed = frame.queryExecution.analyzed
+    assert(verifyMVDataMap(analyzed, "datamap7"))
+    checkAnswer(frame, sql("select empname,designation from fact_table2 where empname='shivani' and designation='SA'"))
+    sql(s"drop datamap datamap7")
+  }
+
+  test("test create datamap with simple and same projection with datamap filter and different column filter") {
+    sql("create datamap datamap8 using 'mv' as select empname, designation from fact_table1 where empname='shivani'")
+    sql(s"rebuild datamap datamap8")
+    val frame = sql("select empname,designation from fact_table1 where designation='SA'")
+    val analyzed = frame.queryExecution.analyzed
+    assert(!verifyMVDataMap(analyzed, "datamap8"))
+    checkAnswer(frame, sql("select empname,designation from fact_table2 where designation='SA'"))
+    sql(s"drop datamap datamap8")
+  }
+
+  test("test create datamap with simple and same projection with datamap filter on non projection column and extra column filter") {
+    sql("create datamap datamap9 using 'mv' as select empname, designation from fact_table1 where deptname='cloud'")
+    sql(s"rebuild datamap datamap9")
+    val frame = sql("select empname,designation from fact_table1 where deptname='cloud'")
+    val analyzed = frame.queryExecution.analyzed
+    assert(verifyMVDataMap(analyzed, "datamap9"))
+    checkAnswer(frame, sql("select empname,designation from fact_table2 where deptname='cloud'"))
+    sql(s"drop datamap datamap9")
+  }
+
+  test("test create datamap with simple and same projection with datamap filter on non projection column and no column filter") {
+    sql("create datamap datamap10 using 'mv' as select empname, designation from fact_table1 where deptname='cloud'")
+    sql(s"rebuild datamap datamap10")
+    val frame = sql("select empname,designation from fact_table1")
+    val analyzed = frame.queryExecution.analyzed
+    assert(!verifyMVDataMap(analyzed, "datamap10"))
+    checkAnswer(frame, sql("select empname,designation from fact_table2"))
+    sql(s"drop datamap datamap10")
+  }
+
+  test("test create datamap with simple and same projection with datamap filter on non projection column and different column filter") {
+    sql("create datamap datamap11 using 'mv' as select empname, designation from fact_table1 where deptname='cloud'")
+    sql(s"rebuild datamap datamap11")
+    val frame = sql("select empname,designation from fact_table1 where designation='SA'")
+    val analyzed = frame.queryExecution.analyzed
+    assert(!verifyMVDataMap(analyzed, "datamap11"))
+    checkAnswer(frame, sql("select empname,designation from fact_table2 where designation='SA'"))
+    sql(s"drop datamap datamap11")
+  }
+
+  test("test create datamap with simple and same group by query") {
+    sql("drop datamap if exists datamap12")
+    sql("create datamap datamap12 using 'mv' as select empname, sum(utilization) from fact_table1 group by empname")
+    sql(s"rebuild datamap datamap12")
+    val frame = sql("select empname, sum(utilization) from fact_table1 group by empname")
+    val analyzed = frame.queryExecution.analyzed
+    assert(verifyMVDataMap(analyzed, "datamap12"))
+    checkAnswer(frame, sql("select empname, sum(utilization) from fact_table2 group by empname"))
+    sql(s"drop datamap datamap12")
+  }
+
+  test("test create datamap with simple and sub group by query") {
+    sql("drop datamap if exists datamap13")
+    sql("create datamap datamap13 using 'mv' as select empname, sum(utilization) from fact_table1 group by empname")
+    sql(s"rebuild datamap datamap13")
+    val frame = sql("select sum(utilization) from fact_table1 group by empname")
+    val analyzed = frame.queryExecution.analyzed
+    assert(verifyMVDataMap(analyzed, "datamap13"))
+    checkAnswer(frame, sql("select sum(utilization) from fact_table2 group by empname"))
+    sql(s"drop datamap datamap13")
+  }
+
+  test("test create datamap with simple and sub group by query with filter on query") {
+    sql("drop datamap if exists datamap14")
+    sql("create datamap datamap14 using 'mv' as select empname, sum(utilization) from fact_table1 group by empname")
+    sql(s"rebuild datamap datamap14")
+    val frame = sql(
+      "select empname,sum(utilization) from fact_table1 group by empname having empname='shivani'")
+    val analyzed = frame.queryExecution.analyzed
+    assert(verifyMVDataMap(analyzed, "datamap14"))
+    checkAnswer(frame, sql("select empname,sum(utilization) from fact_table2 where empname='shivani' group by empname"))
+    sql(s"drop datamap datamap14")
+  }
+
+  test("test create datamap with simple and sub group and sub projection by query with filter on query") {
+    sql("drop datamap if exists datamap32")
+    sql("create datamap datamap32 using 'mv' as select empname, sum(utilization) from fact_table1 group by empname")
+    sql(s"rebuild datamap datamap32")
+    val frame = sql(
+      "select empname, sum(utilization) from fact_table1 group by empname having empname='shivani'")
+    val analyzed = frame.queryExecution.analyzed
+    assert(verifyMVDataMap(analyzed, "datamap32"))
+    checkAnswer(frame, sql( "select empname, sum(utilization) from fact_table2 group by empname having empname='shivani'"))
+    sql(s"drop datamap datamap32")
+  }
+
+  test("test create datamap with simple and sub group by query with filter on datamap") {
+    sql("create datamap datamap15 using 'mv' as select empname, sum(utilization) from fact_table1 where empname='shivani' group by empname")
+    sql(s"rebuild datamap datamap15")
+    val frame = sql(
+      "select empname,sum(utilization) from fact_table1 where empname='shivani' group by empname")
+    val analyzed = frame.queryExecution.analyzed
+    assert(verifyMVDataMap(analyzed, "datamap15"))
+    checkAnswer(frame, sql("select empname,sum(utilization) from fact_table2 where empname='shivani' group by empname"))
+    sql(s"drop datamap datamap15")
+  }
+
+  test("test create datamap with simple and sub group by query with filter on datamap and no filter on query") {
+    sql("create datamap datamap16 using 'mv' as select empname, sum(utilization) from fact_table1 where empname='shivani' group by empname")
+    sql(s"rebuild datamap datamap16")
+    val frame = sql("select empname,sum(utilization) from fact_table1 group by empname")
+    val analyzed = frame.queryExecution.analyzed
+    assert(!verifyMVDataMap(analyzed, "datamap16"))
+    checkAnswer(frame, sql("select empname,sum(utilization) from fact_table2 group by empname"))
+    sql(s"drop datamap datamap16")
+  }
+
+  test("test create datamap with simple and same group by with expression") {
+    sql("create datamap datamap17 using 'mv' as select empname, sum(CASE WHEN utilization=27 THEN deptno ELSE 0 END) from fact_table1 group by empname")
+    sql(s"rebuild datamap datamap17")
+    val frame = sql(
+      "select empname, sum(CASE WHEN utilization=27 THEN deptno ELSE 0 END) from fact_table1 group" +
+      " by empname")
+    val analyzed = frame.queryExecution.analyzed
+    assert(verifyMVDataMap(analyzed, "datamap17"))
+    checkAnswer(frame, sql("select empname, sum(CASE WHEN utilization=27 THEN deptno ELSE 0 END) from fact_table2 group" +
+                           " by empname"))
+    sql(s"drop datamap datamap17")
+  }
+
+  test("test create datamap with simple and sub group by with expression") {
+    sql("drop datamap if exists datamap18")
+    sql("create datamap datamap18 using 'mv' as select empname, sum(CASE WHEN utilization=27 THEN deptno ELSE 0 END) from fact_table1 group by empname")
+    sql(s"rebuild datamap datamap18")
+    val frame = sql(
+      "select sum(CASE WHEN utilization=27 THEN deptno ELSE 0 END) from fact_table1 group by empname")
+    val analyzed = frame.queryExecution.analyzed
+    assert(verifyMVDataMap(analyzed, "datamap18"))
+    checkAnswer(frame, sql("select sum(CASE WHEN utilization=27 THEN deptno ELSE 0 END) from fact_table2 group by empname"))
+    sql(s"drop datamap datamap18")
+  }
+
+  test("test create datamap with simple and sub count group by with expression") {
+    sql("drop datamap if exists datamap19")
+    sql("create datamap datamap19 using 'mv' as select empname, count(CASE WHEN utilization=27 THEN deptno ELSE 0 END) from fact_table1 group by empname")
+    sql(s"rebuild datamap datamap19")
+    val frame = sql(
+      "select count(CASE WHEN utilization=27 THEN deptno ELSE 0 END) from fact_table1 group by empname")
+    val analyzed = frame.queryExecution.analyzed
+    assert(verifyMVDataMap(analyzed, "datamap19"))
+    checkAnswer(frame, sql("select count(CASE WHEN utilization=27 THEN deptno ELSE 0 END) from fact_table2 group by empname"))
+    sql(s"drop datamap datamap19")
+  }
+
+  test("test create datamap with simple and sub group by with expression and filter on query") {
+    sql("drop datamap if exists datamap20")
+    sql("create datamap datamap20 using 'mv' as select empname, sum(CASE WHEN utilization=27 THEN deptno ELSE 0 END) from fact_table1 group by empname")
+    sql(s"rebuild datamap datamap20")
+    val frame = sql(
+      "select sum(CASE WHEN utilization=27 THEN deptno ELSE 0 END) from fact_table1 where " +
+      "empname='shivani' group by empname")
+    val analyzed = frame.queryExecution.analyzed
+    assert(verifyMVDataMap(analyzed, "datamap20"))
+    checkAnswer(frame, sql("select sum(CASE WHEN utilization=27 THEN deptno ELSE 0 END) from fact_table2 where " +
+                           "empname='shivani' group by empname"))
+    sql(s"drop datamap datamap20")
+  }
+
+  test("test create datamap with simple join") {
+    sql("drop datamap if exists datamap21")
+    sql("create datamap datamap21 using 'mv' as select t1.empname as c1, t2.designation, t2.empname as c2 from fact_table1 t1,fact_table2 t2 where t1.empname = t2.empname")
+    sql(s"rebuild datamap datamap21")
+    val frame = sql(
+      "select t1.empname as c1, t2.designation from fact_table1 t1,fact_table2 t2 where t1.empname = t2.empname")
+    val analyzed = frame.queryExecution.analyzed
+    assert(verifyMVDataMap(analyzed, "datamap21"))
+    checkAnswer(frame, sql("select t1.empname, t2.designation from fact_table4 t1,fact_table5 t2 where t1.empname = t2.empname"))
+    sql(s"drop datamap datamap21")
+  }
+
+  test("test create datamap with simple join and filter on query") {
+    sql("drop datamap if exists datamap22")
+    sql("create datamap datamap22 using 'mv' as select t1.empname, t2.designation, t2.empname from fact_table1 t1,fact_table2 t2 where t1.empname = t2.empname")
+    sql(s"rebuild datamap datamap22")
+    val frame = sql(
+      "select t1.empname, t2.designation from fact_table1 t1,fact_table2 t2 where t1.empname = " +
+      "t2.empname and t1.empname='shivani'")
+    val analyzed = frame.queryExecution.analyzed
+    assert(verifyMVDataMap(analyzed, "datamap22"))
+    checkAnswer(frame, sql("select t1.empname, t2.designation from fact_table4 t1,fact_table5 t2 where t1.empname = " +
+                           "t2.empname and t1.empname='shivani'"))
+    sql(s"drop datamap datamap22")
+  }
+
+
+  test("test create datamap with simple join and filter on query and datamap") {
+    sql("drop datamap if exists datamap23")
+    sql("create datamap datamap23 using 'mv' as select t1.empname, t2.designation from fact_table1 t1,fact_table2 t2 where t1.empname = t2.empname and t1.empname='shivani'")
+    sql(s"rebuild datamap datamap23")
+    val frame = sql(
+      "select t1.empname, t2.designation from fact_table1 t1,fact_table2 t2 where t1.empname = " +
+      "t2.empname and t1.empname='shivani'")
+    val analyzed = frame.queryExecution.analyzed
+    assert(verifyMVDataMap(analyzed, "datamap23"))
+    checkAnswer(frame, sql("select t1.empname, t2.designation from fact_table4 t1,fact_table5 t2 where t1.empname = " +
+                           "t2.empname and t1.empname='shivani'"))
+    sql(s"drop datamap datamap23")
+  }
+
+  test("test create datamap with simple join and filter on datamap and no filter on query") {
+    sql("drop datamap if exists datamap24")
+    sql("create datamap datamap24 using 'mv' as select t1.empname, t2.designation from fact_table1 t1,fact_table2 t2 where t1.empname = t2.empname and t1.empname='shivani'")
+    sql(s"rebuild datamap datamap24")
+    val frame = sql(
+      "select t1.empname, t2.designation from fact_table1 t1,fact_table2 t2 where t1.empname = t2.empname")
+    val analyzed = frame.queryExecution.analyzed
+    assert(!verifyMVDataMap(analyzed, "datamap24"))
+    checkAnswer(frame, sql("select t1.empname, t2.designation from fact_table4 t1,fact_table5 t2 where t1.empname = t2.empname"))
+    sql(s"drop datamap datamap24")
+  }
+
+  test("test create datamap with multiple join") {
+    sql("drop datamap if exists datamap25")
+    sql("create datamap datamap25 using 'mv' as select t1.empname as c1, t2.designation from fact_table1 t1,fact_table2 t2,fact_table3 t3  where t1.empname = t2.empname and t1.empname=t3.empname")
+    sql(s"rebuild datamap datamap25")
+    val frame = sql(
+      "select t1.empname as c1, t2.designation from fact_table1 t1,fact_table2 t2 where t1.empname = t2.empname")
+    val analyzed = frame.queryExecution.analyzed
+    assert(!verifyMVDataMap(analyzed, "datamap25"))
+    checkAnswer(frame, sql("select t1.empname, t2.designation from fact_table4 t1,fact_table5 t2 where t1.empname = t2.empname"))
+    sql(s"drop datamap datamap25")
+  }
+
+  ignore("test create datamap with simple join on datamap and multi join on query") {
+    sql("create datamap datamap26 using 'mv' as select t1.empname, t2.designation, t2.empname from fact_table1 t1,fact_table2 t2 where t1.empname = t2.empname")
+    sql(s"rebuild datamap datamap26")
+    val frame = sql(
+      "select t1.empname, t2.designation, t2.empname from fact_table1 t1,fact_table2 t2,fact_table3 " +
+      "t3  where t1.empname = t2.empname and t1.empname=t3.empname")
+    val analyzed = frame.queryExecution.analyzed
+    assert(verifyMVDataMap(analyzed, "datamap26"))
+    checkAnswer(frame, sql("select t1.empname, t2.designation, t2.empname from fact_table4 t1,fact_table5 t2,fact_table6 " +
+                           "t3  where t1.empname = t2.empname and t1.empname=t3.empname"))
+    sql(s"drop datamap datamap26")
+  }
+
+  test("test create datamap with join with group by") {
+    sql("create datamap datamap27 using 'mv' as select t1.empname, t2.designation, sum(t1.utilization) from fact_table1 t1,fact_table2 t2  where t1.empname = t2.empname group by t1.empname, t2.designation")
+    sql(s"rebuild datamap datamap27")
+    val frame = sql(
+      "select t1.empname, t2.designation, sum(t1.utilization) from fact_table1 t1,fact_table2 t2  " +
+      "where t1.empname = t2.empname group by t1.empname, t2.designation")
+    val analyzed = frame.queryExecution.analyzed
+    assert(verifyMVDataMap(analyzed, "datamap27"))
+    checkAnswer(frame, sql("select t1.empname, t2.designation, sum(t1.utilization) from fact_table4 t1,fact_table5 t2  " +
+                           "where t1.empname = t2.empname group by t1.empname, t2.designation"))
+    sql(s"drop datamap datamap27")
+  }
+
+  test("test create datamap with join with group by and sub projection") {
+    sql("drop datamap if exists datamap28")
+    sql("create datamap datamap28 using 'mv' as select t1.empname, t2.designation, sum(t1.utilization) from fact_table1 t1,fact_table2 t2  where t1.empname = t2.empname group by t1.empname, t2.designation")
+    sql(s"rebuild datamap datamap28")
+    val frame = sql(
+      "select t2.designation, sum(t1.utilization) from fact_table1 t1,fact_table2 t2  where " +
+      "t1.empname = t2.empname group by t2.designation")
+    val analyzed = frame.queryExecution.analyzed
+    assert(verifyMVDataMap(analyzed, "datamap28"))
+    checkAnswer(frame, sql("select t2.designation, sum(t1.utilization) from fact_table4 t1,fact_table5 t2  where " +
+                           "t1.empname = t2.empname group by t2.designation"))
+    sql(s"drop datamap datamap28")
+  }
+
+  test("test create datamap with join with group by and sub projection with filter") {
+    sql("drop datamap if exists datamap29")
+    sql("create datamap datamap29 using 'mv' as select t1.empname, t2.designation, sum(t1.utilization) from fact_table1 t1,fact_table2 t2  where t1.empname = t2.empname group by t1.empname, t2.designation")
+    sql(s"rebuild datamap datamap29")
+    val frame = sql(
+      "select t2.designation, sum(t1.utilization) from fact_table1 t1,fact_table2 t2  where " +
+      "t1.empname = t2.empname and t1.empname='shivani' group by t2.designation")
+    val analyzed = frame.queryExecution.analyzed
+    assert(verifyMVDataMap(analyzed, "datamap29"))
+    checkAnswer(frame, sql("select t2.designation, sum(t1.utilization) from fact_table4 t1,fact_table5 t2  where " +
+                           "t1.empname = t2.empname and t1.empname='shivani' group by t2.designation"))
+    sql(s"drop datamap datamap29")
+  }
+
+  test("test create datamap with join with group by with filter") {
+    sql("drop datamap if exists datamap30")
+    sql("create datamap datamap30 using 'mv' as select t1.empname, t2.designation, sum(t1.utilization) from fact_table1 t1,fact_table2 t2  where t1.empname = t2.empname group by t1.empname, t2.designation")
+    sql(s"rebuild datamap datamap30")
+    val frame = sql(
+      "select t1.empname, t2.designation, sum(t1.utilization) from fact_table1 t1,fact_table2 t2  " +
+      "where t1.empname = t2.empname and t2.designation='SA' group by t1.empname, t2.designation")
+    val analyzed = frame.queryExecution.analyzed
+    assert(verifyMVDataMap(analyzed, "datamap30"))
+    checkAnswer(frame, sql("select t1.empname, t2.designation, sum(t1.utilization) from fact_table4 t1,fact_table5 t2  " +
+                           "where t1.empname = t2.empname and t2.designation='SA' group by t1.empname, t2.designation"))
+    sql(s"drop datamap datamap30")
+  }
+
+  test("test create datamap with expression on projection") {
+    sql(s"drop datamap if exists datamap31")
+    sql("create datamap datamap31 using 'mv' as select empname, designation, utilization, projectcode from fact_table1 ")
+    sql(s"rebuild datamap datamap31")
+    val frame = sql(
+      "select empname, designation, utilization+projectcode from fact_table1")
+    val analyzed = frame.queryExecution.analyzed
+    assert(verifyMVDataMap(analyzed, "datamap31"))
+    checkAnswer(frame, sql("select empname, designation, utilization+projectcode from fact_table2"))
+    sql(s"drop datamap datamap31")
+  }
+
+  test("test create datamap with simple and sub group by query and count agg") {
+    sql(s"drop datamap if exists datamap32")
+    sql("create datamap datamap32 using 'mv' as select empname, count(utilization) from fact_table1 group by empname")
+    sql(s"rebuild datamap datamap32")
+    val frame = sql("select empname,count(utilization) from fact_table1 where empname='shivani' group by empname")
+    val analyzed = frame.queryExecution.analyzed
+    assert(verifyMVDataMap(analyzed, "datamap32"))
+    checkAnswer(frame, sql("select empname,count(utilization) from fact_table2 where empname='shivani' group by empname"))
+    sql(s"drop datamap datamap32")
+  }
+
+  ignore("test create datamap with simple and sub group by query and avg agg") {
+    sql(s"drop datamap if exists datamap33")
+    sql("create datamap datamap33 using 'mv' as select empname, avg(utilization) from fact_table1 group by empname")
+    sql(s"rebuild datamap datamap33")
+    val frame = sql("select empname,avg(utilization) from fact_table1 where empname='shivani' group by empname")
+    val analyzed = frame.queryExecution.analyzed
+    assert(verifyMVDataMap(analyzed, "datamap33"))
+    checkAnswer(frame, sql("select empname,avg(utilization) from fact_table2 where empname='shivani' group by empname"))
+    sql(s"drop datamap datamap33")
+  }
+
+  test("test create datamap with left join with group by") {
+    sql("drop datamap if exists datamap34")
+    sql("create datamap datamap34 using 'mv' as select t1.empname, t2.designation, sum(t1.utilization) from fact_table1 t1 left join fact_table2 t2  on t1.empname = t2.empname group by t1.empname, t2.designation")
+    sql(s"rebuild datamap datamap34")
+    val frame = sql(
+      "select t1.empname, t2.designation, sum(t1.utilization) from fact_table1 t1 left join fact_table2 t2  " +
+      "on t1.empname = t2.empname group by t1.empname, t2.designation")
+    val analyzed = frame.queryExecution.analyzed
+    assert(verifyMVDataMap(analyzed, "datamap34"))
+    checkAnswer(frame, sql("select t1.empname, t2.designation, sum(t1.utilization) from fact_table4 t1 left join fact_table5 t2  " +
+                           "on t1.empname = t2.empname group by t1.empname, t2.designation"))
+    sql(s"drop datamap datamap34")
+  }
+
+  test("test create datamap with simple and group by query with filter on datamap but not on projection") {
+    sql("create datamap datamap35 using 'mv' as select designation, sum(utilization) from fact_table1 where empname='shivani' group by designation")
+    sql(s"rebuild datamap datamap35")
+    val frame = sql(
+      "select designation, sum(utilization) from fact_table1 where empname='shivani' group by designation")
+    val analyzed = frame.queryExecution.analyzed
+    assert(verifyMVDataMap(analyzed, "datamap35"))
+    checkAnswer(frame, sql("select designation, sum(utilization) from fact_table2 where empname='shivani' group by designation"))
+    sql(s"drop datamap datamap35")
+  }
+
+  test("test create datamap with simple and sub group by query with filter on datamap but not on projection") {
+    sql("create datamap datamap36 using 'mv' as select designation, sum(utilization) from fact_table1 where empname='shivani' group by designation")
+    sql(s"rebuild datamap datamap36")
+    val frame = sql(
+      "select sum(utilization) from fact_table1 where empname='shivani' group by designation")
+    val analyzed = frame.queryExecution.analyzed
+    assert(verifyMVDataMap(analyzed, "datamap36"))
+    checkAnswer(frame, sql("select sum(utilization) from fact_table2 where empname='shivani' group by designation"))
+    sql(s"drop datamap datamap36")
+  }
+
+  test("test create datamap with agg push join with sub group by ") {
+    sql("drop datamap if exists datamap37")
+    sql("create datamap datamap37 using 'mv' as select empname, designation, sum(utilization) from fact_table1 group by empname, designation")
+    sql(s"rebuild datamap datamap37")
+    val frame = sql(
+      "select t1.empname, sum(t1.utilization) from fact_table1 t1,fact_table2 t2  " +
+      "where t1.empname = t2.empname group by t1.empname")
+    val analyzed = frame.queryExecution.analyzed
+    assert(verifyMVDataMap(analyzed, "datamap37"))
+    checkAnswer(frame, sql("select t1.empname, sum(t1.utilization) from fact_table3 t1,fact_table4 t2  " +
+                           "where t1.empname = t2.empname group by t1.empname, t1.designation"))
+    sql(s"drop datamap datamap37")
+  }
+
+  test("test create datamap with agg push join with group by ") {
+    sql("drop datamap if exists datamap38")
+    sql("create datamap datamap38 using 'mv' as select empname, designation, sum(utilization) from fact_table1 group by empname, designation")
+    sql(s"rebuild datamap datamap38")
+    val frame = sql(
+      "select t1.empname, t1.designation, sum(t1.utilization) from fact_table1 t1,fact_table2 t2  " +
+      "where t1.empname = t2.empname group by t1.empname,t1.designation")
+    val analyzed = frame.queryExecution.analyzed
+    assert(verifyMVDataMap(analyzed, "datamap38"))
+    checkAnswer(frame, sql("select t1.empname,t1.designation, sum(t1.utilization) from fact_table3 t1,fact_table4 t2  " +
+                           "where t1.empname = t2.empname group by t1.empname, t1.designation"))
+    sql(s"drop datamap datamap38")
+  }
+
+  test("test create datamap with agg push join with group by with filter") {
+    sql("drop datamap if exists datamap39")
+    sql("create datamap datamap39 using 'mv' as select empname, designation, sum(utilization) from fact_table1 group by empname, designation ")
+    sql(s"rebuild datamap datamap39")
+    val frame = sql(
+      "select t1.empname, t1.designation, sum(t1.utilization) from fact_table1 t1,fact_table2 t2  " +
+      "where t1.empname = t2.empname and t1.empname='shivani' group by t1.empname,t1.designation")
+    val analyzed = frame.queryExecution.analyzed
+    assert(verifyMVDataMap(analyzed, "datamap39"))
+    checkAnswer(frame, sql("select t1.empname,t1.designation, sum(t1.utilization) from fact_table3 t1,fact_table4 t2  " +
+                           "where t1.empname = t2.empname and t1.empname='shivani' group by t1.empname, t1.designation"))
+    sql(s"drop datamap datamap39")
+  }
+
+  test("test create datamap with more agg push join with group by with filter") {
+    sql("drop datamap if exists datamap40")
+    sql("create datamap datamap40 using 'mv' as select empname, designation, sum(utilization), count(utilization) from fact_table1 group by empname, designation ")
+    sql(s"rebuild datamap datamap40")
+    val frame = sql(
+      "select t1.empname, t1.designation, sum(t1.utilization),count(t1.utilization) from fact_table1 t1,fact_table2 t2  " +
+      "where t1.empname = t2.empname and t1.empname='shivani' group by t1.empname,t1.designation")
+    val analyzed = frame.queryExecution.analyzed
+    assert(verifyMVDataMap(analyzed, "datamap40"))
+    checkAnswer(frame, sql("select t1.empname, t1.designation, sum(t1.utilization),count(t1.utilization) from fact_table3 t1,fact_table4 t2  " +
+                           "where t1.empname = t2.empname and t1.empname='shivani' group by t1.empname,t1.designation"))
+    sql(s"drop datamap datamap40")
+  }
+
+  test("test create datamap with left join with group by with filter") {
+    sql("drop datamap if exists datamap41")
+    sql("create datamap datamap41 using 'mv' as select t1.empname, t2.designation, sum(t1.utilization) from fact_table1 t1 left join fact_table2 t2  on t1.empname = t2.empname group by t1.empname, t2.designation")
+    sql(s"rebuild datamap datamap41")
+    val frame = sql(
+      "select t1.empname, t2.designation, sum(t1.utilization) from fact_table1 t1 left join fact_table2 t2  " +
+      "on t1.empname = t2.empname where t1.empname='shivani' group by t1.empname, t2.designation")
+    val analyzed = frame.queryExecution.analyzed
+    assert(verifyMVDataMap(analyzed, "datamap41"))
+    checkAnswer(frame, sql("select t1.empname, t2.designation, sum(t1.utilization) from fact_table4 t1 left join fact_table5 t2  " +
+                           "on t1.empname = t2.empname where t1.empname='shivani' group by t1.empname, t2.designation"))
+    sql(s"drop datamap datamap41")
+  }
+
+  test("test create datamap with left join with sub group by") {
+    sql("drop datamap if exists datamap42")
+    sql("create datamap datamap42 using 'mv' as select t1.empname, t2.designation, sum(t1.utilization) from fact_table1 t1 left join fact_table2 t2  on t1.empname = t2.empname group by t1.empname, t2.designation")
+    sql(s"rebuild datamap datamap42")
+    val frame = sql(
+      "select t1.empname, sum(t1.utilization) from fact_table1 t1 left join fact_table2 t2  " +
+      "on t1.empname = t2.empname group by t1.empname")
+    val analyzed = frame.queryExecution.analyzed
+    assert(verifyMVDataMap(analyzed, "datamap42"))
+    checkAnswer(frame, sql("select t1.empname, sum(t1.utilization) from fact_table4 t1 left join fact_table5 t2  " +
+                           "on t1.empname = t2.empname group by t1.empname"))
+    sql(s"drop datamap datamap42")
+  }
+
+  test("test create datamap with left join with sub group by with filter") {
+    sql("drop datamap if exists datamap43")
+    sql("create datamap datamap43 using 'mv' as select t1.empname, t2.designation, sum(t1.utilization) from fact_table1 t1 left join fact_table2 t2  on t1.empname = t2.empname group by t1.empname, t2.designation")
+    sql(s"rebuild datamap datamap43")
+    val frame = sql(
+      "select t1.empname, sum(t1.utilization) from fact_table1 t1 left join fact_table2 t2  " +
+      "on t1.empname = t2.empname where t1.empname='shivani' group by t1.empname")
+    val analyzed = frame.queryExecution.analyzed
+    assert(verifyMVDataMap(analyzed, "datamap43"))
+    checkAnswer(frame, sql("select t1.empname, sum(t1.utilization) from fact_table4 t1 left join fact_table5 t2  " +
+                           "on t1.empname = t2.empname where t1.empname='shivani' group by t1.empname"))
+    sql(s"drop datamap datamap43")
+  }
+
+  test("test create datamap with left join with sub group by with filter on mv") {
+    sql("drop datamap if exists datamap44")
+    sql("create datamap datamap44 using 'mv' as select t1.empname, t2.designation, sum(t1.utilization) from fact_table1 t1 left join fact_table2 t2  on t1.empname = t2.empname where t1.empname='shivani' group by t1.empname, t2.designation")
+    sql(s"rebuild datamap datamap44")
+    val frame = sql(
+      "select t1.empname, sum(t1.utilization) from fact_table1 t1 left join fact_table2 t2  " +
+      "on t1.empname = t2.empname where t1.empname='shivani' group by t1.empname")
+    val analyzed = frame.queryExecution.analyzed
+    assert(verifyMVDataMap(analyzed, "datamap44"))
+    checkAnswer(frame, sql("select t1.empname, sum(t1.utilization) from fact_table4 t1 left join fact_table5 t2  " +
+                           "on t1.empname = t2.empname where t1.empname='shivani' group by t1.empname"))
+    sql(s"drop datamap datamap44")
+  }
+
+  test("test create datamap with left join on query and equi join on mv with group by with filter") {
+    sql("drop datamap if exists datamap45")
+
+    sql("create datamap datamap45 using 'mv' as select t1.empname, t2.designation, sum(t1.utilization) from fact_table1 t1 join fact_table2 t2  on t1.empname = t2.empname group by t1.empname, t2.designation")
+    sql(s"rebuild datamap datamap45")
+    // Spark's optimizer converts a left outer join into an equi join when the query has a filter on the right-side table
+    val frame = sql(
+      "select t1.empname, t2.designation, sum(t1.utilization) from fact_table1 t1 left join fact_table2 t2  " +
+      "on t1.empname = t2.empname where t2.designation='SA' group by t1.empname, t2.designation")
+    val analyzed = frame.queryExecution.analyzed
+    assert(verifyMVDataMap(analyzed, "datamap45"))
+    checkAnswer(frame, sql("select t1.empname, t2.designation, sum(t1.utilization) from fact_table4 t1 left join fact_table5 t2  " +
+                           "on t1.empname = t2.empname where t2.designation='SA' group by t1.empname, t2.designation"))
+    sql(s"drop datamap datamap45")
+  }
+
+
+  def verifyMVDataMap(logicalPlan: LogicalPlan, dataMapName: String): Boolean = {
+    val tables = logicalPlan collect {
+      case l: LogicalRelation => l.catalogTable.get
+    }
+    tables.exists(_.identifier.table.equalsIgnoreCase(dataMapName+"_table"))
+  }
+
+
+  def drop(): Unit = {
+    sql("drop table IF EXISTS fact_table1")
+    sql("drop table IF EXISTS fact_table2")
+    sql("drop table IF EXISTS fact_table3")
+    sql("drop table IF EXISTS fact_table4")
+    sql("drop table IF EXISTS fact_table5")
+    sql("drop table IF EXISTS fact_table6")
+  }
+
+  override def afterAll {
+    drop()
+    CarbonProperties.getInstance()
+      .addProperty(CarbonCommonConstants.CARBON_TIMESTAMP_FORMAT,
+        CarbonCommonConstants.CARBON_TIMESTAMP_DEFAULT_FORMAT)
+  }
+}

http://git-wip-us.apache.org/repos/asf/carbondata/blob/bf73e9fe/datamap/mv/core/src/test/scala/org/apache/carbondata/mv/rewrite/MVSampleTestCase.scala
----------------------------------------------------------------------
diff --git a/datamap/mv/core/src/test/scala/org/apache/carbondata/mv/rewrite/MVSampleTestCase.scala b/datamap/mv/core/src/test/scala/org/apache/carbondata/mv/rewrite/MVSampleTestCase.scala
new file mode 100644
index 0000000..f8eb11f
--- /dev/null
+++ b/datamap/mv/core/src/test/scala/org/apache/carbondata/mv/rewrite/MVSampleTestCase.scala
@@ -0,0 +1,167 @@
+package org.apache.carbondata.mv.rewrite
+
+import java.io.File
+
+import org.apache.spark.sql.catalyst.plans.logical.LogicalPlan
+import org.apache.spark.sql.execution.datasources.LogicalRelation
+import org.apache.spark.sql.test.util.QueryTest
+import org.scalatest.BeforeAndAfterAll
+
+import org.apache.carbondata.core.constants.CarbonCommonConstants
+import org.apache.carbondata.core.util.CarbonProperties
+import org.apache.carbondata.mv.rewrite.matching.TestSQLBatch._
+
+class MVSampleTestCase extends QueryTest with BeforeAndAfterAll {
+
+  override def beforeAll {
+    drop()
+    CarbonProperties.getInstance()
+      .addProperty(CarbonCommonConstants.CARBON_TIMESTAMP_FORMAT, "yyyy/MM/dd")
+    val projectPath = new File(this.getClass.getResource("/").getPath + "../../../../../")
+      .getCanonicalPath.replaceAll("\\\\", "/")
+    val integrationPath = s"$projectPath/integration"
+    val resourcesPath = s"$integrationPath/spark-common-test/src/test/resources"
+    sql("drop database if exists sample cascade")
+    sql("create database sample")
+    sql("use sample")
+
+    createTables.map(sql)
+
+  }
+
+  def createTables: Seq[String] = {
+    Seq[String](
+      s"""
+         |CREATE TABLE Fact (
+         |  `tid`     int,
+         |  `fpgid`   int,
+         |  `flid`    int,
+         |  `date`    timestamp,
+         |  `faid`    int,
+         |  `price`   double,
+         |  `qty`     int,
+         |  `disc`    string
+         |)
+         |STORED BY 'org.apache.carbondata.format'
+        """.stripMargin.trim,
+      s"""
+         |CREATE TABLE Dim (
+         |  `lid`     int,
+         |  `city`    string,
+         |  `state`   string,
+         |  `country` string
+         |)
+         |STORED BY 'org.apache.carbondata.format'
+        """.stripMargin.trim,
+      s"""
+         |CREATE TABLE Item (
+         |  `i_item_id`     int,
+         |  `i_item_sk`     int
+         |)
+         |STORED BY 'org.apache.carbondata.format'
+        """.stripMargin.trim
+    )
+  }
+
+
+  test("test create datamap with sampleTestCases case_1") {
+    sql(s"drop datamap if exists datamap_sm1")
+    sql(s"create datamap datamap_sm1 using 'mv' as ${sampleTestCases(0)._2}")
+    sql(s"rebuild datamap datamap_sm1")
+    val df = sql(sampleTestCases(0)._3)
+    val analyzed = df.queryExecution.analyzed
+    assert(!verifyMVDataMap(analyzed, "datamap_sm1"))
+    sql(s"drop datamap datamap_sm1")
+  }
+
+  test("test create datamap with sampleTestCases case_3") {
+    sql(s"drop datamap if exists datamap_sm2")
+    sql(s"create datamap datamap_sm2 using 'mv' as ${sampleTestCases(2)._2}")
+    sql(s"rebuild datamap datamap_sm2")
+    val df = sql(sampleTestCases(2)._3)
+    val analyzed = df.queryExecution.analyzed
+    assert(verifyMVDataMap(analyzed, "datamap_sm2"))
+    sql(s"drop datamap datamap_sm2")
+  }
+
+  test("test create datamap with sampleTestCases case_4") {
+    sql(s"drop datamap if exists datamap_sm3")
+    sql(s"create datamap datamap_sm3 using 'mv' as ${sampleTestCases(3)._2}")
+    sql(s"rebuild datamap datamap_sm3")
+    val df = sql(sampleTestCases(3)._3)
+    val analyzed = df.queryExecution.analyzed
+    assert(verifyMVDataMap(analyzed, "datamap_sm3"))
+    sql(s"drop datamap datamap_sm3")
+  }
+
+  test("test create datamap with sampleTestCases case_5") {
+    sql(s"drop datamap if exists datamap_sm4")
+    sql(s"create datamap datamap_sm4 using 'mv' as ${sampleTestCases(4)._2}")
+    sql(s"rebuild datamap datamap_sm4")
+    val df = sql(sampleTestCases(4)._3)
+    val analyzed = df.queryExecution.analyzed
+    assert(verifyMVDataMap(analyzed, "datamap_sm4"))
+    sql(s"drop datamap datamap_sm4")
+  }
+
+  test("test create datamap with sampleTestCases case_6") {
+    sql(s"drop datamap if exists datamap_sm5")
+    sql(s"create datamap datamap_sm5 using 'mv' as ${sampleTestCases(5)._2}")
+    sql(s"rebuild datamap datamap_sm5")
+    val df = sql(sampleTestCases(5)._3)
+    val analyzed = df.queryExecution.analyzed
+    assert(!verifyMVDataMap(analyzed, "datamap_sm5"))
+    sql(s"drop datamap datamap_sm5")
+  }
+
+  test("test create datamap with sampleTestCases case_7") {
+    sql(s"drop datamap if exists datamap_sm6")
+    sql(s"create datamap datamap_sm6 using 'mv' as ${sampleTestCases(6)._2}")
+    sql(s"rebuild datamap datamap_sm6")
+    val df = sql(sampleTestCases(6)._3)
+    val analyzed = df.queryExecution.analyzed
+    assert(verifyMVDataMap(analyzed, "datamap_sm6"))
+    sql(s"drop datamap datamap_sm6")
+  }
+
+  test("test create datamap with sampleTestCases case_8") {
+    sql(s"drop datamap if exists datamap_sm7")
+    sql(s"create datamap datamap_sm7 using 'mv' as ${sampleTestCases(7)._2}")
+    sql(s"rebuild datamap datamap_sm7")
+    val df = sql(sampleTestCases(7)._3)
+    val analyzed = df.queryExecution.analyzed
+    assert(verifyMVDataMap(analyzed, "datamap_sm7"))
+    sql(s"drop datamap datamap_sm7")
+  }
+
+  test("test create datamap with sampleTestCases case_9") {
+    sql(s"drop datamap if exists datamap_sm8")
+    sql(s"create datamap datamap_sm8 using 'mv' as ${sampleTestCases(8)._2}")
+    sql(s"rebuild datamap datamap_sm8")
+    val df = sql(sampleTestCases(8)._3)
+    val analyzed = df.queryExecution.analyzed
+    assert(verifyMVDataMap(analyzed, "datamap_sm8"))
+    sql(s"drop datamap datamap_sm8")
+  }
+
+
+  def verifyMVDataMap(logicalPlan: LogicalPlan, dataMapName: String): Boolean = {
+    val tables = logicalPlan collect {
+      case l: LogicalRelation => l.catalogTable.get
+    }
+    tables.exists(_.identifier.table.equalsIgnoreCase(dataMapName+"_table"))
+  }
+
+
+  def drop(): Unit = {
+    sql("use default")
+    sql("drop database if exists sample cascade")
+  }
+
+  override def afterAll {
+    drop()
+    CarbonProperties.getInstance()
+      .addProperty(CarbonCommonConstants.CARBON_TIMESTAMP_FORMAT,
+        CarbonCommonConstants.CARBON_TIMESTAMP_DEFAULT_FORMAT)
+  }
+}

http://git-wip-us.apache.org/repos/asf/carbondata/blob/bf73e9fe/datamap/mv/core/src/test/scala/org/apache/carbondata/mv/rewrite/MVTPCDSTestCase.scala
----------------------------------------------------------------------
diff --git a/datamap/mv/core/src/test/scala/org/apache/carbondata/mv/rewrite/MVTPCDSTestCase.scala b/datamap/mv/core/src/test/scala/org/apache/carbondata/mv/rewrite/MVTPCDSTestCase.scala
new file mode 100644
index 0000000..473b338
--- /dev/null
+++ b/datamap/mv/core/src/test/scala/org/apache/carbondata/mv/rewrite/MVTPCDSTestCase.scala
@@ -0,0 +1,146 @@
+package org.apache.carbondata.mv.rewrite
+
+import java.io.File
+
+import org.apache.spark.sql.catalyst.plans.logical.LogicalPlan
+import org.apache.spark.sql.execution.datasources.LogicalRelation
+import org.apache.spark.sql.test.util.QueryTest
+import org.scalatest.BeforeAndAfterAll
+
+import org.apache.carbondata.core.constants.CarbonCommonConstants
+import org.apache.carbondata.core.util.CarbonProperties
+import org.apache.carbondata.mv.rewrite.matching.TestTPCDS_1_4_Batch._
+import org.apache.carbondata.mv.testutil.Tpcds_1_4_Tables.tpcds1_4Tables
+
+class MVTPCDSTestCase extends QueryTest with BeforeAndAfterAll {
+
+  override def beforeAll {
+    drop()
+    CarbonProperties.getInstance()
+      .addProperty(CarbonCommonConstants.CARBON_TIMESTAMP_FORMAT, "yyyy/MM/dd")
+    val projectPath = new File(this.getClass.getResource("/").getPath + "../../../../../")
+      .getCanonicalPath.replaceAll("\\\\", "/")
+    val integrationPath = s"$projectPath/integration"
+    val resourcesPath = s"$integrationPath/spark-common-test/src/test/resources"
+    sql("drop database if exists tpcds cascade")
+    sql("create database tpcds")
+    sql("use tpcds")
+
+    tpcds1_4Tables.foreach { create_table =>
+      sql(create_table)
+    }
+
+  }
+
+  ignore("test create datamap with tpcds_1_4_testCases case_1") {
+    sql(s"drop datamap if exists datamap_tpcds1")
+    sql(s"create datamap datamap_tpcds1 using 'mv' as ${tpcds_1_4_testCases(0)._2}")
+    sql(s"rebuild datamap datamap_tpcds1")
+    val df = sql(tpcds_1_4_testCases(0)._3)
+    val analyzed = df.queryExecution.analyzed
+    assert(verifyMVDataMap(analyzed, "datamap_tpcds1"))
+    sql(s"drop datamap datamap_tpcds1")
+  }
+
+  ignore("test create datamap with tpcds_1_4_testCases case_3") {
+    sql(s"drop datamap if exists datamap_tpcds3")
+    sql(s"create datamap datamap_tpcds3 using 'mv' as ${tpcds_1_4_testCases(2)._2}")
+    sql(s"rebuild datamap datamap_tpcds3")
+    val df = sql(tpcds_1_4_testCases(2)._3)
+    val analyzed = df.queryExecution.analyzed
+    assert(verifyMVDataMap(analyzed, "datamap_tpcds3"))
+    sql(s"drop datamap datamap_tpcds3")
+  }
+
+  test("test create datamap with tpcds_1_4_testCases case_4") {
+    sql(s"drop datamap if exists datamap_tpcds4")
+    sql(s"create datamap datamap_tpcds4 using 'mv' as ${tpcds_1_4_testCases(3)._2}")
+    sql(s"rebuild datamap datamap_tpcds4")
+    val df = sql(tpcds_1_4_testCases(3)._3)
+    val analyzed = df.queryExecution.analyzed
+    assert(verifyMVDataMap(analyzed, "datamap_tpcds4"))
+    sql(s"drop datamap datamap_tpcds4")
+  }
+
+  ignore("test create datamap with tpcds_1_4_testCases case_5") {
+    sql(s"drop datamap if exists datamap_tpcds5")
+    sql(s"create datamap datamap_tpcds5 using 'mv' as ${tpcds_1_4_testCases(4)._2}")
+    sql(s"rebuild datamap datamap_tpcds5")
+    val df = sql(tpcds_1_4_testCases(4)._3)
+    val analyzed = df.queryExecution.analyzed
+    assert(verifyMVDataMap(analyzed, "datamap_tpcds5"))
+    sql(s"drop datamap datamap_tpcds5")
+  }
+
+  ignore("test create datamap with tpcds_1_4_testCases case_6") {
+    sql(s"drop datamap if exists datamap_tpcds6")
+    sql(s"create datamap datamap_tpcds6 using 'mv' as ${tpcds_1_4_testCases(5)._2}")
+    sql(s"rebuild datamap datamap_tpcds6")
+    val df = sql(tpcds_1_4_testCases(5)._3)
+    val analyzed = df.queryExecution.analyzed
+    assert(verifyMVDataMap(analyzed, "datamap_tpcds6"))
+    sql(s"drop datamap datamap_tpcds6")
+  }
+
+  ignore("test create datamap with tpcds_1_4_testCases case_8") {
+    sql(s"drop datamap if exists datamap_tpcds8")
+    sql(s"create datamap datamap_tpcds8 using 'mv' as ${tpcds_1_4_testCases(7)._2}")
+    sql(s"rebuild datamap datamap_tpcds8")
+    val df = sql(tpcds_1_4_testCases(7)._3)
+    val analyzed = df.queryExecution.analyzed
+    assert(verifyMVDataMap(analyzed, "datamap_tpcds8"))
+    sql(s"drop datamap datamap_tpcds8")
+  }
+
+  ignore("test create datamap with tpcds_1_4_testCases case_11") {
+    sql(s"drop datamap if exists datamap_tpcds11")
+    sql(s"create datamap datamap_tpcds11 using 'mv' as ${tpcds_1_4_testCases(10)._2}")
+    sql(s"rebuild datamap datamap_tpcds11")
+    val df = sql(tpcds_1_4_testCases(10)._3)
+    val analyzed = df.queryExecution.analyzed
+    assert(verifyMVDataMap(analyzed, "datamap_tpcds11"))
+    sql(s"drop datamap datamap_tpcds11")
+  }
+
+  ignore("test create datamap with tpcds_1_4_testCases case_15") {
+    sql(s"drop datamap if exists datamap_tpcds15")
+    sql(s"create datamap datamap_tpcds15 using 'mv' as ${tpcds_1_4_testCases(14)._2}")
+    sql(s"rebuild datamap datamap_tpcds15")
+    val df = sql(tpcds_1_4_testCases(14)._3)
+    val analyzed = df.queryExecution.analyzed
+    assert(verifyMVDataMap(analyzed, "datamap_tpcds15"))
+    sql(s"drop datamap datamap_tpcds15")
+  }
+
+  ignore("test create datamap with tpcds_1_4_testCases case_16") {
+    sql(s"drop datamap if exists datamap_tpcds16")
+    sql(s"create datamap datamap_tpcds16 using 'mv' as ${tpcds_1_4_testCases(15)._2}")
+    sql(s"rebuild datamap datamap_tpcds16")
+    val df = sql(tpcds_1_4_testCases(15)._3)
+    val analyzed = df.queryExecution.analyzed
+    assert(verifyMVDataMap(analyzed, "datamap_tpcds16"))
+    sql(s"drop datamap datamap_tpcds16")
+  }
+
+
+
+  def verifyMVDataMap(logicalPlan: LogicalPlan, dataMapName: String): Boolean = {
+    val tables = logicalPlan collect {
+      case l: LogicalRelation => l.catalogTable.get
+    }
+    tables.exists(_.identifier.table.equalsIgnoreCase(dataMapName+"_table"))
+  }
+
+
+  def drop(): Unit = {
+    sql("use default")
+    sql("drop database if exists tpcds cascade")
+  }
+
+  override def afterAll {
+    drop()
+    CarbonProperties.getInstance()
+      .addProperty(CarbonCommonConstants.CARBON_TIMESTAMP_FORMAT,
+        CarbonCommonConstants.CARBON_TIMESTAMP_DEFAULT_FORMAT)
+  }
+}


[29/50] [abbrv] carbondata git commit: [CARBONDATA-2474] Support Modular Plan for Materialized View DataMap

Posted by ma...@apache.org.
[CARBONDATA-2474] Support Modular Plan for Materialized View DataMap

Currently CarbonData supports the preaggregate datamap, which only supports preaggregation on a single table. To improve this, we can add join capability by implementing Materialized Views.

In Carbon's Materialized View support, the Modular Plan is the basic abstraction for a materialized view query plan. This PR adds the modular plan module, which defines the plan tree structure and the conversion from a Spark Logical Plan.

This closes #2301
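
As a rough illustration (not part of this patch), the new module is intended to be used along these lines: an analyzed Spark LogicalPlan is optimized and then converted into a ModularPlan through the DSL added under datamap/mv/plan. In the hedged sketch below, the SparkSession setup and the query over fact_table1 are only assumptions borrowed from the test suites; the optimize and modularize conversions come from the dsl package included further down in this commit.

    // Hedged sketch: assumes a local SparkSession and a fact_table1 like the one used in the MV tests
    import org.apache.spark.sql.SparkSession
    import org.apache.carbondata.mv.dsl._   // DslLogical2Modular: optimize / modularize

    val spark = SparkSession.builder().master("local[*]").appName("mv-modular-sketch").getOrCreate()
    val logicalPlan = spark
      .sql("select empname, sum(utilization) from fact_table1 group by empname")
      .queryExecution.analyzed
    // BirdcageOptimizer rewrites the logical plan, SimpleModularizer then turns it into a ModularPlan
    val modularPlan = logicalPlan.optimize.modularize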


Project: http://git-wip-us.apache.org/repos/asf/carbondata/repo
Commit: http://git-wip-us.apache.org/repos/asf/carbondata/commit/ffddba70
Tree: http://git-wip-us.apache.org/repos/asf/carbondata/tree/ffddba70
Diff: http://git-wip-us.apache.org/repos/asf/carbondata/diff/ffddba70

Branch: refs/heads/spark-2.3
Commit: ffddba704bdc110b7fae9054b186c931cfb23744
Parents: 443b717
Author: Jacky Li <ja...@qq.com>
Authored: Sat May 12 17:23:20 2018 +0800
Committer: ravipesala <ra...@gmail.com>
Committed: Sat May 12 20:31:43 2018 +0530

----------------------------------------------------------------------
 .../core/datamap/DataMapStoreManager.java       |   36 +-
 .../schema/datamap/DataMapClassProvider.java    |    3 +-
 .../metadata/schema/table/DataMapSchema.java    |    4 +-
 datamap/mv/plan/pom.xml                         |  157 +
 .../org/apache/carbondata/mv/dsl/package.scala  |  101 +
 .../mv/expressions/modular/subquery.scala       |  169 +
 .../mv/plans/modular/AggregatePushDown.scala    |  170 +
 .../carbondata/mv/plans/modular/Flags.scala     |   71 +
 .../mv/plans/modular/Harmonizer.scala           |  236 +
 .../mv/plans/modular/ModularPatterns.scala      |  237 +
 .../mv/plans/modular/ModularPlan.scala          |  206 +
 .../modular/ModularPlanSignatureGenerator.scala |   73 +
 .../mv/plans/modular/ModularRelation.scala      |  143 +
 .../mv/plans/modular/Modularizer.scala          |  117 +
 .../mv/plans/modular/basicOperators.scala       |   86 +
 .../mv/plans/modular/queryGraph.scala           |   24 +
 .../apache/carbondata/mv/plans/package.scala    |   55 +
 .../mv/plans/util/BirdcageOptimizer.scala       |  199 +
 .../plans/util/Logical2ModularExtractions.scala |  355 ++
 .../util/LogicalPlanSignatureGenerator.scala    |  101 +
 .../carbondata/mv/plans/util/Printers.scala     |  347 ++
 .../carbondata/mv/plans/util/SQLBuild.scala     |   31 +
 .../carbondata/mv/plans/util/SQLBuildDSL.scala  |  428 ++
 .../carbondata/mv/plans/util/SQLBuilder.scala   |  262 ++
 .../carbondata/mv/plans/util/Signature.scala    |   49 +
 .../carbondata/mv/plans/util/TableCluster.scala |   55 +
 .../mv/testutil/ModularPlanTest.scala           |  180 +
 .../mv/testutil/Tpcds_1_4_QueryBatch.scala      | 4293 ++++++++++++++++++
 .../mv/testutil/Tpcds_1_4_Tables.scala          |  819 ++++
 .../org/apache/carbondata/mv/TestSQLBatch.scala |  584 +++
 .../mv/plans/ExtractJoinConditionsSuite.scala   |   67 +
 .../carbondata/mv/plans/IsSPJGHSuite.scala      |   59 +
 .../mv/plans/LogicalToModularPlanSuite.scala    |  196 +
 .../carbondata/mv/plans/ModularToSQLSuite.scala |  164 +
 .../carbondata/mv/plans/SignatureSuite.scala    |  104 +
 .../carbondata/spark/util/CarbonScalaUtil.scala |   12 +-
 .../apache/spark/sql/util/SparkSQLUtil.scala    |    7 +-
 .../carbondata/datamap/DataMapManager.java      |    9 +
 .../datamap/CarbonDropDataMapCommand.scala      |    9 +-
 .../command/table/CarbonDropTableCommand.scala  |    1 +
 pom.xml                                         |    6 +
 41 files changed, 10214 insertions(+), 11 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/carbondata/blob/ffddba70/core/src/main/java/org/apache/carbondata/core/datamap/DataMapStoreManager.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/datamap/DataMapStoreManager.java b/core/src/main/java/org/apache/carbondata/core/datamap/DataMapStoreManager.java
index 9f7af2b..a3be26a 100644
--- a/core/src/main/java/org/apache/carbondata/core/datamap/DataMapStoreManager.java
+++ b/core/src/main/java/org/apache/carbondata/core/datamap/DataMapStoreManager.java
@@ -64,7 +64,7 @@ public final class DataMapStoreManager {
   /**
    * Contains the datamap catalog for each datamap provider.
    */
-  private Map<String, DataMapCatalog> dataMapCatalogs = new ConcurrentHashMap<>();
+  private Map<String, DataMapCatalog> dataMapCatalogs = null;
 
   private Map<String, TableSegmentRefresher> segmentRefreshMap = new ConcurrentHashMap<>();
 
@@ -166,7 +166,8 @@ public final class DataMapStoreManager {
    * @param dataMapSchema
    */
   public synchronized void registerDataMapCatalog(DataMapProvider dataMapProvider,
-      DataMapSchema dataMapSchema) {
+      DataMapSchema dataMapSchema) throws IOException {
+    intializeDataMapCatalogs(dataMapProvider);
     String name = dataMapSchema.getProviderName();
     DataMapCatalog dataMapCatalog = dataMapCatalogs.get(name);
     if (dataMapCatalog == null) {
@@ -185,6 +186,9 @@ public final class DataMapStoreManager {
    * @param dataMapSchema
    */
   public synchronized void unRegisterDataMapCatalog(DataMapSchema dataMapSchema) {
+    if (dataMapCatalogs == null) {
+      return;
+    }
     String name = dataMapSchema.getProviderName();
     DataMapCatalog dataMapCatalog = dataMapCatalogs.get(name);
     if (dataMapCatalog != null) {
@@ -197,11 +201,37 @@ public final class DataMapStoreManager {
    * @param providerName
    * @return
    */
-  public DataMapCatalog getDataMapCatalog(String providerName) {
+  public DataMapCatalog getDataMapCatalog(DataMapProvider dataMapProvider, String providerName)
+      throws IOException {
+    intializeDataMapCatalogs(dataMapProvider);
     return dataMapCatalogs.get(providerName);
   }
 
   /**
+   * Initialize by reading all datamap schemas from the store and re-registering them
+   * @param dataMapProvider
+   */
+  private void intializeDataMapCatalogs(DataMapProvider dataMapProvider) throws IOException {
+    if (dataMapCatalogs == null) {
+      dataMapCatalogs = new ConcurrentHashMap<>();
+      List<DataMapSchema> dataMapSchemas = getAllDataMapSchemas();
+      for (DataMapSchema schema : dataMapSchemas) {
+        DataMapCatalog dataMapCatalog = dataMapCatalogs.get(schema.getProviderName());
+        if (dataMapCatalog == null) {
+          dataMapCatalog = dataMapProvider.createDataMapCatalog();
+          dataMapCatalogs.put(schema.getProviderName(), dataMapCatalog);
+        }
+        try {
+          dataMapCatalog.registerSchema(schema);
+        } catch (Exception e) {
+          // Ignore the schema
+          LOGGER.error(e, "Error while registering schema");
+        }
+      }
+    }
+  }
+
+  /**
    * It gives the default datamap of the table. Default datamap of any table is BlockletDataMap
    *
    * @param table

http://git-wip-us.apache.org/repos/asf/carbondata/blob/ffddba70/core/src/main/java/org/apache/carbondata/core/metadata/schema/datamap/DataMapClassProvider.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/metadata/schema/datamap/DataMapClassProvider.java b/core/src/main/java/org/apache/carbondata/core/metadata/schema/datamap/DataMapClassProvider.java
index d5a99e8..772bb40 100644
--- a/core/src/main/java/org/apache/carbondata/core/metadata/schema/datamap/DataMapClassProvider.java
+++ b/core/src/main/java/org/apache/carbondata/core/metadata/schema/datamap/DataMapClassProvider.java
@@ -30,7 +30,8 @@ public enum DataMapClassProvider {
   PREAGGREGATE("org.apache.carbondata.core.datamap.AggregateDataMap", "preaggregate"),
   TIMESERIES("org.apache.carbondata.core.datamap.TimeSeriesDataMap", "timeseries"),
   LUCENE("org.apache.carbondata.datamap.lucene.LuceneFineGrainDataMapFactory","lucene"),
-  BLOOMFILTER("org.apache.carbondata.datamap.bloom.BloomCoarseGrainDataMapFactory", "bloomfilter");
+  BLOOMFILTER("org.apache.carbondata.datamap.bloom.BloomCoarseGrainDataMapFactory", "bloomfilter"),
+  MV("org.apache.carbondata.core.datamap.MVDataMap", "mv");
 
   /**
    * Fully qualified class name of datamap

http://git-wip-us.apache.org/repos/asf/carbondata/blob/ffddba70/core/src/main/java/org/apache/carbondata/core/metadata/schema/table/DataMapSchema.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/metadata/schema/table/DataMapSchema.java b/core/src/main/java/org/apache/carbondata/core/metadata/schema/table/DataMapSchema.java
index 611f298..7f6e86f 100644
--- a/core/src/main/java/org/apache/carbondata/core/metadata/schema/table/DataMapSchema.java
+++ b/core/src/main/java/org/apache/carbondata/core/metadata/schema/table/DataMapSchema.java
@@ -152,7 +152,9 @@ public class DataMapSchema implements Serializable, Writable {
    */
   public boolean isIndexDataMap() {
     if (providerName.equalsIgnoreCase(DataMapClassProvider.PREAGGREGATE.getShortName()) ||
-        providerName.equalsIgnoreCase(DataMapClassProvider.TIMESERIES.getShortName())) {
+        providerName.equalsIgnoreCase(DataMapClassProvider.TIMESERIES.getShortName()) ||
+        providerName.equalsIgnoreCase(DataMapClassProvider.MV.getShortName()) ||
+        ctasQuery != null) {
       return false;
     } else {
       return true;

http://git-wip-us.apache.org/repos/asf/carbondata/blob/ffddba70/datamap/mv/plan/pom.xml
----------------------------------------------------------------------
diff --git a/datamap/mv/plan/pom.xml b/datamap/mv/plan/pom.xml
new file mode 100644
index 0000000..6a36fc5
--- /dev/null
+++ b/datamap/mv/plan/pom.xml
@@ -0,0 +1,157 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+    Licensed to the Apache Software Foundation (ASF) under one or more
+    contributor license agreements.  See the NOTICE file distributed with
+    this work for additional information regarding copyright ownership.
+    The ASF licenses this file to You under the Apache License, Version 2.0
+    (the "License"); you may not use this file except in compliance with
+    the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+    Unless required by applicable law or agreed to in writing, software
+    distributed under the License is distributed on an "AS IS" BASIS,
+    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    See the License for the specific language governing permissions and
+    limitations under the License.
+-->
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+
+  <modelVersion>4.0.0</modelVersion>
+
+  <parent>
+    <groupId>org.apache.carbondata</groupId>
+    <artifactId>carbondata-parent</artifactId>
+    <version>1.4.0-SNAPSHOT</version>
+    <relativePath>../../../pom.xml</relativePath>
+  </parent>
+
+  <artifactId>carbondata-mv-plan</artifactId>
+  <name>Apache CarbonData :: Materialized View Plan</name>
+
+  <properties>
+    <dev.path>${basedir}/../../../dev</dev.path>
+  </properties>
+
+  <dependencies>
+    <dependency>
+      <groupId>org.apache.carbondata</groupId>
+      <artifactId>carbondata-spark2</artifactId>
+      <version>${project.version}</version>
+    </dependency>
+    <dependency>
+      <groupId>org.scalatest</groupId>
+      <artifactId>scalatest_${scala.binary.version}</artifactId>
+      <scope>provided</scope>
+    </dependency>
+  </dependencies>
+
+  <build>
+    <testSourceDirectory>src/test/scala</testSourceDirectory>
+    <plugins>
+      <plugin>
+        <artifactId>maven-compiler-plugin</artifactId>
+        <configuration>
+          <source>1.8</source>
+          <target>1.8</target>
+        </configuration>
+      </plugin>
+      <plugin>
+        <groupId>org.apache.maven.plugins</groupId>
+        <artifactId>maven-surefire-plugin</artifactId>
+        <version>2.18</version>
+        <!-- Note config is repeated in scalatest config -->
+        <configuration>
+          <skip>false</skip>
+          <reportsDirectory>${project.build.directory}/surefire-reports</reportsDirectory>
+          <argLine>-Xmx3g -XX:MaxPermSize=512m -XX:ReservedCodeCacheSize=512m</argLine>
+          <systemProperties>
+            <java.awt.headless>true</java.awt.headless>
+          </systemProperties>
+          <testFailureIgnore>false</testFailureIgnore>
+          <failIfNoTests>false</failIfNoTests>
+        </configuration>
+      </plugin>
+
+      <plugin>
+        <groupId>org.apache.maven.plugins</groupId>
+        <artifactId>maven-checkstyle-plugin</artifactId>
+        <version>2.17</version>
+        <configuration>
+          <skip>true</skip>
+        </configuration>
+      </plugin>
+      <plugin>
+        <groupId>org.scala-tools</groupId>
+        <artifactId>maven-scala-plugin</artifactId>
+        <version>2.15.2</version>
+        <executions>
+          <execution>
+            <id>compile</id>
+            <goals>
+              <goal>compile</goal>
+            </goals>
+            <phase>compile</phase>
+          </execution>
+          <execution>
+            <id>testCompile</id>
+            <goals>
+              <goal>testCompile</goal>
+            </goals>
+            <phase>test</phase>
+          </execution>
+          <execution>
+            <phase>process-resources</phase>
+            <goals>
+              <goal>compile</goal>
+            </goals>
+          </execution>
+        </executions>
+      </plugin>
+      <plugin>
+        <groupId>org.apache.maven.plugins</groupId>
+        <artifactId>maven-enforcer-plugin</artifactId>
+        <version>1.4.1</version>
+        <configuration>
+          <skip>true</skip>
+        </configuration>
+      </plugin>
+      <plugin>
+        <groupId>com.ning.maven.plugins</groupId>
+        <artifactId>maven-duplicate-finder-plugin</artifactId>
+        <configuration>
+          <skip>true</skip>
+        </configuration>
+      </plugin>
+      <plugin>
+        <groupId>org.scalatest</groupId>
+        <artifactId>scalatest-maven-plugin</artifactId>
+        <version>1.0</version>
+        <!-- Note config is repeated in surefire config -->
+        <configuration>
+          <reportsDirectory>${project.build.directory}/surefire-reports</reportsDirectory>
+          <junitxml>.</junitxml>
+          <testFailureIgnore>false</testFailureIgnore>
+          <filereports>CarbonTestSuite.txt</filereports>
+          <argLine>-ea -Xmx3g -XX:MaxPermSize=512m -XX:ReservedCodeCacheSize=512m
+          </argLine>
+          <stderr />
+          <environmentVariables>
+          </environmentVariables>
+          <systemProperties>
+            <java.awt.headless>true</java.awt.headless>
+          </systemProperties>
+        </configuration>
+        <executions>
+          <execution>
+            <id>test</id>
+            <goals>
+              <goal>test</goal>
+            </goals>
+          </execution>
+        </executions>
+      </plugin>
+    </plugins>
+  </build>
+
+</project>

http://git-wip-us.apache.org/repos/asf/carbondata/blob/ffddba70/datamap/mv/plan/src/main/scala/org/apache/carbondata/mv/dsl/package.scala
----------------------------------------------------------------------
diff --git a/datamap/mv/plan/src/main/scala/org/apache/carbondata/mv/dsl/package.scala b/datamap/mv/plan/src/main/scala/org/apache/carbondata/mv/dsl/package.scala
new file mode 100644
index 0000000..20b5e8a
--- /dev/null
+++ b/datamap/mv/plan/src/main/scala/org/apache/carbondata/mv/dsl/package.scala
@@ -0,0 +1,101 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.carbondata.mv
+
+import scala.language.implicitConversions
+
+import org.apache.spark.sql.catalyst._
+import org.apache.spark.sql.catalyst.expressions.{Expression, NamedExpression}
+import org.apache.spark.sql.catalyst.plans.logical.LogicalPlan
+
+import org.apache.carbondata.mv.plans._
+import org.apache.carbondata.mv.plans.modular.{JoinEdge, ModularPlan}
+import org.apache.carbondata.mv.plans.modular.Flags._
+import org.apache.carbondata.mv.plans.util._
+
+/**
+ * A collection of implicit conversions that create a DSL for constructing data structures
+ * for modular plans.
+ *
+ */
+package object dsl {
+
+  // object plans {
+
+    implicit class DslModularPlan(val modularPlan: ModularPlan) {
+      def select(outputExprs: NamedExpression*)
+        (inputExprs: Expression*)
+        (predicateExprs: Expression*)
+        (aliasMap: Map[Int, String])
+        (joinEdges: JoinEdge*): ModularPlan = {
+        modular
+          .Select(
+            outputExprs,
+            inputExprs,
+            predicateExprs,
+            aliasMap,
+            joinEdges,
+            Seq(modularPlan),
+            NoFlags,
+            Seq.empty,
+            Seq.empty)
+      }
+
+      def groupBy(outputExprs: NamedExpression*)
+        (inputExprs: Expression*)
+        (predicateExprs: Expression*): ModularPlan = {
+        modular
+          .GroupBy(outputExprs, inputExprs, predicateExprs, None, modularPlan, NoFlags, Seq.empty)
+      }
+
+      def harmonize: ModularPlan = modularPlan.harmonized
+    }
+
+    implicit class DslModularPlans(val modularPlans: Seq[ModularPlan]) {
+      def select(outputExprs: NamedExpression*)
+        (inputExprs: Expression*)
+        (predicateList: Expression*)
+        (aliasMap: Map[Int, String])
+        (joinEdges: JoinEdge*): ModularPlan = {
+        modular
+          .Select(
+            outputExprs,
+            inputExprs,
+            predicateList,
+            aliasMap,
+            joinEdges,
+            modularPlans,
+            NoFlags,
+            Seq.empty,
+            Seq.empty)
+      }
+
+      def union(): ModularPlan = modular.Union(modularPlans, NoFlags, Seq.empty)
+    }
+
+    implicit class DslLogical2Modular(val logicalPlan: LogicalPlan) {
+      def resolveonly: LogicalPlan = analysis.SimpleAnalyzer.execute(logicalPlan)
+
+      def modularize: ModularPlan = modular.SimpleModularizer.modularize(logicalPlan).next
+
+      def optimize: LogicalPlan = BirdcageOptimizer.execute(logicalPlan)
+    }
+
+  // }
+
+}
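
To make the intent of these implicit classes concrete, here is a small hedged sketch (not part of the patch) of how a projection could be built with the DSL above. The attribute references are created with Spark's catalyst expression DSL and are hypothetical; the input ModularPlan is assumed to already exist.

    // Sketch only: `plan` stands for an already-constructed ModularPlan over columns a and b
    import org.apache.spark.sql.catalyst.dsl.expressions._
    import org.apache.carbondata.mv.dsl._
    import org.apache.carbondata.mv.plans.modular.ModularPlan

    def projectAB(plan: ModularPlan): ModularPlan = {
      val a = 'a.int      // hypothetical attribute references
      val b = 'b.string
      // output (a, b), input (a, b), no predicates, no alias map entries, no join edges
      plan.select(a, b)(a, b)()(Map.empty[Int, String])()
    }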

http://git-wip-us.apache.org/repos/asf/carbondata/blob/ffddba70/datamap/mv/plan/src/main/scala/org/apache/carbondata/mv/expressions/modular/subquery.scala
----------------------------------------------------------------------
diff --git a/datamap/mv/plan/src/main/scala/org/apache/carbondata/mv/expressions/modular/subquery.scala b/datamap/mv/plan/src/main/scala/org/apache/carbondata/mv/expressions/modular/subquery.scala
new file mode 100644
index 0000000..cfe341a
--- /dev/null
+++ b/datamap/mv/plan/src/main/scala/org/apache/carbondata/mv/expressions/modular/subquery.scala
@@ -0,0 +1,169 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.carbondata.mv.expressions.modular
+
+import org.apache.spark.sql.catalyst.expressions.{AttributeSeq, AttributeSet, Expression, ExprId, LeafExpression, NamedExpression, OuterReference, PlanExpression, Predicate, Unevaluable}
+import org.apache.spark.sql.catalyst.plans.QueryPlan
+import org.apache.spark.sql.types._
+
+import org.apache.carbondata.mv.plans.modular.ModularPlan
+
+/**
+ * A base interface for expressions that contain a [[ModularPlan]].
+ */
+abstract class ModularSubquery(
+    plan: ModularPlan,
+    children: Seq[Expression],
+    exprId: ExprId) extends PlanExpression[ModularPlan] {
+  override lazy val resolved: Boolean = childrenResolved && plan.resolved
+  override lazy val references: AttributeSet =
+    if (plan.resolved) {
+      super.references -- plan.outputSet
+    } else {
+      super.references
+    }
+
+  override def withNewPlan(plan: ModularPlan): ModularSubquery
+
+  override def semanticEquals(o: Expression): Boolean = {
+    o match {
+      case p: ModularSubquery =>
+        this.getClass.getName.equals(p.getClass.getName) && plan.sameResult(p.plan) &&
+        children.length == p.children.length &&
+        children.zip(p.children).forall(p => p._1.semanticEquals(p._2))
+      case _ => false
+    }
+  }
+
+  def canonicalize(attrs: AttributeSeq): ModularSubquery = {
+    // Normalize the outer references in the subquery plan.
+    val normalizedPlan = plan.transformAllExpressions {
+      case OuterReference(r) => OuterReference(QueryPlan.normalizeExprId(r, attrs))
+    }
+    withNewPlan(normalizedPlan).canonicalized.asInstanceOf[ModularSubquery]
+  }
+}
+
+/**
+ * A subquery that will return only one row and one column. This will be converted into a physical
+ * scalar subquery during planning.
+ *
+ * Note: `exprId` is used to have a unique name in explain string output.
+ */
+case class ScalarModularSubquery(
+    plan: ModularPlan,
+    children: Seq[Expression] = Seq.empty,
+    exprId: ExprId = NamedExpression.newExprId)
+  extends ModularSubquery(plan, children, exprId) with Unevaluable {
+  override def dataType: DataType = plan.schema.fields.head.dataType
+
+  override def nullable: Boolean = true
+
+  override def withNewPlan(plan: ModularPlan): ScalarModularSubquery = copy(plan = plan)
+
+  override def toString: String = s"scalar-modular-subquery#${ exprId.id } $conditionString"
+
+  override lazy val canonicalized: Expression = {
+    ScalarModularSubquery(
+      plan.canonicalized,
+      children.map(_.canonicalized),
+      ExprId(0))
+  }
+}
+
+object ScalarModularSubquery {
+  def hasCorrelatedScalarSubquery(e: Expression): Boolean = {
+    e.find {
+      case s: ScalarModularSubquery => s.children.nonEmpty
+      case _ => false
+    }.isDefined
+  }
+}
+
+/**
+ * A [[ListQuery]] expression defines the query which we want to search in an IN subquery
+ * expression. It should and can only be used in conjunction with an IN expression.
+ *
+ * For example (SQL):
+ * {{{
+ *   SELECT  *
+ *   FROM    a
+ *   WHERE   a.id IN (SELECT  id
+ *                    FROM    b)
+ * }}}
+ */
+case class ModularListQuery(
+    plan: ModularPlan,
+    children: Seq[Expression] = Seq.empty,
+    exprId: ExprId = NamedExpression.newExprId)
+  extends ModularSubquery(plan, children, exprId) with Unevaluable {
+  override def dataType: DataType = plan.schema.fields.head.dataType
+
+  override def nullable: Boolean = false
+
+  override def withNewPlan(plan: ModularPlan): ModularListQuery = copy(plan = plan)
+
+  override def toString: String = s"modular-list#${ exprId.id } $conditionString"
+
+  override lazy val canonicalized: Expression = {
+    ModularListQuery(
+      plan.canonicalized,
+      children.map(_.canonicalized),
+      ExprId(0))
+  }
+}
+
+/**
+ * The [[Exists]] expression checks if a row exists in a subquery given some correlated condition.
+ *
+ * For example (SQL):
+ * {{{
+ *   SELECT  *
+ *   FROM    a
+ *   WHERE   EXISTS (SELECT  *
+ *                   FROM    b
+ *                   WHERE   b.id = a.id)
+ * }}}
+ */
+case class ModularExists(
+    plan: ModularPlan,
+    children: Seq[Expression] = Seq.empty,
+    exprId: ExprId = NamedExpression.newExprId)
+  extends ModularSubquery(plan, children, exprId) with Predicate with Unevaluable {
+  override def nullable: Boolean = false
+
+  override def withNewPlan(plan: ModularPlan): ModularExists = copy(plan = plan)
+
+  override def toString: String = s"modular-exists#${ exprId.id } $conditionString"
+
+  override lazy val canonicalized: Expression = {
+    ModularExists(
+      plan.canonicalized,
+      children.map(_.canonicalized),
+      ExprId(0))
+  }
+}
+
+/**
+ * A placeholder for the generated SQL of a subquery expression.
+ */
+case class SubqueryHolder(override val sql: String) extends LeafExpression with Unevaluable {
+  override def dataType: DataType = NullType
+
+  override def nullable: Boolean = true
+}
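
For reference, correlation on these expressions is detected purely through the `children`
sequence: a subquery expression whose children carry (rewritten) outer conditions is treated as
correlated. A spark-shell style sketch, assuming the mv-plan jar is on the classpath; the column
and stand-in plan below are illustrative only:

    import org.apache.spark.sql.catalyst.expressions.{AttributeReference, EqualTo}
    import org.apache.spark.sql.types.IntegerType

    import org.apache.carbondata.mv.expressions.modular.ScalarModularSubquery
    import org.apache.carbondata.mv.plans.modular.ModularRelation

    val id = AttributeReference("id", IntegerType)()           // hypothetical column
    val subPlan = ModularRelation(id)                           // stand-in single-column plan
    val uncorrelated = ScalarModularSubquery(subPlan)           // children = Seq.empty
    val correlated = ScalarModularSubquery(subPlan, Seq(EqualTo(id, id)))  // dummy condition

    ScalarModularSubquery.hasCorrelatedScalarSubquery(uncorrelated)   // false
    ScalarModularSubquery.hasCorrelatedScalarSubquery(correlated)     // true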

http://git-wip-us.apache.org/repos/asf/carbondata/blob/ffddba70/datamap/mv/plan/src/main/scala/org/apache/carbondata/mv/plans/modular/AggregatePushDown.scala
----------------------------------------------------------------------
diff --git a/datamap/mv/plan/src/main/scala/org/apache/carbondata/mv/plans/modular/AggregatePushDown.scala b/datamap/mv/plan/src/main/scala/org/apache/carbondata/mv/plans/modular/AggregatePushDown.scala
new file mode 100644
index 0000000..77efaf7
--- /dev/null
+++ b/datamap/mv/plan/src/main/scala/org/apache/carbondata/mv/plans/modular/AggregatePushDown.scala
@@ -0,0 +1,170 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.carbondata.mv.plans.modular
+
+import scala.collection._
+
+import org.apache.spark.sql.catalyst.expressions.{Alias, Attribute, AttributeMap, Cast, Divide, ExprId, Literal, NamedExpression}
+import org.apache.spark.sql.catalyst.expressions.aggregate._
+
+trait AggregatePushDown { // self: ModularPlan =>
+
+  def findPushThroughAggregates(outputList: Seq[NamedExpression],
+      selAliasMap: AttributeMap[Attribute],
+      fact: ModularRelation): Map[Int, (NamedExpression, Seq[NamedExpression])] = {
+    var pushable = true
+    val map = scala.collection.mutable.Map[Int, (NamedExpression, Seq[NamedExpression])]()
+    outputList.zipWithIndex.foreach {
+      // TODO: find out if the first two cases below are really needed. Comment out for now.
+      case (attr: Attribute, i) if !fact.outputSet.contains(attr) => pushable = false
+      case (alias: Alias, i)
+        if alias.child.isInstanceOf[Attribute] &&
+           !fact.outputSet.contains(alias.child.asInstanceOf[Attribute]) => pushable = false
+      case (alias: Alias, i) if alias.child.isInstanceOf[AggregateExpression] =>
+        val res = transformAggregate(
+          alias.child
+            .asInstanceOf[AggregateExpression],
+          selAliasMap,
+          i,
+          fact,
+          map,
+          Some((alias.name, alias.exprId)))
+        if (res.isEmpty) {
+          pushable = false
+        }
+      case (agg: AggregateExpression, i) =>
+        val res = transformAggregate(
+          agg,
+          selAliasMap,
+          i,
+          fact,
+          map,
+          None)
+        if (res.isEmpty) {
+          pushable = false
+        }
+      case _ =>
+    }
+    if (!pushable) {
+      Map.empty[Int, (NamedExpression, Seq[NamedExpression])]
+    } else {
+      map
+    }
+  }
+
+  private def transformAggregate(aggregate: AggregateExpression,
+      selAliasMap: AttributeMap[Attribute],
+      ith: Int,
+      fact: ModularRelation,
+      map: scala.collection.mutable.Map[Int, (NamedExpression, Seq[NamedExpression])],
+      aliasInfo: Option[(String, ExprId)]) = {
+    aggregate match {
+      case cnt@AggregateExpression(Count(exprs), _, false, _) if (exprs.length == 1 && exprs(0)
+        .isInstanceOf[Attribute]) =>
+        val tAttr = selAliasMap.get(exprs(0).asInstanceOf[Attribute]).getOrElse(exprs(0))
+          .asInstanceOf[Attribute]
+        if (fact.outputSet.contains(tAttr)) {
+          val cnt1 = AggregateExpression(Count(tAttr), cnt.mode, false)
+          val alias = Alias(cnt1, cnt1.toString)()
+          val tSum = AggregateExpression(Sum(alias.toAttribute), cnt.mode, false, cnt.resultId)
+          val (name, id) = aliasInfo.getOrElse(("", NamedExpression.newExprId))
+          map += (ith -> (Alias(tSum, name)(exprId = id), Seq(alias)))
+        } else {
+          Map.empty[Int, (NamedExpression, Seq[NamedExpression])]
+        }
+      case cnt@AggregateExpression(Count(exprs), _, false, _) if (exprs.length == 1 && exprs(0)
+        .isInstanceOf[Literal]) =>
+        val cnt1 = AggregateExpression(Count(exprs(0)), cnt.mode, false)
+        val alias = Alias(cnt1, cnt1.toString)()
+        val tSum = AggregateExpression(Sum(alias.toAttribute), cnt.mode, false, cnt.resultId)
+        val (name, id) = aliasInfo.getOrElse(("", NamedExpression.newExprId))
+        map += (ith -> (Alias(tSum, name)(exprId = id), Seq(alias)))
+      case sum@AggregateExpression(Sum(expr), _, false, _) if (expr.isInstanceOf[Attribute]) =>
+        val tAttr = selAliasMap.get(expr.asInstanceOf[Attribute]).getOrElse(expr)
+          .asInstanceOf[Attribute]
+        if (fact.outputSet.contains(tAttr)) {
+          val sum1 = AggregateExpression(Sum(tAttr), sum.mode, false)
+          val alias = Alias(sum1, sum1.toString)()
+          val tSum = AggregateExpression(Sum(alias.toAttribute), sum.mode, false, sum.resultId)
+          val (name, id) = aliasInfo.getOrElse(("", NamedExpression.newExprId))
+          map += (ith -> (Alias(tSum, name)(exprId = id), Seq(alias)))
+        } else {
+          Map.empty[Int, (NamedExpression, Seq[NamedExpression])]
+        }
+      case sum@AggregateExpression(Sum(Cast(expr, dataType, timeZoneId)), _, false, _)
+        if expr.isInstanceOf[Attribute] =>
+        val tAttr = selAliasMap.get(expr.asInstanceOf[Attribute]).getOrElse(expr)
+          .asInstanceOf[Attribute]
+        if (fact.outputSet.contains(tAttr)) {
+          val sum1 = AggregateExpression(Sum(Cast(tAttr, dataType, timeZoneId)), sum.mode, false)
+          val alias = Alias(sum1, sum1.toString)()
+          val tSum = AggregateExpression(Sum(alias.toAttribute), sum.mode, false, sum.resultId)
+          val (name, id) = aliasInfo.getOrElse(("", NamedExpression.newExprId))
+          map += (ith -> (Alias(tSum, name)(exprId = id), Seq(alias)))
+        } else {
+          Map.empty[Int, (NamedExpression, Seq[NamedExpression])]
+        }
+      case sum@AggregateExpression(Sum(expr), _, false, _) if (expr.isInstanceOf[Literal]) =>
+        val sum1 = AggregateExpression(Sum(expr), sum.mode, false)
+        val alias = Alias(sum1, sum1.toString)()
+        val tSum = AggregateExpression(Sum(alias.toAttribute), sum.mode, false, sum.resultId)
+        val (name, id) = aliasInfo.getOrElse(("", NamedExpression.newExprId))
+        map += (ith -> (Alias(tSum, name)(exprId = id), Seq(alias)))
+      case max@AggregateExpression(Max(expr), _, false, _) if (expr.isInstanceOf[Attribute]) =>
+        val tAttr = selAliasMap.get(expr.asInstanceOf[Attribute]).getOrElse(expr)
+          .asInstanceOf[Attribute]
+        if (fact.outputSet.contains(tAttr)) {
+          val max1 = AggregateExpression(Max(tAttr), max.mode, false)
+          val alias = Alias(max1, max1.toString)()
+          val tMax = AggregateExpression(Max(alias.toAttribute), max.mode, false, max.resultId)
+          val (name, id) = aliasInfo.getOrElse(("", NamedExpression.newExprId))
+          map += (ith -> (Alias(tMax, name)(exprId = id), Seq(alias)))
+        } else {
+          Map.empty[Int, (NamedExpression, Seq[NamedExpression])]
+        }
+      case min@AggregateExpression(Min(expr), _, false, _) if (expr.isInstanceOf[Attribute]) =>
+        val tAttr = selAliasMap.get(expr.asInstanceOf[Attribute]).getOrElse(expr)
+          .asInstanceOf[Attribute]
+        if (fact.outputSet.contains(tAttr)) {
+          val min1 = AggregateExpression(Min(tAttr), min.mode, false)
+          val alias = Alias(min1, min1.toString)()
+          val tMin = AggregateExpression(Min(alias.toAttribute), min.mode, false, min.resultId)
+          val (name, id) = aliasInfo.getOrElse(("", NamedExpression.newExprId))
+          map += (ith -> (Alias(tMin, name)(exprId = id), Seq(alias)))
+        } else {
+          Map.empty[Int, (NamedExpression, Seq[NamedExpression])]
+        }
+      case avg@AggregateExpression(Average(expr), _, false, _) if (expr
+        .isInstanceOf[Attribute]) =>
+        val tAttr = selAliasMap.get(expr.asInstanceOf[Attribute]).getOrElse(expr)
+          .asInstanceOf[Attribute]
+        if (fact.outputSet.contains(tAttr)) {
+          val savg = AggregateExpression(Sum(tAttr), avg.mode, false)
+          val cavg = AggregateExpression(Count(tAttr), avg.mode, false)
+          val sAvg = Alias(savg, savg.toString)()
+          val cAvg = Alias(cavg, cavg.toString)()
+          val tAvg = Divide(sAvg.toAttribute, cAvg.toAttribute)
+          val (name, id) = aliasInfo.getOrElse(("", NamedExpression.newExprId))
+          map += (ith -> (Alias(tAvg, name)(exprId = id), Seq(sAvg, cAvg)))
+        } else {
+          Map.empty[Int, (NamedExpression, Seq[NamedExpression])]
+        }
+      case _ => Map.empty[Int, (NamedExpression, Seq[NamedExpression])]
+    }
+  }
+}
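
The rules above split COUNT/SUM/AVG (and MAX/MIN) on a fact-table column into a pre-aggregation
pushed below the join and a re-aggregation kept above it. A standalone sketch in plain Scala
(made-up data, no Spark) of the arithmetic identities this decomposition relies on:

    object AggregatePushDownIdentity {
      def main(args: Array[String]): Unit = {
        val factValues = Seq(1.0, 2.0, 3.0, 4.0, 5.0)
        // pretend the fact table is pre-aggregated into two groups before the join
        val groups = factValues.grouped(3).toSeq

        val partialCounts = groups.map(_.size.toLong)
        val partialSums   = groups.map(_.sum)

        assert(partialCounts.sum == factValues.size)        // COUNT == SUM of partial counts
        assert(partialSums.sum == factValues.sum)           // SUM   == SUM of partial sums
        assert(partialSums.sum / partialCounts.sum ==
               factValues.sum / factValues.size)            // AVG   == pushed SUM / pushed COUNT
      }
    }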

http://git-wip-us.apache.org/repos/asf/carbondata/blob/ffddba70/datamap/mv/plan/src/main/scala/org/apache/carbondata/mv/plans/modular/Flags.scala
----------------------------------------------------------------------
diff --git a/datamap/mv/plan/src/main/scala/org/apache/carbondata/mv/plans/modular/Flags.scala b/datamap/mv/plan/src/main/scala/org/apache/carbondata/mv/plans/modular/Flags.scala
new file mode 100644
index 0000000..2b9d8cf
--- /dev/null
+++ b/datamap/mv/plan/src/main/scala/org/apache/carbondata/mv/plans/modular/Flags.scala
@@ -0,0 +1,71 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.carbondata.mv.plans.modular
+
+trait Flags {
+  // each query allows only one of each of the following keywords
+  final val DISTINCT = 1L << 0
+  final val LIMIT = 1L << 1
+  final val SORT = 1L << 2
+  final val GLOBAL = 1L << 3
+  final val LOCAL = 1L << 4
+  final val EXPAND = 1L << 5
+
+  // to determine which keyword each Seq[Expression] in the varargs belongs to
+  //  final val SortLimit = SORT | LIMIT
+
+  def flagToString(flag: Long): String = {
+    flag match {
+      case DISTINCT => "DISTINCT"
+      case LIMIT => "LIMIT"
+    }
+  }
+
+  // List of the raw flags that have expressions as arguments
+  // TODO: add EXPAND
+  private def pickledWithExpressions = {
+    Array[Long](SORT, LIMIT, EXPAND)
+  }
+
+  final val MaxBitPosition = 6
+
+  final val pickledListOrder: List[Long] = {
+    val all = 0 to MaxBitPosition map (1L << _)
+    all.toList filter (pickledWithExpressions contains _)
+  }
+  final val rawFlagPickledOrder: Array[Long] = pickledListOrder.toArray
+
+  type FlagSet = Long
+
+  val NoFlags: FlagSet = 0L
+
+  implicit class FlagSetUtils(var flags: FlagSet) {
+    def hasFlag(mask: Long): Boolean = (flags & mask) != 0L
+
+    def hasFlag(mask: Int): Boolean = hasFlag(mask.toLong)
+
+    def setFlag(mask: Long): FlagSet = { flags |= mask; flags }
+
+    def resetFlag(mask: Long): FlagSet = { flags &= ~mask; flags }
+
+    def initFlags(mask: Long): FlagSet = { flags = mask; flags }
+  }
+
+}
+
+object Flags extends Flags
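
A short spark-shell style sketch of how these bit flags are combined and queried through the
FlagSetUtils implicit above; the flag names come straight from the trait:

    import org.apache.carbondata.mv.plans.modular.Flags._

    var flags: FlagSet = NoFlags
    flags = flags.setFlag(DISTINCT)      // query is SELECT DISTINCT ...
    flags = flags.setFlag(LIMIT)         // query has a LIMIT clause
    flags.hasFlag(DISTINCT)              // true
    flags.hasFlag(SORT)                  // false
    flags = flags.resetFlag(LIMIT)
    flags.hasFlag(LIMIT)                 // false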

http://git-wip-us.apache.org/repos/asf/carbondata/blob/ffddba70/datamap/mv/plan/src/main/scala/org/apache/carbondata/mv/plans/modular/Harmonizer.scala
----------------------------------------------------------------------
diff --git a/datamap/mv/plan/src/main/scala/org/apache/carbondata/mv/plans/modular/Harmonizer.scala b/datamap/mv/plan/src/main/scala/org/apache/carbondata/mv/plans/modular/Harmonizer.scala
new file mode 100644
index 0000000..91201a1
--- /dev/null
+++ b/datamap/mv/plan/src/main/scala/org/apache/carbondata/mv/plans/modular/Harmonizer.scala
@@ -0,0 +1,236 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.carbondata.mv.plans.modular
+
+import org.apache.spark.sql.catalyst.expressions._
+import org.apache.spark.sql.catalyst.plans._
+import org.apache.spark.sql.catalyst.rules._
+import org.apache.spark.sql.internal.SQLConf
+
+import org.apache.carbondata.mv.plans
+import org.apache.carbondata.mv.plans._
+import org.apache.carbondata.mv.plans.modular.Flags._
+
+abstract class Harmonizer(conf: SQLConf)
+  extends RuleExecutor[ModularPlan] {
+
+  //  protected val fixedPoint = FixedPoint(conf.getConfString("spark.mv.harmonizer
+  // .maxIterations").toInt)
+  protected val fixedPoint = FixedPoint(conf.optimizerMaxIterations)
+
+  def batches: Seq[Batch] = {
+    Batch(
+      "Data Harmonizations", fixedPoint,
+      HarmonizeDimensionTable,
+      HarmonizeFactTable) :: Nil
+  }
+}
+
+/**
+ * A default Harmonizer
+ */
+object DefaultHarmonizer extends DefaultHarmonizer
+
+class DefaultHarmonizer extends Harmonizer(new SQLConf())
+
+object HarmonizeDimensionTable extends Rule[ModularPlan] with PredicateHelper {
+
+  def apply(plan: ModularPlan): ModularPlan = {
+    plan transform {
+      case s@Select(_, _, _, _, jedges, fact :: dims, _, _, _, _) if
+      jedges.forall(e => e.joinType == LeftOuter || e.joinType == Inner) &&
+      fact.isInstanceOf[ModularRelation] &&
+      dims.filterNot(_.isInstanceOf[modular.LeafNode]).nonEmpty &&
+      dims.forall(d => (d.isInstanceOf[ModularRelation] || HarmonizedRelation.canHarmonize(d))) => {
+        var tPullUpPredicates = Seq.empty[Expression]
+        val tChildren = fact :: dims.map {
+          case m: ModularRelation => m
+          case h@GroupBy(
+          _,
+          _,
+          _,
+          _,
+          s1@Select(_, _, _, _, _, dim :: Nil, NoFlags, Nil, Nil, _),
+          NoFlags,
+          Nil, _) if (dim.isInstanceOf[ModularRelation]) => {
+            val rAliasMap = AttributeMap(h.outputList
+              .collect { case a: Alias => (a.child.asInstanceOf[Attribute], a.toAttribute) })
+            val pullUpPredicates = s1.predicateList
+              .map(replaceAlias(_, rAliasMap.asInstanceOf[AttributeMap[Expression]]))
+            if (pullUpPredicates.forall(cond => canEvaluate(cond, h))) {
+              tPullUpPredicates = tPullUpPredicates ++ pullUpPredicates
+              plans.modular.HarmonizedRelation(h.copy(child = s1.copy(predicateList = Nil)))
+            } else {
+              h
+            }
+          }
+          // case _ =>
+        }
+        if (tChildren.forall(_.isInstanceOf[modular.LeafNode])) {
+          s.copy(predicateList = s.predicateList ++ tPullUpPredicates, children = tChildren)
+        } else {
+          s
+        }
+      }
+      //        s.withNewChildren(fact :: dims.map { case m: modular.ModularRelation => m; case h
+      // => HarmonizedRelation(h) })}
+      //        s.copy(predicateList = predicateList ++ moveUpPredicates, children = tChildren)}
+      // fact :: dims.map { case m: modular.ModularRelation => m; case h => HarmonizedRelation(h)
+      // })}
+    }
+  }
+
+}
+
+object HarmonizeFactTable extends Rule[ModularPlan] with PredicateHelper with AggregatePushDown {
+
+  def apply(plan: ModularPlan): ModularPlan = {
+    plan transform {
+      case g@GroupBy(_, _, _, _,
+        s@Select(_, _, _, aliasm, jedges, fact :: dims, _, _, _, _), _, _, _)
+        if s.adjacencyList.keySet.size <= 1 &&
+           jedges.forall(e => e.joinType == Inner) && // !s.flags.hasFlag(DISTINCT) &&
+           fact.isInstanceOf[ModularRelation] &&
+           (fact :: dims).forall(_.isInstanceOf[modular.LeafNode]) &&
+           dims.nonEmpty => {
+        val selAliasMap = AttributeMap(s.outputList.collect {
+          case a: Alias if (a.child.isInstanceOf[Attribute]) => (a.toAttribute, a.child
+            .asInstanceOf[Attribute])
+        })
+        val aggTransMap = findPushThroughAggregates(
+          g.outputList,
+          selAliasMap,
+          fact.asInstanceOf[ModularRelation])
+
+        val constraintsAttributeSet = dims.flatMap(s.extractEvaluableConditions(_))
+          .map(_.references)
+          .foldLeft(AttributeSet.empty)(_ ++ _)
+        val groupingAttributeSet = g.predicateList.map(_.references)
+          .foldLeft(AttributeSet.empty)(_ ++ _)
+        if (aggTransMap.isEmpty ||
+            // TODO: the following condition is too pessimistic, more work needed using methods
+            // similar to those in trait
+            //      QueryPlanConstraints
+            !constraintsAttributeSet.subsetOf(groupingAttributeSet)) {
+          g
+        } else {
+          val starJExprs = dims.flatMap(dim => s.extractJoinConditions(fact, dim)).toSeq
+          val gJAttributes = starJExprs.map(expr => expr.references)
+            .foldLeft(AttributeSet.empty)(_ ++ _).filter(fact.outputSet.contains(_))
+          val fExprs = s.extractEvaluableConditions(fact)
+          val gFAttributes = fExprs.map(expr => expr.references)
+            .foldLeft(AttributeSet.empty)(_ ++ _)
+            .filter(fact.outputSet.contains(_))
+          val gGAttributes = g.predicateList.map(expr => expr.references)
+            .foldLeft(AttributeSet.empty)(_ ++ _).filter(fact.outputSet.contains(_))
+          val gAttributes = (gJAttributes ++ gFAttributes ++ gGAttributes).toSeq
+
+          val oAggregates = aggTransMap.map(_._2).flatMap(_._2).toSeq
+
+          val tAliasMap = (aliasm.get(0) match {
+            case Some(name) => Seq((0, name));
+            case _ => Seq.empty
+          }).toMap
+          val sOutput = (oAggregates.map(_.references).foldLeft(AttributeSet.empty)(_ ++ _) ++
+                         AttributeSet(gAttributes)).toSeq
+          val hFactSel = plans.modular
+            .Select(
+              sOutput,
+              fact.output,
+              Seq.empty,
+              tAliasMap,
+              Seq.empty,
+              fact :: Nil,
+              NoFlags,
+              Seq.empty,
+              Seq.empty)
+          val hFact = plans.modular
+            .GroupBy(
+              gAttributes ++ oAggregates,
+              sOutput,
+              gAttributes,
+              None,
+              hFactSel,
+              NoFlags,
+              Seq.empty)
+          val hFactName = s"gen_harmonized_${
+            fact.asInstanceOf[ModularRelation]
+              .databaseName
+          }_${ fact.asInstanceOf[ModularRelation].tableName }"
+          val hAliasMap = (aliasm - 0) + (0 -> hFactName)
+          val hInputList = gAttributes ++ oAggregates.map(_.toAttribute) ++
+                           dims.flatMap(_.asInstanceOf[modular.LeafNode].output).toSeq
+          // val hPredicateList = s.predicateList
+          val attrOutputList = s.outputList.filter(expr => (expr.isInstanceOf[Attribute]) ||
+                                                           (expr.isInstanceOf[Alias] &&
+                                                            expr.asInstanceOf[Alias].child
+                                                              .isInstanceOf[Attribute]))
+          val aggOutputList = aggTransMap.values.flatMap(t => t._2)
+            .map { ref =>
+              AttributeReference(ref.name, ref.dataType)(
+                exprId = ref.exprId,
+                qualifier = Some(hFactName))
+            }
+          val hFactOutputSet = hFact.outputSet
+          // Update the outputlist qualifier
+          val hOutputList = (attrOutputList ++ aggOutputList).map {attr =>
+            attr.transform {
+              case ref: Attribute if hFactOutputSet.contains(ref) =>
+                AttributeReference(ref.name, ref.dataType)(
+                  exprId = ref.exprId,
+                  qualifier = Some(hFactName))
+            }
+          }.asInstanceOf[Seq[NamedExpression]]
+
+          // Update the predicate qualifier
+          val hPredList = s.predicateList.map{ pred =>
+            pred.transform {
+              case ref: Attribute if hFactOutputSet.contains(ref) =>
+                AttributeReference(ref.name, ref.dataType)(
+                  exprId = ref.exprId,
+                  qualifier = Some(hFactName))
+            }
+          }
+          val hSel = s.copy(
+              outputList = hOutputList,
+              inputList = hInputList,
+              aliasMap = hAliasMap,
+              predicateList = hPredList,
+              children = hFact :: dims)
+          val gOutputList = g.outputList.zipWithIndex
+            .map { case (expr, index) =>
+              if (aggTransMap.keySet.contains(index)) {
+                aggTransMap(index)
+                  ._1
+              } else {
+                expr
+              }
+            }
+
+          val wip = g.copy(outputList = gOutputList, inputList = hInputList, child = hSel)
+          wip.transformExpressions {
+            case ref: Attribute if hFactOutputSet.contains(ref) =>
+              AttributeReference(ref.name, ref.dataType)(
+                exprId = ref.exprId,
+                qualifier = Some(hFactName))
+          }
+        }
+      }
+    }
+  }
+}
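
A usage sketch for the two rules above, assuming a SparkSession named `spark` with hypothetical
tables `fact` and `dim` already registered; harmonization is normally reached through
ModularPlan.harmonized rather than by invoking the rule executor directly:

    import org.apache.carbondata.mv.plans.modular.SimpleModularizer

    // hypothetical star-join query over tables `fact` and `dim`
    val logical = spark.sql(
      "SELECT f.k, sum(f.v) FROM fact f JOIN dim d ON f.k = d.k GROUP BY f.k"
    ).queryExecution.optimizedPlan

    val modularPlan = SimpleModularizer.modularize(logical).next()
    val harmonizedPlan = modularPlan.harmonized   // runs HarmonizeDimensionTable / HarmonizeFactTable
    println(harmonizedPlan.asCompactSQL)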

http://git-wip-us.apache.org/repos/asf/carbondata/blob/ffddba70/datamap/mv/plan/src/main/scala/org/apache/carbondata/mv/plans/modular/ModularPatterns.scala
----------------------------------------------------------------------
diff --git a/datamap/mv/plan/src/main/scala/org/apache/carbondata/mv/plans/modular/ModularPatterns.scala b/datamap/mv/plan/src/main/scala/org/apache/carbondata/mv/plans/modular/ModularPatterns.scala
new file mode 100644
index 0000000..c546c6e
--- /dev/null
+++ b/datamap/mv/plan/src/main/scala/org/apache/carbondata/mv/plans/modular/ModularPatterns.scala
@@ -0,0 +1,237 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.carbondata.mv.plans.modular
+
+import org.apache.spark.sql.catalyst.expressions.{Expression, NamedExpression, PredicateHelper, _}
+import org.apache.spark.sql.catalyst.plans.logical.{Distinct, Limit, LogicalPlan, Window}
+
+import org.apache.carbondata.mv.plans.{Pattern, _}
+import org.apache.carbondata.mv.plans.modular.Flags._
+import org.apache.carbondata.mv.plans.util.{ExtractGroupByModule, ExtractSelectModule, ExtractSelectModuleForWindow, ExtractTableModule, ExtractUnionModule}
+
+object SimpleModularizer extends ModularPatterns {
+  def patterns: Seq[Pattern] = {
+    SelectModule ::
+    GroupByModule ::
+    UnionModule ::
+    DataSourceModule :: Nil
+  }
+
+  override protected def collectPlaceholders(plan: ModularPlan): Seq[(ModularPlan, LogicalPlan)] = {
+    plan.collect {
+      case placeholder@ModularizeLater(logicalPlan) => placeholder -> logicalPlan
+    }
+  }
+
+  override protected def prunePlans(plans: Iterator[ModularPlan]): Iterator[ModularPlan] = {
+    plans
+    //    plans.filter(_.collect { case n if n.subqueries.nonEmpty => n }.isEmpty)
+    // TODO: find out why the following statement is not working
+    //    plans.filter(_.find { case n if n.subqueries.nonEmpty => true }.isEmpty)
+  }
+
+  override protected def makeupAliasMappings(
+      plans: Iterator[ModularPlan]): Iterator[ModularPlan] = {
+    def makeup(plan: ModularPlan): ModularPlan = {
+      plan transform {
+        case g@GroupBy(_, _, _, _, s@Select(_, _, _, aliasmap, _, children, _, _, _, _), _, _, _) =>
+          val aq = AttributeSet(g.outputList).filter(_.qualifier.nonEmpty)
+          val makeupmap = children.zipWithIndex.flatMap {
+            case (child, i) =>
+              aq.find(child.outputSet.contains(_)).map(_.qualifier).flatten.map((i, _))
+          }.toMap
+          g.copy(child = s.copy(aliasMap = makeupmap ++ aliasmap))
+      }
+    }
+
+    plans.map(makeup)
+  }
+}
+
+abstract class ModularPattern extends GenericPattern[ModularPlan] {
+
+  override protected def modularizeLater(plan: LogicalPlan): ModularPlan = ModularizeLater(plan)
+}
+
+case class ModularizeLater(plan: LogicalPlan) extends LeafNode {
+  override def output: Seq[Attribute] = plan.output
+}
+
+abstract class ModularPatterns extends Modularizer[ModularPlan] {
+
+  //  self: MQOContext#SparkyModeler =>
+
+  object SelectModule extends Pattern with PredicateHelper {
+
+    private[this] def makeSelectModule(
+        output: Seq[NamedExpression],
+        input: Seq[Expression],
+        predicate: Seq[Expression],
+        aliasmap: Map[Int, String],
+        joinedge: Seq[JoinEdge],
+        flags: FlagSet,
+        children: Seq[ModularPlan],
+        flagSpec: Seq[Seq[Any]],
+        windowSpec: Seq[Seq[Any]]) = {
+      Seq(Select(
+        output,
+        input,
+        predicate,
+        aliasmap,
+        joinedge,
+        children,
+        flags,
+        flagSpec,
+        windowSpec))
+    }
+
+    def apply(plan: LogicalPlan): Seq[ModularPlan] = {
+      plan match {
+        case Distinct(
+          ExtractSelectModule(output, input, predicate, aliasmap, joinedge, children, flags1,
+          fspec1, wspec)) =>
+          val flags = flags1.setFlag(DISTINCT)
+          makeSelectModule(output, input, predicate, aliasmap, joinedge, flags,
+            children.map(modularizeLater), fspec1, wspec)
+
+        case Limit(
+          limitExpr,
+          Distinct(
+            ExtractSelectModule(output, input, predicate, aliasmap, joinedge, children,
+              flags1, fspec1, wspec))) =>
+          val flags = flags1.setFlag(DISTINCT).setFlag(LIMIT)
+          makeSelectModule(output, input, predicate, aliasmap, joinedge, flags,
+            children.map(modularizeLater), Seq(Seq(limitExpr)) ++ fspec1, wspec)
+
+        case Limit(
+          limitExpr,
+          ExtractSelectModule(output, input, predicate, aliasmap, joinedge, children, flags1,
+            fspec1, wspec)) =>
+          val flags = flags1.setFlag(LIMIT)
+          makeSelectModule(output, input, predicate, aliasmap, joinedge, flags,
+            children.map(modularizeLater), Seq(Seq(limitExpr)) ++ fspec1, wspec)
+
+        case ExtractSelectModule(output, input, predicate, aliasmap, joinedge, children, flags1,
+        fspec1, wspec) =>
+          makeSelectModule(output, input, predicate, aliasmap, joinedge, flags1,
+            children.map(modularizeLater), fspec1, wspec)
+
+        case Window(exprs, _, _,
+          ExtractSelectModuleForWindow(output, input, predicate, aliasmap, joinedge, children,
+            flags1, fspec1, wspec)) =>
+          val sel1 = makeSelectModule(output, input, predicate, aliasmap, joinedge, flags1,
+            children.map(modularizeLater), fspec1, wspec)
+          makeSelectModule(
+            output.map(_.toAttribute),
+            output.map(_.toAttribute),
+            Seq.empty,
+            Map.empty,
+            Seq.empty,
+            NoFlags,
+            sel1,
+            Seq.empty,
+            Seq(Seq(exprs)) ++ wspec)
+
+        case _ => Nil
+      }
+    }
+  }
+
+  object GroupByModule extends Pattern with PredicateHelper {
+
+    private[this] def makeGroupByModule(
+        output: Seq[NamedExpression],
+        input: Seq[Expression],
+        predicate: Seq[Expression],
+        flags: FlagSet,
+        alias: Option[String],
+        child: ModularPlan,
+        fspec: Seq[Seq[Any]]) = {
+      val groupby = Some(GroupBy(output, input, predicate, alias, child, flags, fspec))
+      groupby.map(Seq(_)).getOrElse(Nil)
+    }
+
+    def apply(plan: LogicalPlan): Seq[ModularPlan] = {
+      plan match {
+        case Limit(
+        limitExpr,
+        ExtractGroupByModule(output, input, predicate, alias, child, flags1, fspec1)) =>
+          val flags = flags1.setFlag(LIMIT)
+          makeGroupByModule(
+            output,
+            input,
+            predicate,
+            flags,
+            alias,
+            modularizeLater(child),
+            Seq(Seq(limitExpr)) ++ fspec1)
+        case ExtractGroupByModule(output, input, predicate, alias, child, flags1, fspec1) =>
+          makeGroupByModule(output, input, predicate, flags1, alias, modularizeLater(child), fspec1)
+        case _ => Nil
+      }
+    }
+  }
+
+  object UnionModule extends Pattern with PredicateHelper {
+
+    private[this] def makeUnionModule(
+        flags: FlagSet,
+        children: Seq[ModularPlan],
+        fspec: Seq[Seq[Any]]) = {
+      Seq(modular.Union(children, flags, fspec))
+    }
+
+    def apply(plan: LogicalPlan): Seq[ModularPlan] = {
+      plan match {
+        case Distinct(ExtractUnionModule(children, flags1, fspec1)) =>
+          val flags = flags1.setFlag(DISTINCT)
+          makeUnionModule(flags, children.map(modularizeLater), fspec1)
+        case Limit(limitExpr, Distinct(ExtractUnionModule(children, flags1, fspec1))) =>
+          val flags = flags1.setFlag(DISTINCT).setFlag(LIMIT)
+          makeUnionModule(flags, children.map(modularizeLater), Seq(Seq(limitExpr)) ++ fspec1)
+        case Limit(limitExpr, ExtractUnionModule(children, flags1, fspec1)) =>
+          val flags = flags1.setFlag(LIMIT)
+          makeUnionModule(flags, children.map(modularizeLater), Seq(Seq(limitExpr)) ++ fspec1)
+        case ExtractUnionModule(children, flags1, fspec1) =>
+          makeUnionModule(flags1, children.map(modularizeLater), fspec1)
+        case _ => Nil
+      }
+    }
+  }
+
+  object DataSourceModule extends Pattern with Flags with PredicateHelper {
+
+    private[this] def makeDataSourceModule(
+        databaseName: String,
+        tableName: String,
+        output: Seq[NamedExpression],
+        flags: FlagSet,
+        fspec: Seq[Seq[Any]]) = {
+      Seq(ModularRelation(databaseName, tableName, output, flags, fspec))
+    }
+
+    def apply(plan: LogicalPlan): Seq[ModularPlan] = {
+      plan match {
+        case ExtractTableModule(databaseName, tableName, output, Nil, flags1, fspec1) =>
+          makeDataSourceModule(databaseName, tableName, output, flags1, fspec1)
+        case _ => Nil
+      }
+    }
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/carbondata/blob/ffddba70/datamap/mv/plan/src/main/scala/org/apache/carbondata/mv/plans/modular/ModularPlan.scala
----------------------------------------------------------------------
diff --git a/datamap/mv/plan/src/main/scala/org/apache/carbondata/mv/plans/modular/ModularPlan.scala b/datamap/mv/plan/src/main/scala/org/apache/carbondata/mv/plans/modular/ModularPlan.scala
new file mode 100644
index 0000000..c66df19
--- /dev/null
+++ b/datamap/mv/plan/src/main/scala/org/apache/carbondata/mv/plans/modular/ModularPlan.scala
@@ -0,0 +1,206 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.carbondata.mv.plans.modular
+
+import scala.collection._
+import scala.collection.mutable.{HashMap, MultiMap}
+
+import org.apache.spark.internal.Logging
+import org.apache.spark.sql.SparkSession
+import org.apache.spark.sql.catalyst.expressions.{Expression, PredicateHelper}
+import org.apache.spark.sql.catalyst.plans.{JoinType, QueryPlan}
+import org.apache.spark.sql.catalyst.plans.logical.Statistics
+import org.apache.spark.sql.catalyst.trees.TreeNode
+import org.apache.spark.sql.internal.SQLConf
+
+import org.apache.carbondata.mv.plans._
+import org.apache.carbondata.mv.plans.util.{Printers, Signature, SQLBuilder}
+
+abstract class ModularPlan
+  extends QueryPlan[ModularPlan]
+    with AggregatePushDown
+    with Logging
+    with Serializable
+    with PredicateHelper with Printers {
+
+  /**
+   * The first two members below are to support sub-query expressions.
+   */
+
+  lazy val resolved: Boolean = expressions.forall(_.resolved) && childrenResolved
+
+  def childrenResolved: Boolean = children.forall(_.resolved)
+
+  private var statsCache: Option[Statistics] = None
+
+  final def stats(spark: SparkSession, conf: SQLConf): Statistics = {
+    statsCache.getOrElse {
+      statsCache = Some(computeStats(spark, conf))
+      statsCache.get
+    }
+  }
+
+  final def invalidateStatsCache(): Unit = {
+    statsCache = None
+    children.foreach(_.invalidateStatsCache())
+  }
+
+  protected def computeStats(spark: SparkSession, conf: SQLConf): Statistics = {
+    //    spark.conf.set("spark.sql.cbo.enabled", true)
+    val sqlStmt = asOneLineSQL
+    val plan = spark.sql(sqlStmt).queryExecution.optimizedPlan
+    plan.stats(conf)
+  }
+
+  override def fastEquals(other: TreeNode[_]): Boolean = {
+    this.eq(other)
+  }
+
+  private var _rewritten: Boolean = false
+
+  /**
+   * Marks this plan as already rewritten.
+   */
+  private[mv] def setRewritten(): ModularPlan = {
+    _rewritten = true
+    children.foreach(_.setRewritten())
+    this
+  }
+
+  /**
+   * Returns true if this node and its children have already gone through query rewrite.
+   * Note that this is only an optimization used to avoid rewriting trees that have already
+   * been rewritten, and can be reset by transformations.
+   */
+  def rewritten: Boolean = {
+    _rewritten
+  }
+
+  private var _skip: Boolean = false
+
+  private[mv] def setSkip(): ModularPlan = {
+    _skip = true
+    children.foreach(_.setSkip())
+    this
+  }
+
+  private[mv] def resetSkip(): ModularPlan = {
+    _skip = false
+    children.foreach(_.resetSkip())
+    this
+  }
+
+  def skip: Boolean = _skip
+
+  def isSPJGH: Boolean = {
+    this match {
+      case modular.Select(_, _, _, _, _,
+        Seq(modular.GroupBy(_, _, _, _,
+        sel_c2@modular.Select(_, _, _, _, _, _, _, _, _, _), _, _, _)), _, _, _, _)
+        if sel_c2.children.forall(_.isInstanceOf[modular.LeafNode]) => true
+
+      case modular.GroupBy(_, _, _, _, sel_c2@modular.Select(_, _, _, _, _, _, _, _, _, _), _, _, _)
+        if sel_c2.children.forall(_.isInstanceOf[modular.LeafNode]) => true
+
+      case modular.Select(_, _, _, _, _, children, _, _, _, _)
+        if children.forall(_.isInstanceOf[modular.LeafNode]) => true
+
+      case _ => false
+    }
+  }
+
+  def signature: Option[Signature] = ModularPlanSignatureGenerator.generate(this)
+
+  def createMutableAdjacencyList(
+      edges: Seq[JoinEdge]
+  ): mutable.HashMap[Int, mutable.Set[(Int, JoinType)]] with mutable.MultiMap[Int, (Int, JoinType)]
+  = {
+    val mm = new HashMap[Int, mutable.Set[(Int, JoinType)]] with MultiMap[Int, (Int, JoinType)]
+    for (edge <- edges) { mm.addBinding(edge.left, (edge.right, edge.joinType)) }
+    mm
+  }
+
+  def createImmutableAdjacencyList(edges: Seq[JoinEdge]): Predef.Map[Int, Seq[(Int, JoinType)]] = {
+    edges.groupBy { _.left }.map { case (k, v) => (k, v.map(e => (e.right, e.joinType))) }
+  }
+
+  def adjacencyList: Map[Int, Seq[(Int, JoinType)]] = Map.empty
+
+  def extractJoinConditions(left: ModularPlan, right: ModularPlan): Seq[Expression] = Seq.empty
+
+  def extractRightEvaluableConditions(left: ModularPlan, right: ModularPlan): Seq[Expression] =
+    Seq.empty
+
+  def extractEvaluableConditions(plan: ModularPlan): Seq[Expression] = Seq.empty
+
+  def asCompactSQL: String = asCompactString(new SQLBuilder(this).fragmentExtract)
+
+  def asOneLineSQL: String = asOneLineString(new SQLBuilder(this).fragmentExtract)
+
+  // for plans without sub-query expressions only
+  def asOneLineSQL(subqueryPrefix: String): String = {
+    asOneLineString(new SQLBuilder(
+      this,
+      subqueryPrefix).fragmentExtract)
+  }
+
+  /**
+   * Returns a plan where a best effort attempt has been made to transform `this` in a way
+   * that preserves the result but replaces harmonized dimension tables with HarmonizedRelation
+   * and the fact table with a sub-plan that pre-aggregates it before joining with the dimension tables.
+   *
+   * Some nodes should overwrite this to provide proper harmonization logic.
+   */
+  lazy val harmonized: ModularPlan = DefaultHarmonizer.execute(preHarmonized)
+
+  /**
+   * Do some simple transformations on this plan before harmonizing. Implementations can override
+   * this method to provide customized harmonization logic without rewriting the whole flow.
+   *
+   * We assume queries need to be harmonized are of the form:
+   *
+   * FACT (left) join (harmonized) DIM1 (left) join (harmonized) DIM2 ...
+   *
+   * For queries not of this form, customize this method to make them conform to this form.
+   */
+  protected def preHarmonized: ModularPlan = {
+    this
+  }
+}
+
+object ModularPlan extends PredicateHelper {
+
+}
+
+abstract class LeafNode extends ModularPlan {
+  override def children: Seq[ModularPlan] = Nil
+}
+
+abstract class UnaryNode extends ModularPlan {
+  def child: ModularPlan
+
+  override def children: Seq[ModularPlan] = child :: Nil
+}
+
+abstract class BinaryNode extends ModularPlan {
+  def left: ModularPlan
+
+  def right: ModularPlan
+
+  override def children: Seq[ModularPlan] = Seq(left, right)
+}
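
A brief spark-shell style sketch of the helpers this base class exposes, again assuming a
SparkSession named `spark` and a hypothetical table `t`:

    import org.apache.carbondata.mv.plans.modular.SimpleModularizer

    val logical = spark.sql("SELECT a, count(b) FROM t GROUP BY a").queryExecution.optimizedPlan
    val m = SimpleModularizer.modularize(logical).next()

    m.isSPJGH          // true for a select-project-join-groupby shape over leaf relations
    m.signature        // e.g. Some(Signature(groupby = true, Set("default.t"))) -- illustrative
    m.asOneLineSQL     // SQL text regenerated from the modular plan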

http://git-wip-us.apache.org/repos/asf/carbondata/blob/ffddba70/datamap/mv/plan/src/main/scala/org/apache/carbondata/mv/plans/modular/ModularPlanSignatureGenerator.scala
----------------------------------------------------------------------
diff --git a/datamap/mv/plan/src/main/scala/org/apache/carbondata/mv/plans/modular/ModularPlanSignatureGenerator.scala b/datamap/mv/plan/src/main/scala/org/apache/carbondata/mv/plans/modular/ModularPlanSignatureGenerator.scala
new file mode 100644
index 0000000..86cae36
--- /dev/null
+++ b/datamap/mv/plan/src/main/scala/org/apache/carbondata/mv/plans/modular/ModularPlanSignatureGenerator.scala
@@ -0,0 +1,73 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.carbondata.mv.plans.modular
+
+import org.apache.carbondata.mv.plans._
+import org.apache.carbondata.mv.plans.util.{Signature, SignatureGenerator, SignatureRule}
+
+object ModularPlanSignatureGenerator extends SignatureGenerator[ModularPlan] {
+  lazy val rule: SignatureRule[ModularPlan] = ModularPlanRule
+
+  override def generate(plan: ModularPlan): Option[Signature] = {
+    if (plan.isSPJGH) {
+      super.generate(plan)
+    } else {
+      None
+    }
+  }
+}
+
+object ModularPlanRule extends SignatureRule[ModularPlan] {
+
+  def apply(plan: ModularPlan, childSignatures: Seq[Option[Signature]]): Option[Signature] = {
+
+    plan match {
+      case modular.Select(_, _, _, _, _, _, _, _, _, _) =>
+        if (childSignatures.map { _.getOrElse(Signature()).groupby }.forall(x => !x)) {
+          Some(Signature(
+            groupby = false,
+            childSignatures.flatMap { _.getOrElse(Signature()).datasets.toSeq }.toSet))
+        } else if (childSignatures.length == 1 &&
+                   childSignatures(0).getOrElse(Signature()).groupby) {
+          childSignatures(0)
+        } else {
+          None
+        }
+      case modular.GroupBy(_, _, _, _, _, _, _, _) =>
+        if (childSignatures.length == 1 && !childSignatures(0).getOrElse(Signature()).groupby) {
+          Some(Signature(groupby = true, childSignatures(0).getOrElse(Signature()).datasets))
+        } else {
+          None
+        }
+      case HarmonizedRelation(source) =>
+        source.signature match {
+          case Some(s) =>
+            Some(Signature(groupby = false, s.datasets))
+          case _ =>
+            None
+        }
+      case modular.ModularRelation(dbname, tblname, _, _, _) =>
+        if (dbname != null && tblname != null) {
+          Some(Signature(groupby = false, Set(Seq(dbname, tblname).mkString("."))))
+        } else {
+          Some(Signature(groupby = false, Set(plan.toString())))
+        }
+      case _ => None
+    }
+  }
+}

http://git-wip-us.apache.org/repos/asf/carbondata/blob/ffddba70/datamap/mv/plan/src/main/scala/org/apache/carbondata/mv/plans/modular/ModularRelation.scala
----------------------------------------------------------------------
diff --git a/datamap/mv/plan/src/main/scala/org/apache/carbondata/mv/plans/modular/ModularRelation.scala b/datamap/mv/plan/src/main/scala/org/apache/carbondata/mv/plans/modular/ModularRelation.scala
new file mode 100644
index 0000000..dec0c48
--- /dev/null
+++ b/datamap/mv/plan/src/main/scala/org/apache/carbondata/mv/plans/modular/ModularRelation.scala
@@ -0,0 +1,143 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.carbondata.mv.plans.modular
+
+import org.apache.spark.sql.SparkSession
+import org.apache.spark.sql.catalyst.expressions.{Alias, Attribute, AttributeMap, AttributeReference, NamedExpression}
+import org.apache.spark.sql.catalyst.plans.JoinType
+import org.apache.spark.sql.catalyst.plans.logical
+import org.apache.spark.sql.catalyst.plans.logical.Statistics
+import org.apache.spark.sql.internal.SQLConf
+
+import org.apache.carbondata.mv.plans.modular.Flags._
+
+object ModularRelation {
+  def apply(outputList: NamedExpression*): ModularRelation = {
+    new ModularRelation(
+      "test",
+      "test",
+      outputList,
+      NoFlags,
+      Seq.empty)
+  }
+}
+
+case class ModularRelation(databaseName: String,
+    tableName: String,
+    outputList: Seq[NamedExpression],
+    flags: FlagSet,
+    rest: Seq[Seq[Any]]) extends LeafNode {
+  override def computeStats(spark: SparkSession, conf: SQLConf): Statistics = {
+    val plan = spark.table(s"${ databaseName }.${ tableName }").queryExecution.optimizedPlan
+    val stats = plan.stats(conf)
+    val output = outputList.map(_.toAttribute)
+    val mapSeq = plan.collect { case n: logical.LeafNode => n }.map {
+      table => AttributeMap(table.output.zip(output))
+    }
+    val rewrites = mapSeq(0)
+    val attributeStats = AttributeMap(stats.attributeStats.iterator
+      .map { pair => (rewrites(pair._1), pair._2) }.toSeq)
+    Statistics(stats.sizeInBytes, stats.rowCount, attributeStats, stats.hints)
+  }
+
+  override def output: Seq[Attribute] = outputList.map(_.toAttribute)
+
+  override def adjacencyList: Map[Int, Seq[(Int, JoinType)]] = Map.empty
+
+  override def equals(that: Any): Boolean = {
+    that match {
+      case that: ModularRelation =>
+        if ((databaseName != null && tableName != null && databaseName == that.databaseName &&
+             tableName == that.tableName) ||
+            (databaseName == null && tableName == null && that.databaseName == null &&
+             that.tableName == null && output.toString == (that.output).toString)) {
+          true
+        } else {
+          false
+        }
+      case _ => false
+    }
+  }
+}
+
+object HarmonizedRelation {
+  def canHarmonize(source: ModularPlan): Boolean = {
+    source match {
+      case g@GroupBy(
+      _,
+      _,
+      _,
+      _,
+      Select(_, _, _, _, _, dim :: Nil, NoFlags, Nil, Nil, _),
+      NoFlags,
+      Nil, _) if (dim.isInstanceOf[ModularRelation]) =>
+        if (g.outputList
+          .forall(col => col.isInstanceOf[AttributeReference] ||
+                         (col.isInstanceOf[Alias] &&
+                          col.asInstanceOf[Alias].child.isInstanceOf[AttributeReference]))) {
+          true
+        } else {
+          false
+        }
+      case _ => false
+    }
+  }
+}
+
+// support harmonization for dimension table
+case class HarmonizedRelation(source: ModularPlan) extends LeafNode {
+  require(HarmonizedRelation.canHarmonize(source), "invalid plan for harmonized relation")
+  lazy val tableName = source.asInstanceOf[GroupBy].child.children(0).asInstanceOf[ModularRelation]
+    .tableName
+  lazy val databaseName = source.asInstanceOf[GroupBy].child.children(0)
+    .asInstanceOf[ModularRelation].databaseName
+
+  //  override def computeStats(spark: SparkSession, conf: SQLConf): Statistics = source.stats
+  // (spark, conf)
+  override def computeStats(spark: SparkSession, conf: SQLConf): Statistics = {
+    val plan = spark.table(s"${ databaseName }.${ tableName }").queryExecution.optimizedPlan
+    val stats = plan.stats(conf)
+    val output = source.asInstanceOf[GroupBy].child.children(0).asInstanceOf[ModularRelation]
+      .outputList.map(_.toAttribute)
+    val mapSeq = plan.collect { case n: logical.LeafNode => n }.map {
+      table => AttributeMap(table.output.zip(output))
+    }
+    val rewrites = mapSeq(0)
+    val aliasMap = AttributeMap(
+      source.asInstanceOf[GroupBy].outputList.collect {
+        case a: Alias if (a.child.isInstanceOf[Attribute]) => (a.child.asInstanceOf[Attribute], a
+          .toAttribute)
+      })
+    val aStatsIterator = stats.attributeStats.iterator.map { pair => (rewrites(pair._1), pair._2) }
+    val attributeStats = AttributeMap(aStatsIterator
+      .map(pair => ((aliasMap.get(pair._1)).getOrElse(pair._1), pair._2)).toSeq)
+
+    Statistics(stats.sizeInBytes, None, attributeStats, stats.hints)
+  }
+
+  override def output: Seq[Attribute] = source.output
+
+  // two harmonized modular relations are equal only if the orders of the output columns of
+  // their source plans are exactly the same
+  override def equals(that: Any): Boolean = {
+    that match {
+      case that: HarmonizedRelation => source.sameResult(that.source)
+      case _ => false
+    }
+  }
+}
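
The equality override above compares relations by (database, table) when both names are present
and ignores the output lists. A small sketch with made-up column names:

    import org.apache.spark.sql.catalyst.expressions.AttributeReference
    import org.apache.spark.sql.types.{IntegerType, StringType}

    import org.apache.carbondata.mv.plans.modular.Flags.NoFlags
    import org.apache.carbondata.mv.plans.modular.ModularRelation

    val r1 = ModularRelation("db", "fact",
      Seq(AttributeReference("a", IntegerType)()), NoFlags, Seq.empty)
    val r2 = ModularRelation("db", "fact",
      Seq(AttributeReference("b", StringType)()), NoFlags, Seq.empty)

    r1 == r2   // true: same database and table name, output lists are not compared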

http://git-wip-us.apache.org/repos/asf/carbondata/blob/ffddba70/datamap/mv/plan/src/main/scala/org/apache/carbondata/mv/plans/modular/Modularizer.scala
----------------------------------------------------------------------
diff --git a/datamap/mv/plan/src/main/scala/org/apache/carbondata/mv/plans/modular/Modularizer.scala b/datamap/mv/plan/src/main/scala/org/apache/carbondata/mv/plans/modular/Modularizer.scala
new file mode 100644
index 0000000..d255359
--- /dev/null
+++ b/datamap/mv/plan/src/main/scala/org/apache/carbondata/mv/plans/modular/Modularizer.scala
@@ -0,0 +1,117 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.carbondata.mv.plans.modular
+
+import org.apache.spark.internal.Logging
+import org.apache.spark.sql.catalyst.expressions.{Exists, ListQuery, ScalarSubquery}
+import org.apache.spark.sql.catalyst.plans.logical.LogicalPlan
+import org.apache.spark.sql.catalyst.trees.TreeNode
+
+import org.apache.carbondata.mv.expressions.modular._
+import org.apache.carbondata.mv.plans._
+
+abstract class GenericPattern[TreeType <: TreeNode[TreeType]] extends Logging {
+  protected def modularizeLater(plan: LogicalPlan): TreeType
+
+  def apply(plan: LogicalPlan): Seq[TreeType]
+}
+
+abstract class Modularizer[TreeType <: TreeNode[TreeType]] {
+  def patterns: Seq[GenericPattern[TreeType]]
+
+  // protected def modularizeLater(plan: LogicalPlan) = this.modularize(plan).next()
+
+  def modularize(plan: LogicalPlan): Iterator[TreeType] = {
+    val replaced = plan.transformAllExpressions {
+      case s: ScalarSubquery =>
+        if (s.children.isEmpty) {
+          ScalarModularSubquery(
+            modularize(s.plan).next.asInstanceOf[ModularPlan],
+            s.children,
+            s.exprId)
+        } else {
+          throw new UnsupportedOperationException(s"Expression $s is not canonicalized")
+        }
+      case l: ListQuery =>
+        if (l.children.isEmpty) {
+          ModularListQuery(modularize(l.plan).next.asInstanceOf[ModularPlan], l.children, l.exprId)
+        } else {
+          throw new UnsupportedOperationException(s"Expression $l is not canonicalized")
+        }
+      case e: Exists =>
+        if (e.children.isEmpty) {
+          ModularExists(modularize(e.plan).next.asInstanceOf[ModularPlan], e.children, e.exprId)
+        } else {
+          throw new UnsupportedOperationException(s"Expression $e is not canonicalized")
+        }
+      case o => o
+    }
+    //    val replaced = plan
+    val mplans = modularizeCore(replaced)
+    makeupAliasMappings(mplans)
+  }
+
+  private def modularizeCore(plan: LogicalPlan): Iterator[TreeType] = {
+    // Collect modular plan candidates.
+    val candidates = patterns.iterator.flatMap(_ (plan))
+
+    // The candidates may contain placeholders marked as [[modularizeLater]],
+    // so try to replace them by their child plans.
+    val plans = candidates.flatMap { candidate =>
+      val placeholders = collectPlaceholders(candidate)
+
+      if (placeholders.isEmpty) {
+        // Take the candidate as is because it does not contain placeholders.
+        Iterator(candidate)
+      } else {
+        // Plan the logical plan marked as [[modularizeLater]] and replace the placeholders.
+        placeholders.iterator.foldLeft(Iterator(candidate)) {
+          case (candidatesWithPlaceholders, (placeholder, logicalPlan)) =>
+            // Modularize the logical plan for the placeholder
+            val childPlans = this.modularizeCore(logicalPlan)
+
+            candidatesWithPlaceholders.flatMap { candidateWithPlaceholder =>
+              childPlans.map { childPlan =>
+                // Replace the placeholder by the child plan
+                candidateWithPlaceholder.transformUp {
+                  case p if p == placeholder => childPlan
+                }
+              }
+            }
+        }
+      }
+    }
+
+    val pruned = prunePlans(plans)
+    // val iter = patterns.view.flatMap(_(plan)).toIterator
+    supports(
+      pruned.hasNext,
+      s"Modular plan not supported (e.g. has subquery expression) for \n$plan")
+    //    makeupAliasMappings(pruned)
+    pruned
+  }
+
+  /** Collects placeholders marked as [[modularizeLater]] by pattern and its [[LogicalPlan]]s */
+  protected def collectPlaceholders(plan: TreeType): Seq[(TreeType, LogicalPlan)]
+
+  /** Prunes bad plans to prevent combinatorial explosion. */
+  protected def prunePlans(plans: Iterator[TreeType]): Iterator[TreeType]
+
+  protected def makeupAliasMappings(plans: Iterator[TreeType]): Iterator[TreeType]
+
+}
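
A minimal sketch of driving the modularizer, assuming SimpleModularizer (the concrete
implementation referenced by MVState further down in this digest) and that BirdcageOptimizer
exposes the usual RuleExecutor.execute; the SparkSession, table and query are illustrative only:

    val analyzed = spark.sql(
      "select country, sum(amount) from sales group by country").queryExecution.analyzed
    val optimized = BirdcageOptimizer.execute(analyzed)
    // modularize returns an iterator of candidate modular plans; the first candidate is used
    val modularPlan = SimpleModularizer.modularize(optimized).next()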

http://git-wip-us.apache.org/repos/asf/carbondata/blob/ffddba70/datamap/mv/plan/src/main/scala/org/apache/carbondata/mv/plans/modular/basicOperators.scala
----------------------------------------------------------------------
diff --git a/datamap/mv/plan/src/main/scala/org/apache/carbondata/mv/plans/modular/basicOperators.scala b/datamap/mv/plan/src/main/scala/org/apache/carbondata/mv/plans/modular/basicOperators.scala
new file mode 100644
index 0000000..0a76da1
--- /dev/null
+++ b/datamap/mv/plan/src/main/scala/org/apache/carbondata/mv/plans/modular/basicOperators.scala
@@ -0,0 +1,86 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.carbondata.mv.plans.modular
+
+import org.apache.spark.sql.catalyst.expressions.{Attribute, _}
+import org.apache.spark.sql.catalyst.plans.JoinType
+
+import org.apache.carbondata.mv.plans._
+import org.apache.carbondata.mv.plans.modular.Flags._
+
+private[mv] trait Matchable extends ModularPlan {
+  def outputList: Seq[NamedExpression]
+
+  def predicateList: Seq[Expression]
+}
+
+case class GroupBy(
+    outputList: Seq[NamedExpression],
+    inputList: Seq[Expression],
+    predicateList: Seq[Expression],
+    alias: Option[String],
+    child: ModularPlan,
+    flags: FlagSet,
+    flagSpec: Seq[Seq[Any]],
+    dataMapTableRelation: Option[ModularPlan] = None) extends UnaryNode with Matchable {
+  override def output: Seq[Attribute] = outputList.map(_.toAttribute)
+}
+
+case class Select(
+    outputList: Seq[NamedExpression],
+    inputList: Seq[Expression],
+    predicateList: Seq[Expression],
+    aliasMap: Map[Int, String],
+    joinEdges: Seq[JoinEdge],
+    children: Seq[ModularPlan],
+    flags: FlagSet,
+    flagSpec: Seq[Seq[Any]],
+    windowSpec: Seq[Seq[Any]],
+    dataMapTableRelation: Option[ModularPlan] = None) extends ModularPlan with Matchable {
+  override def output: Seq[Attribute] = outputList.map(_.toAttribute)
+
+  override def adjacencyList: scala.collection.immutable.Map[Int, Seq[(Int, JoinType)]] = {
+    joinEdges.groupBy { _.left }.map { case (k, v) => (k, v.map(e => (e.right, e.joinType))) }
+  }
+
+  override def extractJoinConditions(
+      left: ModularPlan, right: ModularPlan): Seq[Expression] = {
+    predicateList.filter(p => p.references.intersect(left.outputSet).nonEmpty &&
+                              p.references.intersect(right.outputSet).nonEmpty &&
+                              p.references.subsetOf(left.outputSet ++ right.outputSet))
+  }
+
+  override def extractRightEvaluableConditions(
+      left: ModularPlan, right: ModularPlan): Seq[Expression] = {
+    predicateList.filter(p => p.references.subsetOf(left.outputSet ++ right.outputSet) &&
+                              p.references.intersect(right.outputSet).nonEmpty)
+  }
+
+  override def extractEvaluableConditions(plan: ModularPlan): Seq[Expression] = {
+    predicateList.filter(p => canEvaluate(p, plan))
+  }
+}
+
+case class Union(children: Seq[ModularPlan], flags: FlagSet, flagSpec: Seq[Seq[Any]])
+  extends ModularPlan {
+  override def output: Seq[Attribute] = children.head.output
+}
+
+case object OneRowTable extends LeafNode {
+  override def output: Seq[Attribute] = Nil
+}
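
The predicate classification behind Select.extractJoinConditions can be reproduced with plain
Catalyst expressions; the attributes and literal below are illustrative only:

    import org.apache.spark.sql.catalyst.expressions._
    import org.apache.spark.sql.types.IntegerType

    val a = AttributeReference("a", IntegerType)()          // output of the left child
    val b = AttributeReference("b", IntegerType)()          // output of the right child
    val leftOut = AttributeSet(Seq[Expression](a))
    val rightOut = AttributeSet(Seq[Expression](b))

    val joinCond: Expression = EqualTo(a, b)                // kept: references both sides
    val localCond: Expression = GreaterThan(a, Literal(5))  // dropped: references one side only

    joinCond.references.intersect(leftOut).nonEmpty &&
      joinCond.references.intersect(rightOut).nonEmpty      // true
    localCond.references.intersect(rightOut).nonEmpty       // false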

http://git-wip-us.apache.org/repos/asf/carbondata/blob/ffddba70/datamap/mv/plan/src/main/scala/org/apache/carbondata/mv/plans/modular/queryGraph.scala
----------------------------------------------------------------------
diff --git a/datamap/mv/plan/src/main/scala/org/apache/carbondata/mv/plans/modular/queryGraph.scala b/datamap/mv/plan/src/main/scala/org/apache/carbondata/mv/plans/modular/queryGraph.scala
new file mode 100644
index 0000000..da6bc15
--- /dev/null
+++ b/datamap/mv/plan/src/main/scala/org/apache/carbondata/mv/plans/modular/queryGraph.scala
@@ -0,0 +1,24 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.carbondata.mv.plans.modular
+
+import org.apache.spark.annotation.DeveloperApi
+import org.apache.spark.sql.catalyst.plans.JoinType
+
+@DeveloperApi
+case class JoinEdge(left: Int, right: Int, joinType: JoinType)
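
JoinEdge is what Select.adjacencyList (above) groups by left child index; the same grouping,
shown standalone with illustrative indices and join types:

    import org.apache.spark.sql.catalyst.plans.{Inner, JoinType}

    val joinEdges = Seq(JoinEdge(0, 1, Inner), JoinEdge(0, 2, Inner))
    val adjacency: Map[Int, Seq[(Int, JoinType)]] =
      joinEdges.groupBy(_.left).map { case (k, v) => (k, v.map(e => (e.right, e.joinType))) }
    // adjacency == Map(0 -> Seq((1, Inner), (2, Inner)))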

http://git-wip-us.apache.org/repos/asf/carbondata/blob/ffddba70/datamap/mv/plan/src/main/scala/org/apache/carbondata/mv/plans/package.scala
----------------------------------------------------------------------
diff --git a/datamap/mv/plan/src/main/scala/org/apache/carbondata/mv/plans/package.scala b/datamap/mv/plan/src/main/scala/org/apache/carbondata/mv/plans/package.scala
new file mode 100644
index 0000000..8c799fe
--- /dev/null
+++ b/datamap/mv/plan/src/main/scala/org/apache/carbondata/mv/plans/package.scala
@@ -0,0 +1,55 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.carbondata.mv
+
+import org.apache.spark.annotation.DeveloperApi
+import org.apache.spark.sql.catalyst.expressions.{AttributeSet, Expression, PredicateHelper}
+import org.apache.spark.sql.catalyst.plans.logical.LogicalPlan
+
+import org.apache.carbondata.mv.plans.modular.ModularPlan
+import org.apache.carbondata.mv.plans.util.{CheckSPJG, LogicalPlanSignatureGenerator, Signature}
+
+/**
+ * A collection of common abstractions for query plans as well as
+ * a base semantic plan representation.
+ */
+package object plans {
+  @DeveloperApi
+  type Pattern = org.apache.carbondata.mv.plans.modular.ModularPattern
+
+  implicit class LogicalPlanUtils(val plan: LogicalPlan) {
+    lazy val isSPJG: Boolean = CheckSPJG.isSPJG(plan)
+    lazy val signature: Option[Signature] = LogicalPlanSignatureGenerator.generate(plan)
+  }
+
+  implicit class MorePredicateHelper(p: PredicateHelper) {
+    def canEvaluate(expr: Expression, plan: ModularPlan): Boolean = {
+      expr.references.subsetOf(plan.outputSet)
+    }
+
+    def canEvaluate(expr: Expression, exprList: Seq[Expression]): Boolean = {
+      expr.references.subsetOf(AttributeSet(exprList))
+    }
+  }
+
+  def supports(supported: Boolean, message: Any) {
+    if (!supported) {
+      throw new UnsupportedOperationException(s"unsupported operation: $message")
+    }
+  }
+}
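
The canEvaluate helpers reduce to an AttributeSet subset check; a small standalone illustration
(attribute names and the literal are arbitrary):

    import org.apache.spark.sql.catalyst.expressions._
    import org.apache.spark.sql.types.IntegerType

    val x = AttributeReference("x", IntegerType)()
    val expr: Expression = Add(x, Literal(1))
    expr.references.subsetOf(AttributeSet(Seq[Expression](x)))           // true: x is available
    expr.references.subsetOf(AttributeSet(Seq[Expression](Literal(1))))  // false: x is missing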


[34/50] [abbrv] carbondata git commit: [CARBONDATA-2475] Support Modular Core for Materialized View DataMap for query matching and rewriting

Posted by ma...@apache.org.
[CARBONDATA-2475] Support Modular Core for Materialized View DataMap for query matching and rewriting

Support Modular Core for Materialized View DataMap

This closes #2302


Project: http://git-wip-us.apache.org/repos/asf/carbondata/repo
Commit: http://git-wip-us.apache.org/repos/asf/carbondata/commit/bf73e9fe
Tree: http://git-wip-us.apache.org/repos/asf/carbondata/tree/bf73e9fe
Diff: http://git-wip-us.apache.org/repos/asf/carbondata/diff/bf73e9fe

Branch: refs/heads/spark-2.3
Commit: bf73e9fe77523e23be46e7597e2c990e855401e5
Parents: d14c403
Author: ravipesala <ra...@gmail.com>
Authored: Sat May 12 22:49:19 2018 +0530
Committer: Jacky Li <ja...@qq.com>
Committed: Sun May 13 17:08:19 2018 +0800

----------------------------------------------------------------------
 datamap/mv/core/pom.xml                         |  169 ++
 .../carbondata/mv/datamap/MVAnalyzerRule.scala  |  105 +
 .../mv/datamap/MVDataMapProvider.scala          |  125 +
 .../apache/carbondata/mv/datamap/MVHelper.scala |  377 +++
 .../apache/carbondata/mv/datamap/MVState.scala  |   55 +
 .../mv/rewrite/DefaultMatchMaker.scala          |  647 +++++
 .../carbondata/mv/rewrite/MatchConditions.scala |   28 +
 .../carbondata/mv/rewrite/MatchMaker.scala      |   47 +
 .../carbondata/mv/rewrite/Navigator.scala       |  196 ++
 .../carbondata/mv/rewrite/QueryRewrite.scala    |   53 +
 .../mv/rewrite/SummaryDatasetCatalog.scala      |  168 ++
 .../apache/carbondata/mv/rewrite/Utils.scala    |  358 +++
 .../mv/rewrite/MVCreateTestCase.scala           |  676 +++++
 .../mv/rewrite/MVSampleTestCase.scala           |  167 ++
 .../carbondata/mv/rewrite/MVTPCDSTestCase.scala |  146 +
 .../carbondata/mv/rewrite/MVTpchTestCase.scala  |  247 ++
 .../SelectSelectExactChildrenSuite.scala        |   76 +
 .../carbondata/mv/rewrite/Tpcds_1_4_Suite.scala |   80 +
 .../mv/rewrite/matching/TestSQLBatch.scala      |  214 ++
 .../rewrite/matching/TestTPCDS_1_4_Batch.scala  | 2496 ++++++++++++++++++
 20 files changed, 6430 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/carbondata/blob/bf73e9fe/datamap/mv/core/pom.xml
----------------------------------------------------------------------
diff --git a/datamap/mv/core/pom.xml b/datamap/mv/core/pom.xml
new file mode 100644
index 0000000..99a8e22
--- /dev/null
+++ b/datamap/mv/core/pom.xml
@@ -0,0 +1,169 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+    Licensed to the Apache Software Foundation (ASF) under one or more
+    contributor license agreements.  See the NOTICE file distributed with
+    this work for additional information regarding copyright ownership.
+    The ASF licenses this file to You under the Apache License, Version 2.0
+    (the "License"); you may not use this file except in compliance with
+    the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+    Unless required by applicable law or agreed to in writing, software
+    distributed under the License is distributed on an "AS IS" BASIS,
+    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    See the License for the specific language governing permissions and
+    limitations under the License.
+-->
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+
+  <modelVersion>4.0.0</modelVersion>
+
+  <parent>
+    <groupId>org.apache.carbondata</groupId>
+    <artifactId>carbondata-parent</artifactId>
+    <version>1.4.0-SNAPSHOT</version>
+    <relativePath>../../../pom.xml</relativePath>
+  </parent>
+
+  <artifactId>carbondata-mv-core</artifactId>
+  <name>Apache CarbonData :: Materialized View Core</name>
+
+  <properties>
+    <dev.path>${basedir}/../../../dev</dev.path>
+  </properties>
+
+  <dependencies>
+    <dependency>
+      <groupId>org.apache.carbondata</groupId>
+      <artifactId>carbondata-mv-plan</artifactId>
+      <version>${project.version}</version>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.carbondata</groupId>
+      <artifactId>carbondata-spark2</artifactId>
+      <version>${project.version}</version>
+    </dependency>
+    <dependency>
+      <groupId>org.scalatest</groupId>
+      <artifactId>scalatest_${scala.binary.version}</artifactId>
+      <scope>test</scope>
+    </dependency>
+  </dependencies>
+
+  <build>
+    <testSourceDirectory>src/test/scala</testSourceDirectory>
+    <plugins>
+      <plugin>
+        <artifactId>maven-compiler-plugin</artifactId>
+        <configuration>
+          <source>1.8</source>
+          <target>1.8</target>
+        </configuration>
+      </plugin>
+      <plugin>
+        <groupId>org.apache.maven.plugins</groupId>
+        <artifactId>maven-surefire-plugin</artifactId>
+        <version>2.18</version>
+        <!-- Note config is repeated in scalatest config -->
+        <configuration>
+          <skip>false</skip>
+          <reportsDirectory>${project.build.directory}/surefire-reports</reportsDirectory>
+          <argLine>-Xmx3g -XX:MaxPermSize=512m -XX:ReservedCodeCacheSize=512m</argLine>
+          <systemProperties>
+            <java.awt.headless>true</java.awt.headless>
+          </systemProperties>
+          <testFailureIgnore>false</testFailureIgnore>
+          <failIfNoTests>false</failIfNoTests>
+        </configuration>
+      </plugin>
+
+      <plugin>
+        <groupId>org.apache.maven.plugins</groupId>
+        <artifactId>maven-checkstyle-plugin</artifactId>
+        <version>2.17</version>
+        <configuration>
+          <skip>true</skip>
+        </configuration>
+      </plugin>
+      <plugin>
+        <groupId>org.scala-tools</groupId>
+        <artifactId>maven-scala-plugin</artifactId>
+        <version>2.15.2</version>
+        <executions>
+          <execution>
+            <id>compile</id>
+            <goals>
+              <goal>compile</goal>
+            </goals>
+            <phase>compile</phase>
+          </execution>
+          <execution>
+            <id>testCompile</id>
+            <goals>
+              <goal>testCompile</goal>
+            </goals>
+            <phase>test</phase>
+          </execution>
+          <execution>
+            <phase>process-resources</phase>
+            <goals>
+              <goal>compile</goal>
+            </goals>
+          </execution>
+        </executions>
+      </plugin>
+      <plugin>
+        <groupId>org.apache.maven.plugins</groupId>
+        <artifactId>maven-enforcer-plugin</artifactId>
+        <version>1.4.1</version>
+        <configuration>
+          <skip>true</skip>
+        </configuration>
+      </plugin>
+      <plugin>
+        <groupId>com.ning.maven.plugins</groupId>
+        <artifactId>maven-duplicate-finder-plugin</artifactId>
+        <configuration>
+          <skip>true</skip>
+        </configuration>
+      </plugin>
+      <plugin>
+        <groupId>org.scalatest</groupId>
+        <artifactId>scalatest-maven-plugin</artifactId>
+        <version>1.0</version>
+        <!-- Note config is repeated in surefire config -->
+        <configuration>
+          <reportsDirectory>${project.build.directory}/surefire-reports</reportsDirectory>
+          <junitxml>.</junitxml>
+          <testFailureIgnore>false</testFailureIgnore>
+          <filereports>CarbonTestSuite.txt</filereports>
+          <argLine>-ea -Xmx3g -XX:MaxPermSize=512m -XX:ReservedCodeCacheSize=512m
+          </argLine>
+          <stderr />
+          <environmentVariables>
+          </environmentVariables>
+          <systemProperties>
+            <java.awt.headless>true</java.awt.headless>
+          </systemProperties>
+        </configuration>
+        <executions>
+          <execution>
+            <id>test</id>
+            <goals>
+              <goal>test</goal>
+            </goals>
+          </execution>
+        </executions>
+      </plugin>
+    </plugins>
+  </build>
+  <profiles>
+  <profile>
+    <id>sdvtest</id>
+    <properties>
+      <maven.test.skip>true</maven.test.skip>
+    </properties>
+  </profile>
+  </profiles>
+</project>

http://git-wip-us.apache.org/repos/asf/carbondata/blob/bf73e9fe/datamap/mv/core/src/main/scala/org/apache/carbondata/mv/datamap/MVAnalyzerRule.scala
----------------------------------------------------------------------
diff --git a/datamap/mv/core/src/main/scala/org/apache/carbondata/mv/datamap/MVAnalyzerRule.scala b/datamap/mv/core/src/main/scala/org/apache/carbondata/mv/datamap/MVAnalyzerRule.scala
new file mode 100644
index 0000000..4e93f15
--- /dev/null
+++ b/datamap/mv/core/src/main/scala/org/apache/carbondata/mv/datamap/MVAnalyzerRule.scala
@@ -0,0 +1,105 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.carbondata.mv.datamap
+
+import org.apache.spark.sql.SparkSession
+import org.apache.spark.sql.catalyst.analysis.{UnresolvedAlias, UnresolvedAttribute}
+import org.apache.spark.sql.catalyst.expressions.{Alias, ScalaUDF}
+import org.apache.spark.sql.catalyst.plans.logical.{Command, DeserializeToObject, LogicalPlan}
+import org.apache.spark.sql.catalyst.rules.Rule
+import org.apache.spark.sql.execution.datasources.LogicalRelation
+
+import org.apache.carbondata.common.logging.LogServiceFactory
+import org.apache.carbondata.core.datamap.DataMapStoreManager
+import org.apache.carbondata.core.metadata.schema.datamap.DataMapClassProvider
+import org.apache.carbondata.core.metadata.schema.table.DataMapSchema
+import org.apache.carbondata.datamap.DataMapManager
+import org.apache.carbondata.mv.rewrite.{SummaryDataset, SummaryDatasetCatalog}
+
+/**
+ * Analyzer rule to rewrite the query for MV datamap
+ *
+ * @param sparkSession
+ */
+class MVAnalyzerRule(sparkSession: SparkSession) extends Rule[LogicalPlan] {
+
+  // TODO Find way better way to get the provider.
+  private val dataMapProvider =
+    DataMapManager.get().getDataMapProvider(null,
+      new DataMapSchema("", DataMapClassProvider.MV.getShortName), sparkSession)
+
+  private val LOGGER = LogServiceFactory.getLogService(classOf[MVAnalyzerRule].getName)
+
+  override def apply(plan: LogicalPlan): LogicalPlan = {
+    var needAnalysis = true
+    plan.transformAllExpressions {
+      // first check whether the preAgg scala UDF is present in the plan; if it is, the call
+      // comes from the create preaggregate table flow, so the query plan need not be transformed
+      // TODO Add different UDF name
+      case al@Alias(udf: ScalaUDF, name) if name.equalsIgnoreCase("preAgg") =>
+        needAnalysis = false
+        al
+      // in case of a query, if any unresolved alias is present then wait for the plan to be
+      // resolved; return the same plan because we can transform it only when everything is resolved
+      case unresolveAlias@UnresolvedAlias(_, _) =>
+        needAnalysis = false
+        unresolveAlias
+      case attr@UnresolvedAttribute(_) =>
+        needAnalysis = false
+        attr
+    }
+    val catalog = DataMapStoreManager.getInstance().getDataMapCatalog(dataMapProvider,
+      DataMapClassProvider.MV.getShortName).asInstanceOf[SummaryDatasetCatalog]
+    if (needAnalysis && catalog != null && isValidPlan(plan, catalog)) {
+      val modularPlan = catalog.mVState.rewritePlan(plan).withSummaryData
+      if (modularPlan.find (_.rewritten).isDefined) {
+        val compactSQL = modularPlan.asCompactSQL
+        LOGGER.audit(s"\n$compactSQL\n")
+        val analyzed = sparkSession.sql(compactSQL).queryExecution.analyzed
+        analyzed
+      } else {
+        plan
+      }
+    } else {
+      plan
+    }
+  }
+
+  def isValidPlan(plan: LogicalPlan, catalog: SummaryDatasetCatalog): Boolean = {
+    !plan.isInstanceOf[Command] && !isDataMapExists(plan, catalog.listAllSchema()) &&
+    !plan.isInstanceOf[DeserializeToObject]
+  }
+  /**
+   * Check whether a datamap (MV) table is already present in the query plan.
+   *
+   * @param plan
+   * @param mvs
+   * @return
+   */
+  def isDataMapExists(plan: LogicalPlan, mvs: Array[SummaryDataset]): Boolean = {
+    val catalogs = plan collect {
+      case l: LogicalRelation => l.catalogTable
+    }
+    catalogs.isEmpty || catalogs.exists { c =>
+      mvs.exists { mv =>
+        val identifier = mv.dataMapSchema.getRelationIdentifier
+        identifier.getTableName.equals(c.get.identifier.table) &&
+        identifier.getDatabaseName.equals(c.get.database)
+      }
+    }
+  }
+}
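
A sketch of exercising the rule directly; in practice the rule is injected into the session's
analyzer, so the standalone call below is illustrative only and assumes a SparkSession named
spark with an MV datamap already registered for the queried table:

    val rule = new MVAnalyzerRule(spark)
    val analyzed = spark.sql("select country, sum(amount) from sales group by country")
      .queryExecution.analyzed
    // Returns the plan rewritten against the MV table when a matching datamap exists,
    // otherwise the input plan unchanged.
    val maybeRewritten = rule(analyzed)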

http://git-wip-us.apache.org/repos/asf/carbondata/blob/bf73e9fe/datamap/mv/core/src/main/scala/org/apache/carbondata/mv/datamap/MVDataMapProvider.scala
----------------------------------------------------------------------
diff --git a/datamap/mv/core/src/main/scala/org/apache/carbondata/mv/datamap/MVDataMapProvider.scala b/datamap/mv/core/src/main/scala/org/apache/carbondata/mv/datamap/MVDataMapProvider.scala
new file mode 100644
index 0000000..2aba23e
--- /dev/null
+++ b/datamap/mv/core/src/main/scala/org/apache/carbondata/mv/datamap/MVDataMapProvider.scala
@@ -0,0 +1,125 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.carbondata.mv.datamap
+
+import java.io.IOException
+
+import org.apache.spark.sql.SparkSession
+import org.apache.spark.sql.catalyst.TableIdentifier
+import org.apache.spark.sql.catalyst.plans.logical.SubqueryAlias
+import org.apache.spark.sql.execution.command.management.CarbonLoadDataCommand
+import org.apache.spark.sql.execution.command.table.CarbonDropTableCommand
+import org.apache.spark.sql.execution.datasources.FindDataSourceTable
+import org.apache.spark.sql.parser.CarbonSpark2SqlParser
+import org.apache.spark.sql.util.SparkSQLUtil
+
+import org.apache.carbondata.common.annotations.InterfaceAudience
+import org.apache.carbondata.common.exceptions.sql.MalformedDataMapCommandException
+import org.apache.carbondata.core.datamap.{DataMapCatalog, DataMapProvider, DataMapStoreManager}
+import org.apache.carbondata.core.datamap.dev.{DataMap, DataMapFactory}
+import org.apache.carbondata.core.indexstore.Blocklet
+import org.apache.carbondata.core.metadata.schema.table.{CarbonTable, DataMapSchema}
+import org.apache.carbondata.mv.rewrite.{SummaryDataset, SummaryDatasetCatalog}
+
+@InterfaceAudience.Internal
+class MVDataMapProvider(
+    mainTable: CarbonTable,
+    sparkSession: SparkSession,
+    dataMapSchema: DataMapSchema)
+  extends DataMapProvider(mainTable, dataMapSchema) {
+  protected var dropTableCommand: CarbonDropTableCommand = null
+
+  @throws[MalformedDataMapCommandException]
+  @throws[IOException]
+  override def initMeta(ctasSqlStatement: String): Unit = {
+    if (ctasSqlStatement == null) {
+      throw new MalformedDataMapCommandException(
+        "select statement is mandatory")
+    }
+    MVHelper.createMVDataMap(sparkSession, dataMapSchema, ctasSqlStatement, true)
+    DataMapStoreManager.getInstance.registerDataMapCatalog(this, dataMapSchema)
+  }
+
+  override def initData(): Unit = {
+  }
+
+  @throws[IOException]
+  override def cleanMeta(): Unit = {
+    dropTableCommand = new CarbonDropTableCommand(true,
+      new Some[String](dataMapSchema.getRelationIdentifier.getDatabaseName),
+      dataMapSchema.getRelationIdentifier.getTableName,
+      true)
+    dropTableCommand.processMetadata(sparkSession)
+    DataMapStoreManager.getInstance.unRegisterDataMapCatalog(dataMapSchema)
+    DataMapStoreManager.getInstance().dropDataMapSchema(dataMapSchema.getDataMapName)
+  }
+
+  override def cleanData(): Unit = {
+    if (dropTableCommand != null) {
+      dropTableCommand.processData(sparkSession)
+    }
+  }
+
+  @throws[IOException]
+  override def rebuild(): Unit = {
+    val ctasQuery = dataMapSchema.getCtasQuery
+    if (ctasQuery != null) {
+      val identifier = dataMapSchema.getRelationIdentifier
+      val logicalPlan =
+        new FindDataSourceTable(sparkSession).apply(
+          sparkSession.sessionState.catalog.lookupRelation(
+          TableIdentifier(identifier.getTableName,
+            Some(identifier.getDatabaseName)))) match {
+          case s: SubqueryAlias => s.child
+          case other => other
+        }
+      val updatedQuery = new CarbonSpark2SqlParser().addPreAggFunction(ctasQuery)
+      val queryPlan = SparkSQLUtil.execute(
+        sparkSession.sql(updatedQuery).queryExecution.analyzed,
+        sparkSession).drop("preAgg")
+      val header = logicalPlan.output.map(_.name).mkString(",")
+      val loadCommand = CarbonLoadDataCommand(
+        databaseNameOp = Some(identifier.getDatabaseName),
+        tableName = identifier.getTableName,
+        factPathFromUser = null,
+        dimFilesPath = Seq(),
+        options = scala.collection.immutable.Map("fileheader" -> header),
+        isOverwriteTable = true,
+        inputSqlString = null,
+        dataFrame = Some(queryPlan),
+        updateModel = None,
+        tableInfoOp = None,
+        internalOptions = Map.empty,
+        partition = Map.empty)
+
+      SparkSQLUtil.execute(loadCommand, sparkSession)
+    }
+  }
+
+  @throws[IOException]
+  override def incrementalBuild(
+      segmentIds: Array[String]): Unit = {
+    throw new UnsupportedOperationException
+  }
+
+  override def createDataMapCatalog : DataMapCatalog[SummaryDataset] =
+    new SummaryDatasetCatalog(sparkSession)
+
+  override def getDataMapFactory: DataMapFactory[_ <: DataMap[_ <: Blocklet]] = {
+    throw new UnsupportedOperationException
+  }
+}
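
The provider hooks map onto datamap DDL; a sketch of the intended lifecycle, assuming the
'mv' datamap syntax exercised by the test cases in this change (table and datamap names are
illustrative):

    // CREATE DATAMAP drives initMeta(): the MV table is created and its schema registered.
    spark.sql(
      """CREATE DATAMAP sales_mv USING 'mv'
        |AS SELECT country, sum(amount) FROM sales GROUP BY country""".stripMargin)
    // rebuild() loads the MV table by re-running the stored CTAS query as an overwrite load.
    // Dropping the datamap drives cleanMeta()/cleanData(), removing the MV table and schema.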

http://git-wip-us.apache.org/repos/asf/carbondata/blob/bf73e9fe/datamap/mv/core/src/main/scala/org/apache/carbondata/mv/datamap/MVHelper.scala
----------------------------------------------------------------------
diff --git a/datamap/mv/core/src/main/scala/org/apache/carbondata/mv/datamap/MVHelper.scala b/datamap/mv/core/src/main/scala/org/apache/carbondata/mv/datamap/MVHelper.scala
new file mode 100644
index 0000000..0f9362f
--- /dev/null
+++ b/datamap/mv/core/src/main/scala/org/apache/carbondata/mv/datamap/MVHelper.scala
@@ -0,0 +1,377 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.carbondata.mv.datamap
+
+import java.util
+
+import scala.collection.JavaConverters._
+import scala.collection.mutable
+
+import org.apache.spark.sql.{CarbonEnv, SparkSession}
+import org.apache.spark.sql.catalyst.TableIdentifier
+import org.apache.spark.sql.catalyst.catalog.CatalogTable
+import org.apache.spark.sql.catalyst.expressions.{Alias, Attribute, AttributeReference, AttributeSet, Expression, NamedExpression, ScalaUDF}
+import org.apache.spark.sql.catalyst.expressions.aggregate._
+import org.apache.spark.sql.catalyst.plans.logical.{Aggregate, LogicalPlan, Project}
+import org.apache.spark.sql.execution.command.{Field, TableModel, TableNewProcessor}
+import org.apache.spark.sql.execution.command.table.CarbonCreateTableCommand
+import org.apache.spark.sql.execution.datasources.LogicalRelation
+import org.apache.spark.sql.parser.CarbonSpark2SqlParser
+
+import org.apache.carbondata.core.datamap.DataMapStoreManager
+import org.apache.carbondata.core.metadata.schema.table.{DataMapSchema, DataMapSchemaStorageProvider, RelationIdentifier}
+import org.apache.carbondata.mv.plans.modular
+import org.apache.carbondata.mv.plans.modular.{GroupBy, Matchable, ModularPlan, Select}
+import org.apache.carbondata.mv.rewrite.{DefaultMatchMaker, QueryRewrite}
+import org.apache.carbondata.spark.util.CommonUtil
+
+/**
+ * Utility for MV datamap operations.
+ */
+object MVHelper {
+
+  def createMVDataMap(sparkSession: SparkSession,
+      dataMapSchema: DataMapSchema,
+      queryString: String,
+      ifNotExistsSet: Boolean = false): Unit = {
+    val dmProperties = dataMapSchema.getProperties.asScala
+    val updatedQuery = new CarbonSpark2SqlParser().addPreAggFunction(queryString)
+    val logicalPlan = sparkSession.sql(updatedQuery).drop("preAgg").queryExecution.analyzed
+    val fields = logicalPlan.output.map { attr =>
+      val name = updateColumnName(attr)
+      val rawSchema = '`' + name + '`' + ' ' + attr.dataType.typeName
+      if (attr.dataType.typeName.startsWith("decimal")) {
+        val (precision, scale) = CommonUtil.getScaleAndPrecision(attr.dataType.catalogString)
+        Field(column = name,
+          dataType = Some(attr.dataType.typeName),
+          name = Some(name),
+          children = None,
+          precision = precision,
+          scale = scale,
+          rawSchema = rawSchema)
+      } else {
+        Field(column = name,
+          dataType = Some(attr.dataType.typeName),
+          name = Some(name),
+          children = None,
+          rawSchema = rawSchema)
+      }
+    }
+    val tableProperties = mutable.Map[String, String]()
+    dmProperties.foreach(t => tableProperties.put(t._1, t._2))
+
+    val selectTables = getTables(logicalPlan)
+
+    // TODO inherit the table properties like sort order, sort scope and block size from parent
+    // tables to mv datamap table
+    // TODO Use a proper DB
+    val tableIdentifier =
+    TableIdentifier(dataMapSchema.getDataMapName + "_table",
+      selectTables.head.identifier.database)
+    // prepare table model of the collected tokens
+    val tableModel: TableModel = new CarbonSpark2SqlParser().prepareTableModel(
+      ifNotExistPresent = ifNotExistsSet,
+      new CarbonSpark2SqlParser().convertDbNameToLowerCase(tableIdentifier.database),
+      tableIdentifier.table.toLowerCase,
+      fields,
+      Seq(),
+      tableProperties,
+      None,
+      isAlterFlow = false,
+      None)
+
+    val tablePath = if (dmProperties.contains("path")) {
+      dmProperties("path")
+    } else {
+      CarbonEnv.getTablePath(tableModel.databaseNameOp, tableModel.tableName)(sparkSession)
+    }
+    CarbonCreateTableCommand(TableNewProcessor(tableModel),
+      tableModel.ifNotExistsSet, Some(tablePath), isVisible = false).run(sparkSession)
+
+    dataMapSchema.setCtasQuery(queryString)
+    dataMapSchema
+      .setRelationIdentifier(new RelationIdentifier(tableIdentifier.database.get,
+        tableIdentifier.table,
+        ""))
+
+    val parentIdents = selectTables.map { table =>
+      new RelationIdentifier(table.database, table.identifier.table, "")
+    }
+    dataMapSchema.setParentTables(new util.ArrayList[RelationIdentifier](parentIdents.asJava))
+    DataMapStoreManager.getInstance().saveDataMapSchema(dataMapSchema)
+  }
+
+  def updateColumnName(attr: Attribute): String = {
+    val name = attr.name.replace("(", "_").replace(")", "").replace(" ", "_").replace("=", "")
+    attr.qualifier.map(qualifier => qualifier + "_" + name).getOrElse(name)
+  }
+
+  def getTables(logicalPlan: LogicalPlan): Seq[CatalogTable] = {
+    logicalPlan.collect {
+      case l: LogicalRelation => l.catalogTable.get
+    }
+  }
+
+  def dropDummFuc(plan: LogicalPlan): LogicalPlan = {
+    plan transform {
+      case p@Project(exps, child) =>
+        Project(dropDummyExp(exps), child)
+      case Aggregate(grp, aggExp, child) =>
+        Aggregate(
+          grp,
+          dropDummyExp(aggExp),
+          child)
+    }
+  }
+
+  private def dropDummyExp(exps: Seq[NamedExpression]) = {
+    exps.map {
+      case al@Alias(udf: ScalaUDF, name) if name.equalsIgnoreCase("preAgg") => None
+      case attr: AttributeReference if attr.name.equalsIgnoreCase("preAgg") => None
+      case other => Some(other)
+    }.filter(_.isDefined).map(_.get)
+  }
+
+  def getAttributeMap(subsumer: Seq[NamedExpression],
+      subsume: Seq[NamedExpression]): Map[AttributeKey, NamedExpression] = {
+    if (subsumer.length == subsume.length) {
+      subsume.zip(subsumer).flatMap { case (left, right) =>
+        var tuples = left collect {
+          case attr: AttributeReference =>
+            (AttributeKey(attr), createAttrReference(right, attr.name))
+        }
+        left match {
+          case a: Alias =>
+            tuples = Seq((AttributeKey(a.child), createAttrReference(right, a.name))) ++ tuples
+          case _ =>
+        }
+        Seq((AttributeKey(left), createAttrReference(right, left.name))) ++ tuples
+      }.toMap
+    } else {
+      throw new UnsupportedOperationException("Cannot create mapping with unequal sizes")
+    }
+  }
+
+  def createAttrReference(ref: NamedExpression, name: String): Alias = {
+    Alias(ref, name)(exprId = ref.exprId, qualifier = None)
+  }
+
+  case class AttributeKey(exp: Expression) {
+
+    override def equals(other: Any): Boolean = other match {
+      case attrKey: AttributeKey =>
+        exp.semanticEquals(attrKey.exp)
+      case _ => false
+    }
+
+    override def hashCode: Int = exp.hashCode
+
+  }
+
+  /**
+   * Updates the given expressions to match the subsumer's output expressions. This is needed so
+   * that the expressions refer to the datamap table relation
+   *
+   * @param expressions        expressions to be updated
+   * @param aliasName          table alias name
+   * @return Updated expressions
+   */
+  def updateSubsumeAttrs(
+      expressions: Seq[Expression],
+      attrMap: Map[AttributeKey, NamedExpression],
+      aliasName: Option[String],
+      keepAlias: Boolean = false): Seq[Expression] = {
+
+    def getAttribute(exp: Expression) = {
+      exp match {
+        case Alias(agg: AggregateExpression, name) =>
+          agg.aggregateFunction.collect {
+            case attr: AttributeReference =>
+              AttributeReference(attr.name, attr.dataType, attr.nullable, attr
+                .metadata)(attr.exprId,
+                aliasName,
+                attr.isGenerated)
+          }.head
+        case Alias(child, name) =>
+          child
+        case other => other
+      }
+    }
+
+    expressions.map {
+        case alias@Alias(agg: AggregateExpression, name) =>
+          attrMap.get(AttributeKey(alias)).map { exp =>
+            Alias(getAttribute(exp), name)(alias.exprId,
+              alias.qualifier,
+              alias.explicitMetadata,
+              alias.isGenerated)
+          }.getOrElse(alias)
+
+        case attr: AttributeReference =>
+          val uattr = attrMap.get(AttributeKey(attr)).map{a =>
+            if (keepAlias) {
+              AttributeReference(a.name, a.dataType, a.nullable, a.metadata)(a.exprId,
+                attr.qualifier,
+                a.isGenerated)
+            } else {
+              a
+            }
+          }.getOrElse(attr)
+          uattr
+        case expression: Expression =>
+          val uattr = attrMap.getOrElse(AttributeKey(expression), expression)
+          uattr
+    }
+  }
+
+  def updateOutPutList(
+      subsumerOutputList: Seq[NamedExpression],
+      dataMapRltn: Select,
+      aliasMap: Map[AttributeKey, NamedExpression],
+      keepAlias: Boolean): Seq[NamedExpression] = {
+    var outputSel =
+      updateSubsumeAttrs(
+        subsumerOutputList,
+        aliasMap,
+        Some(dataMapRltn.aliasMap.values.head),
+        keepAlias).asInstanceOf[Seq[NamedExpression]]
+    outputSel.zip(subsumerOutputList).map{ case (l, r) =>
+      l match {
+        case attr: AttributeReference =>
+          Alias(attr, r.name)(r.exprId, None)
+        case a@Alias(attr: AttributeReference, name) =>
+          Alias(attr, r.name)(r.exprId, None)
+        case other => other
+      }
+    }
+
+  }
+
+  def updateSelectPredicates(
+      predicates: Seq[Expression],
+      attrMap: Map[AttributeKey, NamedExpression],
+      keepAlias: Boolean): Seq[Expression] = {
+    predicates.map { exp =>
+      exp transform {
+        case attr: AttributeReference =>
+          val uattr = attrMap.get(AttributeKey(attr)).map{a =>
+            if (keepAlias) {
+              AttributeReference(a.name, a.dataType, a.nullable, a.metadata)(a.exprId,
+                attr.qualifier,
+                a.isGenerated)
+            } else {
+              a
+            }
+          }.getOrElse(attr)
+          uattr
+      }
+    }
+  }
+
+  /**
+   * Update the modular plan as per the datamap table relation inside it.
+   *
+   * @param subsumer plan to be updated
+   * @return Updated modular plan.
+   */
+  def updateDataMap(subsumer: ModularPlan, rewrite: QueryRewrite): ModularPlan = {
+    subsumer match {
+      case s: Select if s.dataMapTableRelation.isDefined =>
+        val relation = s.dataMapTableRelation.get.asInstanceOf[Select]
+        val mappings = s.outputList zip relation.outputList
+        val oList = for ((o1, o2) <- mappings) yield {
+          if (o1.name != o2.name) Alias(o2, o1.name)(exprId = o1.exprId) else o2
+        }
+        relation.copy(outputList = oList).setRewritten()
+      case g: GroupBy if g.dataMapTableRelation.isDefined =>
+        val relation = g.dataMapTableRelation.get.asInstanceOf[Select]
+        val in = relation.asInstanceOf[Select].outputList
+        val mappings = g.outputList zip relation.outputList
+        val oList = for ((left, right) <- mappings) yield {
+          left match {
+            case Alias(agg@AggregateExpression(fun@Sum(child), _, _, _), name) =>
+              val uFun = fun.copy(child = right)
+              Alias(agg.copy(aggregateFunction = uFun), left.name)(exprId = left.exprId)
+            case Alias(agg@AggregateExpression(fun@Max(child), _, _, _), name) =>
+              val uFun = fun.copy(child = right)
+              Alias(agg.copy(aggregateFunction = uFun), left.name)(exprId = left.exprId)
+            case Alias(agg@AggregateExpression(fun@Min(child), _, _, _), name) =>
+              val uFun = fun.copy(child = right)
+              Alias(agg.copy(aggregateFunction = uFun), left.name)(exprId = left.exprId)
+            case Alias(agg@AggregateExpression(fun@Count(Seq(child)), _, _, _), name) =>
+              val uFun = Sum(right)
+              Alias(agg.copy(aggregateFunction = uFun), left.name)(exprId = left.exprId)
+            case _ =>
+              if (left.name != right.name) Alias(right, left.name)(exprId = left.exprId) else right
+          }
+        }
+        val updatedPredicates = g.predicateList.map { f =>
+          mappings.find{ case (k, y) =>
+            k match {
+              case a: Alias if f.isInstanceOf[Alias] =>
+                a.child.semanticEquals(f.children.head)
+              case a: Alias => a.child.semanticEquals(f)
+              case other => other.semanticEquals(f)
+            }
+          } match {
+            case Some(r) => r._2
+            case _ => f
+          }
+        }
+        g.copy(outputList = oList,
+          inputList = in,
+          predicateList = updatedPredicates,
+          child = relation,
+          dataMapTableRelation = None).setRewritten()
+
+      case select: Select =>
+        select.children match {
+          case Seq(s: Select) if s.dataMapTableRelation.isDefined =>
+            val relation = s.dataMapTableRelation.get.asInstanceOf[Select]
+            val child = updateDataMap(s, rewrite).asInstanceOf[Select]
+            val aliasMap = getAttributeMap(relation.outputList, s.outputList)
+            var outputSel =
+              updateOutPutList(select.outputList, relation, aliasMap, keepAlias = true)
+            val pred = updateSelectPredicates(select.predicateList, aliasMap, true)
+            select.copy(outputList = outputSel,
+              inputList = child.outputList,
+              predicateList = pred,
+              children = Seq(child)).setRewritten()
+
+          case Seq(g: GroupBy) if g.dataMapTableRelation.isDefined =>
+            val relation = g.dataMapTableRelation.get.asInstanceOf[Select]
+            val aliasMap = getAttributeMap(relation.outputList, g.outputList)
+
+            val outputSel =
+              updateOutPutList(select.outputList, relation, aliasMap, keepAlias = false)
+            val child = updateDataMap(g, rewrite).asInstanceOf[Matchable]
+            // TODO Remove the unnecessary columns from selection.
+            // Only keep columns which are required by parent.
+            val inputSel = child.outputList
+            select.copy(
+              outputList = outputSel,
+              inputList = inputSel,
+              children = Seq(child)).setRewritten()
+
+          case _ => select
+        }
+
+      case other => other
+    }
+  }
+}
+
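
updateColumnName normalises expression-derived names so the MV table gets legal column names;
a standalone repro of the renaming (the attribute name and qualifier are illustrative):

    import org.apache.spark.sql.catalyst.expressions.AttributeReference
    import org.apache.spark.sql.types.LongType

    val attr = AttributeReference("sum(price)", LongType)(qualifier = Some("fact"))
    val name = attr.name.replace("(", "_").replace(")", "").replace(" ", "_").replace("=", "")
    attr.qualifier.map(q => q + "_" + name).getOrElse(name)   // "fact_sum_price"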

http://git-wip-us.apache.org/repos/asf/carbondata/blob/bf73e9fe/datamap/mv/core/src/main/scala/org/apache/carbondata/mv/datamap/MVState.scala
----------------------------------------------------------------------
diff --git a/datamap/mv/core/src/main/scala/org/apache/carbondata/mv/datamap/MVState.scala b/datamap/mv/core/src/main/scala/org/apache/carbondata/mv/datamap/MVState.scala
new file mode 100644
index 0000000..412d547
--- /dev/null
+++ b/datamap/mv/core/src/main/scala/org/apache/carbondata/mv/datamap/MVState.scala
@@ -0,0 +1,55 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.carbondata.mv.datamap
+
+import org.apache.spark.sql.catalyst.plans.logical.LogicalPlan
+
+import org.apache.carbondata.mv.plans.modular.SimpleModularizer
+import org.apache.carbondata.mv.plans.util.BirdcageOptimizer
+import org.apache.carbondata.mv.rewrite.{DefaultMatchMaker, Navigator, QueryRewrite, SummaryDatasetCatalog}
+
+/**
+ * A class that holds all session-specific state.
+ */
+private[mv] class MVState(summaryDatasetCatalog: SummaryDatasetCatalog) {
+
+  // Note: These are all lazy vals because they depend on each other (e.g. conf) and we
+  // want subclasses to override some of the fields. Otherwise, we would get a lot of NPEs.
+
+  /**
+   * Modular query plan modularizer
+   */
+  lazy val modularizer = SimpleModularizer
+
+  /**
+   * Logical query plan optimizer.
+   */
+  lazy val optimizer = BirdcageOptimizer
+
+  lazy val matcher = DefaultMatchMaker
+
+  lazy val navigator: Navigator = new Navigator(summaryDatasetCatalog, this)
+
+  /**
+   * Rewrite the logical query plan to MV plan if applicable.
+   * @param plan
+   * @return
+   */
+  def rewritePlan(plan: LogicalPlan): QueryRewrite = new QueryRewrite(this, plan)
+
+}
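
A sketch of the pipeline this state object wires together, mirroring MVAnalyzerRule above; the
catalog and analyzed plan are assumed to exist, and since MVState is package-private the direct
calls are illustrative only:

    val state = new MVState(summaryDatasetCatalog)
    val rewrite = state.rewritePlan(analyzedPlan)      // QueryRewrite over the analyzed plan
    val modularPlan = rewrite.withSummaryData          // modularized and matched against MVs
    if (modularPlan.find(_.rewritten).isDefined) {
      val compactSQL = modularPlan.asCompactSQL        // SQL that reads from the MV table
    }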

http://git-wip-us.apache.org/repos/asf/carbondata/blob/bf73e9fe/datamap/mv/core/src/main/scala/org/apache/carbondata/mv/rewrite/DefaultMatchMaker.scala
----------------------------------------------------------------------
diff --git a/datamap/mv/core/src/main/scala/org/apache/carbondata/mv/rewrite/DefaultMatchMaker.scala b/datamap/mv/core/src/main/scala/org/apache/carbondata/mv/rewrite/DefaultMatchMaker.scala
new file mode 100644
index 0000000..899c36c
--- /dev/null
+++ b/datamap/mv/core/src/main/scala/org/apache/carbondata/mv/rewrite/DefaultMatchMaker.scala
@@ -0,0 +1,647 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.carbondata.mv.rewrite
+
+import org.apache.spark.sql.catalyst.expressions.{Alias, Attribute, AttributeMap, AttributeReference, AttributeSet, Expression, PredicateHelper, _}
+import org.apache.spark.sql.catalyst.plans.{FullOuter, Inner}
+
+import org.apache.carbondata.mv.datamap.MVHelper
+import org.apache.carbondata.mv.plans.modular
+import org.apache.carbondata.mv.plans.modular._
+import org.apache.carbondata.mv.plans.modular.Flags._
+import org.apache.carbondata.mv.plans.util.SQLBuilder
+
+abstract class DefaultMatchMaker extends MatchMaker[ModularPlan]
+
+abstract class DefaultMatchPattern extends MatchPattern[ModularPlan] {
+
+  /** Name for this pattern, automatically inferred based on class name. */
+  val patternName: String = {
+    val className = getClass.getName
+    if (className endsWith "$") className.dropRight(1) else className
+  }
+
+  def factorOutSubsumer(
+      compensation: ModularPlan,
+      subsumer: Matchable,
+      aliasMapMain: Map[Int, String]): ModularPlan = {
+
+    // Create an aliasMap from each aliased child attribute to the alias's output attribute
+    val aliasMap = AttributeMap(
+        subsumer.outputList.collect {
+          case a: Alias if a.child.isInstanceOf[Attribute] =>
+            (a.child.asInstanceOf[Attribute], a.toAttribute)
+          })
+
+    // Check and replace all alias references with subsumer alias map references.
+    val compensation1 = compensation.transform {
+      case plan if !plan.skip && plan != subsumer =>
+        plan.transformExpressions {
+          case a: AttributeReference =>
+            aliasMap
+              .get(a)
+              .map { ref =>
+                AttributeReference(
+                  ref.name, ref.dataType)(
+                  exprId = ref.exprId,
+                  qualifier = a.qualifier)
+              }.getOrElse(a)
+          }
+    }
+
+    val subqueryAttributeSet = SQLBuilder.collectAttributeSet(subsumer.outputList)
+    if (SQLBuilder.collectDuplicateNames(subqueryAttributeSet).nonEmpty) {
+      throw new UnsupportedOperationException(
+        s"duplicate name(s): ${ subsumer.output.map(_.toString + ", ") }")
+    }
+    if (aliasMapMain.size == 1) {
+      val subsumerName: Option[String] = aliasMapMain.get(0)
+      // Replace all compensation1 attributes with references from the subsumer attribute set
+      val compensationFinal = compensation1.transformExpressions {
+        case ref: Attribute if subqueryAttributeSet.contains(ref) =>
+          AttributeReference(ref.name, ref.dataType)(exprId = ref.exprId, qualifier = subsumerName)
+        case alias: Alias if subqueryAttributeSet.contains(alias.toAttribute) =>
+          Alias(alias.child, alias.name)(exprId = alias.exprId, qualifier = subsumerName)
+      }
+      compensationFinal
+    } else {
+      compensation1
+    }
+  }
+}
+
+object DefaultMatchMaker extends DefaultMatchMaker {
+  lazy val patterns =
+    SelectSelectNoChildDelta ::
+    GroupbyGroupbyNoChildDelta ::
+    GroupbyGroupbySelectOnlyChildDelta ::
+    GroupbyGroupbyGroupbyChildDelta ::
+    SelectSelectSelectChildDelta ::
+    SelectSelectGroupbyChildDelta :: Nil
+}
+
+/**
+ * Convention:
+ * EmR: each subsumee expression matches some subsumer expression
+ * EdR: each subsumee expression derives from some subsumer expression
+ * RmE: each subsumer expression matches some subsumee expression
+ * RdE: each subsumer expression derives from some subsumee expression
+ */
+
+object SelectSelectNoChildDelta extends DefaultMatchPattern with PredicateHelper {
+  private def isDerivable(
+      exprE: Expression,
+      exprListR: Seq[Expression],
+      subsumee: ModularPlan,
+      subsumer: ModularPlan,
+      compensation: Option[ModularPlan]): Boolean = {
+    if (subsumee.asInstanceOf[Select].predicateList.contains(exprE)) {
+      subsumer.asInstanceOf[Select].predicateList.exists(_.semanticEquals(exprE)) ||
+      canEvaluate(exprE, subsumer)
+    } else if (subsumee.asInstanceOf[Select].outputList.contains(exprE)) {
+      exprE match {
+        case a@Alias(_, _) =>
+          exprListR.exists(a1 => a1.isInstanceOf[Alias] &&
+                                 a1.asInstanceOf[Alias].child.semanticEquals(a.child)) ||
+          exprListR.exists(_.semanticEquals(exprE) || canEvaluate(exprE, subsumer))
+        case exp => exprListR.exists(_.semanticEquals(exp) || canEvaluate(exp, subsumer))
+      }
+    } else {
+      false
+    }
+  }
+
+  def apply(
+      subsumer: ModularPlan,
+      subsumee: ModularPlan,
+      compensation: Option[ModularPlan],
+      rewrite: QueryRewrite): Seq[ModularPlan] = {
+
+    (subsumer, subsumee, compensation) match {
+      case (
+          sel_1a @ modular.Select(_, _, _, _, _, _, _, _, _, _),
+          sel_1q @ modular.Select(_, _, _, _, _, _, _, _, _, _), None
+        ) if sel_1a.children.forall { _.isInstanceOf[modular.LeafNode] } &&
+             sel_1q.children.forall { _.isInstanceOf[modular.LeafNode] } =>
+
+        // assume children (including harmonized relation) of subsumer and subsumee
+        // are 1-1 correspondence.
+        // Change the following two conditions to more complicated ones if we want to
+        // consider things that combine extrajoin, rejoin, and harmonized relations
+        val isUniqueRmE = subsumer.children.filter { x => subsumee.children.count(_ == x) != 1 }
+        val isUniqueEmR = subsumee.children.filter { x => subsumer.children.count(_ == x) != 1 }
+
+        val extrajoin = sel_1a.children.filterNot { child => sel_1q.children.contains(child) }
+        val rejoin = sel_1q.children.filterNot { child => sel_1a.children.contains(child) }
+        val rejoinOutputList = rejoin.flatMap(_.output)
+
+        val isPredicateRmE = sel_1a.predicateList.forall(expr =>
+          sel_1q.predicateList.exists(_.semanticEquals(expr)))
+        val isPredicateEmdR = sel_1q.predicateList.forall(expr =>
+          isDerivable(expr, sel_1a.outputList ++ rejoinOutputList, sel_1q, sel_1a, None))
+        val isOutputEdR = sel_1q.outputList.forall(expr =>
+          isDerivable(expr, sel_1a.outputList ++ rejoinOutputList, sel_1q, sel_1a, None))
+
+        if (isUniqueRmE.isEmpty && isUniqueEmR.isEmpty && extrajoin.isEmpty && isPredicateRmE &&
+            isPredicateEmdR && isOutputEdR) {
+          val mappings = sel_1a.children.zipWithIndex.map {
+            case (childr, fromIdx) if sel_1q.children.contains(childr) =>
+              val toIndx = sel_1q.children.indexWhere(_ == childr)
+              (toIndx -> fromIdx)
+
+          }
+          val e2r = mappings.toMap
+          val r2e = e2r.map(_.swap)
+          val r2eJoinsMatch = sel_1a.joinEdges.forall { x =>
+              (r2e.get(x.left), r2e.get(x.right)) match {
+                case (Some(l), Some(r)) =>
+                  val mappedEdge = JoinEdge(l, r, x.joinType)
+                  val joinTypeEquivalent =
+                    if (sel_1q.joinEdges.contains(mappedEdge)) true
+                    else {
+                      x.joinType match {
+                        case Inner | FullOuter =>
+                          sel_1q.joinEdges.contains(JoinEdge(r, l, x.joinType))
+                        case _ => false
+                      }
+                    }
+                  if (joinTypeEquivalent) {
+                    val sel_1a_join = sel_1a.extractJoinConditions(
+                      sel_1a.children(x.left),
+                      sel_1a.children(x.right))
+                    val sel_1q_join = sel_1q.extractJoinConditions(
+                      sel_1q.children(mappedEdge.left),
+                      sel_1q.children(mappedEdge.right))
+                    sel_1a_join.forall(e => sel_1q_join.exists(e.semanticEquals(_))) &&
+                    sel_1q_join.forall(e => sel_1a_join.exists(e.semanticEquals(_)))
+                  } else false
+                case _ => false
+              }
+          }
+
+          val isPredicateEmR = sel_1q.predicateList.forall(expr =>
+            sel_1a.predicateList.exists(_.semanticEquals(expr)))
+          val isOutputEmR = sel_1q.outputList.forall(expr =>
+            sel_1a.outputList.exists(_.semanticEquals(expr)))
+          val isOutputRmE = sel_1a.outputList.forall(expr =>
+            sel_1q.outputList.exists(_.semanticEquals(expr)))
+
+          if (r2eJoinsMatch) {
+            if (isPredicateEmR && isOutputEmR && isOutputRmE && rejoin.isEmpty) {
+              Seq(sel_1a) // no compensation needed
+            } else {
+              val tChildren = new collection.mutable.ArrayBuffer[ModularPlan]()
+              val tAliasMap = new collection.mutable.HashMap[Int, String]()
+
+              val updatedOutList: Seq[NamedExpression] = updateDuplicateColumns(sel_1a)
+              val usel_1a = sel_1a.copy(outputList = updatedOutList)
+              tChildren += usel_1a
+              tAliasMap += (tChildren.indexOf(usel_1a) -> rewrite.newSubsumerName())
+
+              sel_1q.children.zipWithIndex.foreach {
+                case (childe, idx) =>
+                  if (e2r.get(idx).isEmpty) {
+                    tChildren += childe
+                    sel_1q.aliasMap.get(idx).map(x => tAliasMap += (tChildren.indexOf(childe) -> x))
+                  }
+              }
+
+              val tJoinEdges = sel_1q.joinEdges.collect {
+                case JoinEdge(le, re, joinType) =>
+                  (e2r.get(le), e2r.get(re)) match {
+                    case (Some(lr), None) =>
+                      JoinEdge(
+                        0,
+                        tChildren.indexOf(sel_1q.children(re)),
+                        joinType)
+                    case (None, None) =>
+                      JoinEdge(
+                        tChildren.indexOf(sel_1q.children(le)),
+                        tChildren.indexOf(sel_1q.children(re)),
+                        joinType)
+                    case (None, Some(rr)) =>
+                      JoinEdge(
+                        tChildren.indexOf(sel_1q.children(le)),
+                        0,
+                        joinType)
+                    case _ =>
+                      null.asInstanceOf[JoinEdge]
+                  }
+              }
+              val tPredicateList = sel_1q.predicateList.filter { p =>
+                !sel_1a.predicateList.exists(_.semanticEquals(p)) }
+                val wip = sel_1q.copy(
+                  predicateList = tPredicateList,
+                  children = tChildren,
+                  joinEdges = tJoinEdges.filter(_ != null),
+                  aliasMap = tAliasMap.toMap)
+
+                val done = factorOutSubsumer(wip, usel_1a, wip.aliasMap)
+                Seq(done)
+            }
+          } else Nil
+        } else Nil
+
+      case (
+        sel_3a @ modular.Select(_, _, _, _, _, _, _, _, _, _),
+        sel_3q @ modular.Select(_, _, _, _, _, _, _, _, _, _), None)
+        if sel_3a.children.forall(_.isInstanceOf[GroupBy]) &&
+           sel_3q.children.forall(_.isInstanceOf[GroupBy]) =>
+        val isPredicateRmE = sel_3a.predicateList.isEmpty ||
+                             sel_3a.predicateList.forall(expr =>
+                               sel_3q.predicateList.exists(_.semanticEquals(expr)))
+        val isPredicateEmdR = sel_3q.predicateList.isEmpty ||
+                              sel_3q.predicateList.forall(expr =>
+                                sel_3a.predicateList.exists(_.semanticEquals(expr)) ||
+                                isDerivable(expr, sel_3a.outputList, sel_3q, sel_3a, None))
+        val isOutputEdR = sel_3q.outputList.forall(expr =>
+          isDerivable(expr, sel_3a.outputList, sel_3q, sel_3a, None))
+        val isSingleChild = sel_3a.children.length == 1 && sel_3q.children.length == 1
+
+        if (isPredicateRmE && isPredicateEmdR && isOutputEdR && isSingleChild) {
+          val isPredicateEmR = sel_3q.predicateList.isEmpty ||
+                               sel_3q.predicateList.forall(expr =>
+                                 sel_3a.predicateList.exists(_.semanticEquals(expr)))
+          val isOutputRmE = sel_3a.outputList.forall(expr =>
+            isDerivable(expr, sel_3q.outputList, sel_3a, sel_3q, None))
+          val isOutputEmR = sel_3q.outputList.forall(expr =>
+            isDerivable(expr, sel_3a.outputList, sel_3q, sel_3a, None))
+
+          if (isPredicateEmR && isOutputEmR && isOutputRmE) {
+            Seq(sel_3a) // no compensation needed
+          } else if (isPredicateEmR && isOutputEmR) {
+            val sel_3q_exp = sel_3q.transformExpressions({
+              case a: Alias => sel_3a.outputList
+                .find { a1 =>
+                  a1.isInstanceOf[Alias] &&
+                  a1.asInstanceOf[Alias].child.semanticEquals(a.child)
+                }.map(_.toAttribute).get
+            })
+            val wip = sel_3q_exp.copy(
+              children = Seq(sel_3a),
+              aliasMap = Seq(0 -> rewrite.newSubsumerName()).toMap)
+            val done = factorOutSubsumer(wip, sel_3a, wip.aliasMap)
+            Seq(done)
+          } else {
+            Nil
+          }
+        } else Nil
+
+      case _ => Nil
+    }
+  }
+
+  private def updateDuplicateColumns(sel_1a: Select) = {
+    val duplicateNameCols = sel_1a.outputList.groupBy(_.name).filter(_._2.length > 1).flatMap(_._2)
+      .toList
+    val updatedOutList = sel_1a.outputList.map { col =>
+      if (duplicateNameCols.contains(col)) {
+        Alias(col, col.qualifiedName)(exprId = col.exprId)
+      } else {
+        col
+      }
+    }
+    updatedOutList
+  }
+}
+
+object GroupbyGroupbyNoChildDelta extends DefaultMatchPattern {
+  def apply(
+      subsumer: ModularPlan,
+      subsumee: ModularPlan,
+      compensation: Option[ModularPlan],
+      rewrite: QueryRewrite): Seq[ModularPlan] = {
+    (subsumer, subsumee, compensation) match {
+      case (
+        gb_2a @ modular.GroupBy(_, _, _, _, _, _, _, _),
+        gb_2q @ modular.GroupBy(_, _, _, _, _, _, _, _),
+        None) =>
+        val isGroupingEmR = gb_2q.predicateList.forall(expr =>
+          gb_2a.predicateList.exists(_.semanticEquals(expr)))
+        val isGroupingRmE = gb_2a.predicateList.forall(expr =>
+          gb_2q.predicateList.exists(_.semanticEquals(expr)))
+        if (isGroupingEmR && isGroupingRmE) {
+          val isOutputEmR = gb_2q.outputList.forall {
+            case a @ Alias(_, _) =>
+              gb_2a.outputList.exists{a1 =>
+                a1.isInstanceOf[Alias] && a1.asInstanceOf[Alias].child.semanticEquals(a.child)
+              }
+            case exp => gb_2a.outputList.exists(_.semanticEquals(exp))
+          }
+
+          if (isOutputEmR) {
+            // Mappings of output of two plans by checking semantic equals.
+            val mappings = gb_2a.outputList.zipWithIndex.map { case(exp, index) =>
+              (exp, gb_2q.outputList.find {
+                case a: Alias if exp.isInstanceOf[Alias] =>
+                  a.child.semanticEquals(exp.children.head)
+                case a: Alias => a.child.semanticEquals(exp)
+                case other => other.semanticEquals(exp)
+              }.getOrElse(gb_2a.outputList(index)))
+            }
+
+            val oList = mappings.map{case (out1, out2) =>
+              if (out1.name != out2.name) out1 match {
+                case alias: Alias => Alias(alias.child, out2.name)(exprId = alias.exprId)
+                case _ => Alias(out1, out2.name)(exprId = out2.exprId)
+              } else out1
+            }
+
+            Seq(gb_2a.copy(outputList = oList))
+          } else {
+            Nil
+          }
+        } else {
+          val aliasMap = AttributeMap(gb_2a.outputList.collect { case a: Alias =>
+            (a.toAttribute, a)})
+          if (isGroupingEmR) {
+            Utils.tryMatch(
+              gb_2a, gb_2q, aliasMap).flatMap {
+              case g: GroupBy =>
+                Some(g.copy(child = g.child.withNewChildren(
+                  g.child.children.map {
+                    case modular.Select(_, _, _, _, _, _, _, _, _, _) => gb_2a;
+                    case other => other
+                  })));
+              case _ => None}.map(Seq(_)).getOrElse(Nil)
+          } else {
+            Nil
+          }
+        }
+
+      case _ => Nil
+    }
+  }
+}
+
+object GroupbyGroupbySelectOnlyChildDelta extends DefaultMatchPattern with PredicateHelper {
+  private def isDerivable(
+      exprE: Expression,
+      exprListR: Seq[Expression],
+      subsumee: ModularPlan,
+      subsumer: ModularPlan,
+      compensation: Option[ModularPlan]) = {
+    if (subsumee.asInstanceOf[GroupBy].predicateList.contains(exprE)) {
+      if (exprListR.exists(_.semanticEquals(exprE)) || canEvaluate(exprE, exprListR)) true
+      else false
+    } else if (compensation.getOrElse(throw new RuntimeException("compensation cannot be None"))
+      .asInstanceOf[Select].predicateList.contains(exprE)) {
+      if (canEvaluate(exprE, exprListR) || exprListR.exists(_.semanticEquals(exprE))) true
+      else false
+    } else {
+      false
+    }
+  }
+
+  def apply(
+      subsumer: ModularPlan,
+      subsumee: ModularPlan,
+      compensation: Option[ModularPlan],
+      rewrite: QueryRewrite): Seq[ModularPlan] = {
+    val aggInputEinR = subsumee.expressions
+      .collect { case agg: aggregate.AggregateExpression => AttributeSet(Seq(agg))
+        .subsetOf(subsumer.outputSet)
+      }.forall(identity)
+    val compensationSelectOnly = !compensation.map { _.collect { case n => n.getClass } }
+      .exists(_.contains(modular.GroupBy))
+
+    (subsumer, subsumee, compensation, aggInputEinR, compensationSelectOnly) match {
+      case (
+        gb_2a @ modular.GroupBy(_, _, _, _, _, _, _, _),
+        gb_2q @ modular.GroupBy(_, _, _, _, _, _, _, _),
+        Some(sel_1c1 @ modular.Select(_, _, _, _, _, _, _, _, _, _)),
+        true,
+        true)
+        if !gb_2q.flags.hasFlag(EXPAND) && !gb_2a.flags.hasFlag(EXPAND) =>
+
+        val rejoinOutputList = sel_1c1.children.tail.flatMap(_.output)
+        val isGroupingEdR = gb_2q.predicateList.forall(expr =>
+          isDerivable(expr, gb_2a.predicateList ++ rejoinOutputList, gb_2q, gb_2a, compensation))
+        val needRegrouping = !gb_2a.predicateList.forall(gb_2q.predicateList.contains)
+        val canPullup = sel_1c1.predicateList.forall(expr =>
+          isDerivable(expr, gb_2a.predicateList ++ rejoinOutputList, gb_2q, gb_2a, compensation))
+        val isAggEmR = gb_2q.outputList.collect {
+          case agg: aggregate.AggregateExpression =>
+            gb_2a.outputList.exists(_.semanticEquals(agg))
+        }.forall(identity)
+
+        if (isGroupingEdR && ((!needRegrouping && isAggEmR) || needRegrouping) && canPullup) {
+          // pull up
+          val pullupOutputList = gb_2a.outputList.map(_.toAttribute) ++ rejoinOutputList
+          val sel_2c1 = sel_1c1.copy(
+            outputList = pullupOutputList,
+            inputList = pullupOutputList,
+            children = sel_1c1.children.map {
+              case s: Select => gb_2a
+              case other => other })
+
+          if (rejoinOutputList.isEmpty) {
+            val aliasMap = AttributeMap(gb_2a.outputList.collect {
+              case a: Alias => (a.toAttribute, a) })
+            Utils.tryMatch(gb_2a, gb_2q, aliasMap).flatMap {
+              case g: GroupBy => Some(g.copy(child = sel_2c1));
+              case _ => None
+            }.map { wip =>
+              factorOutSubsumer(wip, gb_2a, sel_1c1.aliasMap)
+            }.map(Seq(_))
+             .getOrElse(Nil)
+          }
+          // TODO: implement regrouping with 1:N rejoin (rejoin tables being the "1" side)
+          // via catalog service
+          else if (!needRegrouping && isAggEmR) {
+            Seq(sel_2c1).map(wip => factorOutSubsumer(wip, gb_2a, sel_1c1.aliasMap))
+          } else Nil
+        } else Nil
+
+      case _ => Nil
+    }
+  }
+}
+
+object GroupbyGroupbyGroupbyChildDelta extends DefaultMatchPattern {
+  def apply(
+      subsumer: ModularPlan,
+      subsumee: ModularPlan,
+      compensation: Option[ModularPlan],
+      rewrite: QueryRewrite): Seq[ModularPlan] = {
+    val groupbys = compensation.map { _.collect { case g: GroupBy => g } }.getOrElse(Nil).toSet
+
+    (subsumer, subsumee, groupbys.nonEmpty) match {
+      case (
+        modular.Select(_, _, _, _, _, _, _, _, _, _),
+        modular.Select(_, _, _, _, _, _, _, _, _, _),
+        true) =>
+        // TODO: implement me
+        Nil
+
+      case _ => Nil
+    }
+  }
+}
+
+
+object SelectSelectSelectChildDelta extends DefaultMatchPattern {
+  def apply(
+      subsumer: ModularPlan,
+      subsumee: ModularPlan,
+      compensation: Option[ModularPlan],
+      rewrite: QueryRewrite): Seq[ModularPlan] = {
+    val compensationSelectOnly =
+      !compensation
+        .map { _.collect { case n => n.getClass } }
+        .exists(_.contains(modular.GroupBy))
+
+    (subsumer, subsumee, compensationSelectOnly) match {
+      case (
+        modular.Select(_, _, _, _, _, _, _, _, _, _),
+        modular.Select(_, _, _, _, _, _, _, _, _, _),
+        true) =>
+        // TODO: implement me
+        Nil
+      case _ => Nil
+    }
+  }
+}
+
+object SelectSelectGroupbyChildDelta extends DefaultMatchPattern with PredicateHelper {
+  private def isDerivable(
+      exprE: Expression,
+      exprListR: Seq[Expression],
+      subsumee: ModularPlan,
+      subsumer: ModularPlan,
+      compensation: Option[ModularPlan]) = {
+    Utils.isDerivable(
+      exprE: Expression,
+      exprListR: Seq[Expression],
+      subsumee: ModularPlan,
+      subsumer: ModularPlan,
+      compensation: Option[ModularPlan])
+  }
+
+  def apply(
+      subsumer: ModularPlan,
+      subsumee: ModularPlan,
+      compensation: Option[ModularPlan],
+      rewrite: QueryRewrite): Seq[ModularPlan] = {
+    (subsumer, subsumee, compensation, subsumer.children, subsumee.children) match {
+      case (
+        sel_3a@modular.Select(
+        _, _, Nil, _, _,
+        Seq(gb_2a@modular.GroupBy(_, _, _, _, _, _, _, _)), _, _, _, _),
+        sel_3q@modular.Select(
+        _, _, _, _, _,
+        Seq(gb_2q@modular.GroupBy(_, _, _, _, _, _, _, _)), _, _, _, _),
+        Some(gb_2c@modular.GroupBy(_, _, _, _, _, _, _, _)),
+        rchild :: Nil,
+        echild :: Nil) =>
+        val tbls_sel_3a = sel_3a.collect { case tbl: modular.LeafNode => tbl }
+        val tbls_sel_3q = sel_3q.collect { case tbl: modular.LeafNode => tbl }
+
+        val extrajoin = tbls_sel_3a.filterNot(tbls_sel_3q.contains)
+        val rejoin = tbls_sel_3q.filterNot(tbls_sel_3a.contains)
+        val rejoinOutputList = rejoin.flatMap(_.output)
+
+        val isPredicateRmE = sel_3a.predicateList.forall(expr =>
+          sel_3q.predicateList.exists(_.semanticEquals(expr)) ||
+          gb_2c.predicateList.exists(_.semanticEquals(expr)))
+        val isPredicateEmdR = sel_3q.predicateList
+          .forall(expr =>
+            sel_3a.predicateList.exists(_.semanticEquals(expr)) ||
+            isDerivable(
+              expr,
+              sel_3a.outputList ++ rejoinOutputList,
+              sel_3q,
+              sel_3a,
+              compensation))
+        val isOutputEdR = sel_3q.outputList
+          .forall(expr =>
+            isDerivable(
+              expr,
+              sel_3a.outputList ++ rejoinOutputList,
+              sel_3q,
+              sel_3a,
+              compensation))
+
+        val canSELPullup = gb_2c.child.isInstanceOf[Select] &&
+                           gb_2c.child.asInstanceOf[Select].predicateList
+                             .forall(expr =>
+                               isDerivable(
+                                 expr,
+                                 sel_3a.outputList ++ rejoinOutputList,
+                                 sel_3q,
+                                 sel_3a,
+                                 compensation))
+        val canGBPullup = gb_2c.predicateList
+          .forall(expr =>
+            isDerivable(
+              expr,
+              sel_3a.outputList ++ rejoinOutputList,
+              sel_3q,
+              sel_3a,
+              compensation))
+
+        if (extrajoin.isEmpty && isPredicateRmE &&
+            isPredicateEmdR &&
+            isOutputEdR &&
+            canSELPullup &&
+            canGBPullup) {
+          gb_2c.child match {
+            case s: Select =>
+              val sel_3c1 = s.withNewChildren(
+                s.children.map {
+                  case gb: GroupBy => sel_3a.setSkip()
+                  case other => other })
+              val gb_3c2 = gb_2c.copy(child = sel_3c1)
+
+              val aliasMap_exp = AttributeMap(
+                gb_2c.outputList.collect {
+                  case a: Alias => (a.toAttribute, a) })
+              val sel_3q_exp = sel_3q.transformExpressions({
+                case attr: Attribute if aliasMap_exp.contains(attr) => aliasMap_exp(attr)
+              })
+              // Mappings of output of two plans by checking semantic equals.
+              val mappings = sel_3q_exp.outputList.zipWithIndex.map { case(exp, index) =>
+                (exp, gb_2c.outputList.find {
+                  case a: Alias if exp.isInstanceOf[Alias] =>
+                    a.child.semanticEquals(exp.children.head)
+                  case a: Alias => a.child.semanticEquals(exp)
+                  case other => other.semanticEquals(exp)
+                }.getOrElse(gb_2c.outputList(index)))
+              }
+
+              val oList = for ((o1, o2) <- mappings) yield {
+                if (o1.name != o2.name) Alias(o2, o1.name)(exprId = o1.exprId) else o2
+              }
+
+              val wip = sel_3q_exp.copy(outputList = oList, children = Seq(gb_3c2))
+              val sel_3c3 = Some(factorOutSubsumer(wip, sel_3a, s.aliasMap))
+              sel_3c3.map(Seq(_)).getOrElse(Nil)
+
+            case _ => Nil
+          }
+        } else {
+          Nil
+        }
+
+      case _ => Nil
+    }
+  }
+}
+
+

http://git-wip-us.apache.org/repos/asf/carbondata/blob/bf73e9fe/datamap/mv/core/src/main/scala/org/apache/carbondata/mv/rewrite/MatchConditions.scala
----------------------------------------------------------------------
diff --git a/datamap/mv/core/src/main/scala/org/apache/carbondata/mv/rewrite/MatchConditions.scala b/datamap/mv/core/src/main/scala/org/apache/carbondata/mv/rewrite/MatchConditions.scala
new file mode 100644
index 0000000..2a4da27
--- /dev/null
+++ b/datamap/mv/core/src/main/scala/org/apache/carbondata/mv/rewrite/MatchConditions.scala
@@ -0,0 +1,28 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.carbondata.mv.rewrite
+
+// TODO: implement this to modularize DefaultMatchingFunctions
+object MatchConditions {
+}
+
+class MatchConditions(flags: Long) {
+  def hasFlag(flag: Long): Boolean = {
+    throw new UnsupportedOperationException
+  }
+}

http://git-wip-us.apache.org/repos/asf/carbondata/blob/bf73e9fe/datamap/mv/core/src/main/scala/org/apache/carbondata/mv/rewrite/MatchMaker.scala
----------------------------------------------------------------------
diff --git a/datamap/mv/core/src/main/scala/org/apache/carbondata/mv/rewrite/MatchMaker.scala b/datamap/mv/core/src/main/scala/org/apache/carbondata/mv/rewrite/MatchMaker.scala
new file mode 100644
index 0000000..2c5d8f4
--- /dev/null
+++ b/datamap/mv/core/src/main/scala/org/apache/carbondata/mv/rewrite/MatchMaker.scala
@@ -0,0 +1,47 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.carbondata.mv.rewrite
+
+import org.apache.spark.internal.Logging
+import org.apache.spark.sql.catalyst.trees.TreeNode
+
+abstract class MatchPattern[MatchingPlan <: TreeNode[MatchingPlan]] extends Logging {
+
+  def apply(
+      subsumer: MatchingPlan,
+      subsumee: MatchingPlan,
+      compensation: Option[MatchingPlan],
+      rewrite: QueryRewrite): Seq[MatchingPlan]
+
+}
+
+abstract class MatchMaker[MatchingPlan <: TreeNode[MatchingPlan]] {
+
+  /** Define a sequence of rules, to be overridden by the implementation. */
+  protected val patterns: Seq[MatchPattern[MatchingPlan]]
+
+  def execute(
+      subsumer: MatchingPlan,
+      subsumee: MatchingPlan,
+      compensation: Option[MatchingPlan],
+      rewrite: QueryRewrite): Iterator[MatchingPlan] = {
+    val iter = patterns.view.flatMap(_ (subsumer, subsumee, compensation, rewrite)).toIterator
+    iter
+
+  }
+}
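
For illustration, here is a minimal sketch (not part of this commit) of how a concrete matcher
could wire the patterns defined earlier into a MatchMaker. It assumes DefaultMatchPattern is a
MatchPattern[ModularPlan] and that the pattern objects are imported; SampleMatchMaker and the
chosen pattern order are hypothetical:

  // Illustrative only: combines match patterns from this commit into one matcher.
  object SampleMatchMaker extends MatchMaker[ModularPlan] {
    override protected val patterns: Seq[MatchPattern[ModularPlan]] =
      Seq(GroupbyGroupbyNoChildDelta,
          GroupbyGroupbySelectOnlyChildDelta,
          SelectSelectGroupbyChildDelta)
  }
  // execute() then lazily tries each pattern and yields candidate compensations:
  //   SampleMatchMaker.execute(subsumer, subsumee, compensation = None, rewrite)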

http://git-wip-us.apache.org/repos/asf/carbondata/blob/bf73e9fe/datamap/mv/core/src/main/scala/org/apache/carbondata/mv/rewrite/Navigator.scala
----------------------------------------------------------------------
diff --git a/datamap/mv/core/src/main/scala/org/apache/carbondata/mv/rewrite/Navigator.scala b/datamap/mv/core/src/main/scala/org/apache/carbondata/mv/rewrite/Navigator.scala
new file mode 100644
index 0000000..545920e
--- /dev/null
+++ b/datamap/mv/core/src/main/scala/org/apache/carbondata/mv/rewrite/Navigator.scala
@@ -0,0 +1,196 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.carbondata.mv.rewrite
+
+import org.apache.spark.sql.catalyst.expressions.{Attribute, AttributeMap, AttributeSet}
+
+import org.apache.carbondata.mv.datamap.{MVHelper, MVState}
+import org.apache.carbondata.mv.expressions.modular._
+import org.apache.carbondata.mv.plans.modular.{GroupBy, ModularPlan, Select}
+import org.apache.carbondata.mv.plans.modular
+
+private[mv] class Navigator(catalog: SummaryDatasetCatalog, session: MVState) {
+
+  def rewriteWithSummaryDatasets(plan: ModularPlan, rewrite: QueryRewrite): ModularPlan = {
+    val replaced = plan.transformAllExpressions {
+      case s: ModularSubquery =>
+        if (s.children.isEmpty) {
+          ScalarModularSubquery(
+            rewriteWithSummaryDatasetsCore(s.plan, rewrite), s.children, s.exprId)
+        }
+        else throw new UnsupportedOperationException(s"Rewrite expression $s isn't supported")
+      case o => o
+    }
+    rewriteWithSummaryDatasetsCore(replaced, rewrite)
+  }
+
+  def rewriteWithSummaryDatasetsCore(plan: ModularPlan, rewrite: QueryRewrite): ModularPlan = {
+    val rewrittenPlan = plan transformDown {
+      case currentFragment =>
+        if (currentFragment.rewritten || !currentFragment.isSPJGH) currentFragment
+        else {
+          val compensation =
+            (for { dataset <- catalog.lookupFeasibleSummaryDatasets(currentFragment).toStream
+                   subsumer <- session.modularizer.modularize(
+                     session.optimizer.execute(dataset.plan)).map(_.harmonized)
+                   subsumee <- unifySubsumee(currentFragment)
+                   comp <- subsume(
+                     unifySubsumer2(
+                       unifySubsumer1(
+                         subsumer,
+                         subsumee,
+                         dataset.relation),
+                       subsumee),
+                     subsumee, rewrite)
+                 } yield comp).headOption
+          compensation.map(_.setRewritten).getOrElse(currentFragment)
+        }
+    }
+    // If the plan has been rewritten but the datamap table has not been updated yet,
+    // update the datamap table in the plan.
+    if (rewrittenPlan.find(_.rewritten).isDefined) {
+      val updatedDataMapTablePlan = rewrittenPlan transform {
+        case s: Select =>
+          MVHelper.updateDataMap(s, rewrite)
+        case g: GroupBy =>
+          MVHelper.updateDataMap(g, rewrite)
+      }
+      // TODO Find a better way to set the rewritten flag, it may fail in some conditions.
+      val mapping =
+        rewrittenPlan.collect {case m: ModularPlan => m } zip
+        updatedDataMapTablePlan.collect {case m: ModularPlan => m}
+      mapping.foreach(f => if (f._1.rewritten) f._2.setRewritten())
+
+      updatedDataMapTablePlan
+
+    } else {
+      rewrittenPlan
+    }
+  }
+
+  def subsume(
+      subsumer: ModularPlan,
+      subsumee: ModularPlan,
+      rewrite: QueryRewrite): Option[ModularPlan] = {
+    if (subsumer.getClass == subsumee.getClass) {
+      (subsumer.children, subsumee.children) match {
+        case (Nil, Nil) => None
+        case (r, e) if r.forall(_.isInstanceOf[modular.LeafNode]) &&
+                       e.forall(_.isInstanceOf[modular.LeafNode]) =>
+          val iter = session.matcher.execute(subsumer, subsumee, None, rewrite)
+          if (iter.hasNext) Some(iter.next)
+          else None
+
+        case (rchild :: Nil, echild :: Nil) =>
+          val compensation = subsume(rchild, echild, rewrite)
+          val oiter = compensation.map {
+            case comp if comp.eq(rchild) =>
+              session.matcher.execute(subsumer, subsumee, None, rewrite)
+            case _ =>
+              session.matcher.execute(subsumer, subsumee, compensation, rewrite)
+          }
+          oiter.flatMap { case iter if iter.hasNext => Some(iter.next)
+                          case _ => None }
+
+        case _ => None
+      }
+    } else None
+  }
+
+  private def updateDatamap(rchild: ModularPlan, subsume: ModularPlan) = {
+    val update = rchild match {
+      case s: Select if s.dataMapTableRelation.isDefined =>
+        true
+      case g: GroupBy if g.dataMapTableRelation.isDefined =>
+        true
+      case _ => false
+    }
+
+    if (update) {
+      subsume match {
+        case s: Select =>
+          s.copy(children = Seq(rchild))
+
+        case g: GroupBy =>
+          g.copy(child = rchild)
+        case _ => subsume
+      }
+    } else {
+      subsume
+    }
+  }
+
+  // add Select operator as placeholder on top of subsumee to facilitate matching
+  def unifySubsumee(subsumee: ModularPlan): Option[ModularPlan] = {
+    subsumee match {
+      case gb @ modular.GroupBy(_, _, _, _,
+        modular.Select(_, _, _, _, _, _, _, _, _, _), _, _, _) =>
+        Some(
+          Select(gb.outputList, gb.outputList, Nil, Map.empty, Nil, gb :: Nil, gb.flags,
+            gb.flagSpec, Seq.empty))
+      case other => Some(other)
+    }
+  }
+
+  // add Select operator as placeholder on top of subsumer to facilitate matching
+  def unifySubsumer1(
+      subsumer: ModularPlan,
+      subsumee: ModularPlan,
+      dataMapRelation: ModularPlan): ModularPlan = {
+    // Update datamap table relation to the subsumer modular plan
+    val updatedSubsumer = subsumer match {
+      case s: Select => s.copy(dataMapTableRelation = Some(dataMapRelation))
+      case g: GroupBy => g.copy(dataMapTableRelation = Some(dataMapRelation))
+      case other => other
+    }
+    (updatedSubsumer, subsumee) match {
+      case (r @
+        modular.GroupBy(_, _, _, _, modular.Select(_, _, _, _, _, _, _, _, _, _), _, _, _),
+        modular.Select(_, _, _, _, _,
+          Seq(modular.GroupBy(_, _, _, _, modular.Select(_, _, _, _, _, _, _, _, _, _), _, _, _)),
+          _, _, _, _)
+        ) =>
+        modular.Select(
+          r.outputList, r.outputList, Nil, Map.empty, Nil, r :: Nil, r.flags,
+          r.flagSpec, Seq.empty).setSkip()
+      case _ => updatedSubsumer.setSkip()
+    }
+  }
+
+  def unifySubsumer2(subsumer: ModularPlan, subsumee: ModularPlan): ModularPlan = {
+    val rtables = subsumer.collect { case n: modular.LeafNode => n }
+    val etables = subsumee.collect { case n: modular.LeafNode => n }
+    val pairs = for {
+      rtable <- rtables
+      etable <- etables
+      if (rtable == etable)
+    } yield (rtable, etable)
+
+    pairs.foldLeft(subsumer) {
+      case (curSubsumer, pair) =>
+        val nxtSubsumer = curSubsumer.transform { case pair._1 => pair._2 }
+        val attributeSet = AttributeSet(pair._1.output)
+        val rewrites = AttributeMap(pair._1.output.zip(pair._2.output))
+        nxtSubsumer.transformUp {
+          case p => p.transformExpressions {
+            case a: Attribute if attributeSet contains a => rewrites(a).withQualifier(a.qualifier)
+          }
+        }
+    }
+  }
+}

http://git-wip-us.apache.org/repos/asf/carbondata/blob/bf73e9fe/datamap/mv/core/src/main/scala/org/apache/carbondata/mv/rewrite/QueryRewrite.scala
----------------------------------------------------------------------
diff --git a/datamap/mv/core/src/main/scala/org/apache/carbondata/mv/rewrite/QueryRewrite.scala b/datamap/mv/core/src/main/scala/org/apache/carbondata/mv/rewrite/QueryRewrite.scala
new file mode 100644
index 0000000..5039d66
--- /dev/null
+++ b/datamap/mv/core/src/main/scala/org/apache/carbondata/mv/rewrite/QueryRewrite.scala
@@ -0,0 +1,53 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.carbondata.mv.rewrite
+
+import java.util.concurrent.atomic.AtomicLong
+
+import org.apache.spark.sql.catalyst.plans.logical.LogicalPlan
+
+import org.apache.carbondata.mv.datamap.MVState
+import org.apache.carbondata.mv.plans.modular.ModularPlan
+
+/**
+ * The primary workflow for rewriting a relational query against registered summary
+ * datasets (MV datamaps), using Spark's optimizer and the modular plan framework.
+ */
+class QueryRewrite private (
+    state: MVState,
+    logical: LogicalPlan,
+    nextSubqueryId: AtomicLong) {
+  self =>
+
+  def this(state: MVState, logical: LogicalPlan) =
+    this(state, logical, new AtomicLong(0))
+
+  def newSubsumerName(): String = s"gen_subsumer_${nextSubqueryId.getAndIncrement()}"
+
+  lazy val optimizedPlan: LogicalPlan =
+    state.optimizer.execute(logical)
+
+  lazy val modularPlan: ModularPlan =
+    state.modularizer.modularize(optimizedPlan).next().harmonized
+
+  lazy val withSummaryData: ModularPlan =
+    state.navigator.rewriteWithSummaryDatasets(modularPlan, self)
+
+  lazy val toCompactSQL: String = withSummaryData.asCompactSQL
+
+  lazy val toOneLineSQL: String = withSummaryData.asOneLineSQL
+}
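
As a rough usage sketch, the lazy values above chain Spark optimization, modularization, and MV
rewrite, ending in SQL generation. Here `state` (an MVState) and `analyzedPlan` (an analyzed
LogicalPlan) are assumed to be provided by the caller; the driver code itself is hypothetical:

  val rewrite = new QueryRewrite(state, analyzedPlan)
  val rewritten: ModularPlan = rewrite.withSummaryData  // optimizedPlan -> modularPlan -> rewrite
  val rewrittenSql: String = rewrite.toCompactSQL       // SQL text of the rewritten plan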

http://git-wip-us.apache.org/repos/asf/carbondata/blob/bf73e9fe/datamap/mv/core/src/main/scala/org/apache/carbondata/mv/rewrite/SummaryDatasetCatalog.scala
----------------------------------------------------------------------
diff --git a/datamap/mv/core/src/main/scala/org/apache/carbondata/mv/rewrite/SummaryDatasetCatalog.scala b/datamap/mv/core/src/main/scala/org/apache/carbondata/mv/rewrite/SummaryDatasetCatalog.scala
new file mode 100644
index 0000000..c29c08f
--- /dev/null
+++ b/datamap/mv/core/src/main/scala/org/apache/carbondata/mv/rewrite/SummaryDatasetCatalog.scala
@@ -0,0 +1,168 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.carbondata.mv.rewrite
+
+import java.util.concurrent.locks.ReentrantReadWriteLock
+
+import org.apache.spark.sql.{DataFrame, SparkSession}
+import org.apache.spark.sql.catalyst.TableIdentifier
+import org.apache.spark.sql.catalyst.plans.logical.LogicalPlan
+import org.apache.spark.sql.execution.datasources.FindDataSourceTable
+import org.apache.spark.sql.parser.CarbonSpark2SqlParser
+
+import org.apache.carbondata.core.datamap.DataMapCatalog
+import org.apache.carbondata.core.datamap.status.DataMapStatusManager
+import org.apache.carbondata.core.metadata.schema.table.DataMapSchema
+import org.apache.carbondata.mv.datamap.{MVHelper, MVState}
+import org.apache.carbondata.mv.plans.modular.{Flags, ModularPlan, ModularRelation, Select}
+import org.apache.carbondata.mv.plans.util.Signature
+
+/** Holds a summary logical plan */
+private[mv] case class SummaryDataset(signature: Option[Signature],
+    plan: LogicalPlan,
+    dataMapSchema: DataMapSchema,
+    relation: ModularPlan)
+
+private[mv] class SummaryDatasetCatalog(sparkSession: SparkSession)
+  extends DataMapCatalog[SummaryDataset] {
+
+  @transient
+  private val summaryDatasets = new scala.collection.mutable.ArrayBuffer[SummaryDataset]
+
+  val mVState = new MVState(this)
+
+  @transient
+  private val registerLock = new ReentrantReadWriteLock
+
+  /**
+   * parser
+   */
+  lazy val parser = new CarbonSpark2SqlParser
+
+  /** Acquires a read lock on the catalog for the duration of `f`. */
+  private def readLock[A](f: => A): A = {
+    val lock = registerLock.readLock()
+    lock.lock()
+    try f finally {
+      lock.unlock()
+    }
+  }
+
+  /** Acquires a write lock on the catalog for the duration of `f`. */
+  private def writeLock[A](f: => A): A = {
+    val lock = registerLock.writeLock()
+    lock.lock()
+    try f finally {
+      lock.unlock()
+    }
+  }
+
+  /** Clears all summary tables. */
+  private[mv] def refresh(): Unit = {
+    writeLock {
+      summaryDatasets.clear()
+    }
+  }
+
+  /** Checks if the catalog is empty. */
+  private[mv] def isEmpty: Boolean = {
+    readLock {
+      summaryDatasets.isEmpty
+    }
+  }
+
+  /**
+   * Registers the summary dataset (MV datamap) defined by the given schema's CTAS query so that
+   * it can be used to rewrite matching queries.
+   */
+  private[mv] def registerSchema(dataMapSchema: DataMapSchema): Unit = {
+    writeLock {
+      // TODO Add mvfunction here, don't use preagg function
+      val updatedQuery = parser.addPreAggFunction(dataMapSchema.getCtasQuery)
+      val query = sparkSession.sql(updatedQuery)
+      val planToRegister = MVHelper.dropDummFuc(query.queryExecution.analyzed)
+      val modularPlan = mVState.modularizer.modularize(mVState.optimizer.execute(planToRegister))
+        .next()
+        .harmonized
+      val signature = modularPlan.signature
+      val identifier = dataMapSchema.getRelationIdentifier
+      val output = new FindDataSourceTable(sparkSession).apply(sparkSession.sessionState.catalog
+        .lookupRelation(TableIdentifier(identifier.getTableName, Some(identifier.getDatabaseName))))
+        .output
+      val relation = ModularRelation(identifier.getDatabaseName,
+        identifier.getTableName,
+        output,
+        Flags.NoFlags,
+        Seq.empty)
+      val select = Select(relation.outputList,
+        relation.outputList,
+        Seq.empty,
+        Seq((0, identifier.getTableName)).toMap,
+        Seq.empty,
+        Seq(relation),
+        Flags.NoFlags,
+        Seq.empty,
+        Seq.empty,
+        None)
+
+      summaryDatasets += SummaryDataset(signature, planToRegister, dataMapSchema, select)
+    }
+  }
+
+  /** Removes the datamap with the given name from the catalog. */
+  private[mv] def unregisterSchema(dataMapName: String): Unit = {
+    writeLock {
+      val dataIndex = summaryDatasets
+        .indexWhere(sd => sd.dataMapSchema.getDataMapName.equals(dataMapName))
+      require(dataIndex >= 0, s"Datamap $dataMapName is not registered.")
+      summaryDatasets.remove(dataIndex)
+    }
+  }
+
+
+  override def listAllSchema(): Array[SummaryDataset] = summaryDatasets.toArray
+
+  /** Returns feasible registered summary data sets for processing the given ModularPlan. */
+  private[mv] def lookupFeasibleSummaryDatasets(plan: ModularPlan): Seq[SummaryDataset] = {
+    readLock {
+      val sig = plan.signature
+      val statusDetails = DataMapStatusManager.getEnabledDataMapStatusDetails
+      // Only select the enabled datamaps for the query.
+      val enabledDataSets = summaryDatasets.filter{p =>
+        statusDetails.exists(_.getDataMapName.equalsIgnoreCase(p.dataMapSchema.getDataMapName))
+      }
+      val feasible = enabledDataSets.filter { x =>
+        (x.signature, sig) match {
+          case (Some(sig1), Some(sig2)) =>
+            if (sig1.groupby && sig2.groupby && sig1.datasets.subsetOf(sig2.datasets)) {
+              true
+            } else if (!sig1.groupby && !sig2.groupby && sig1.datasets.subsetOf(sig2.datasets)) {
+              true
+            } else {
+              false
+            }
+
+          case _ => false
+        }
+      }
+      // heuristics: more tables involved in a summary data set => higher query reduction factor
+      feasible.sortWith(_.signature.get.datasets.size > _.signature.get.datasets.size)
+    }
+  }
+}
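
The feasibility test in lookupFeasibleSummaryDatasets reduces to a signature comparison: both
plans must agree on whether they group by, and the summary dataset's base tables must be a subset
of the query's. A condensed restatement (illustrative only, assuming Signature exposes the
groupby flag and datasets set used above):

  def isFeasible(mv: Signature, query: Signature): Boolean =
    mv.groupby == query.groupby && mv.datasets.subsetOf(query.datasets)
  // Feasible candidates are then sorted so that MVs covering more tables are tried first.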


[06/50] [abbrv] carbondata git commit: [CARBONDATA-2454][DataMap] Add fpp property for bloom datamap

Posted by ma...@apache.org.
[CARBONDATA-2454][DataMap] Add fpp property for bloom datamap

Add an fpp (false positive probability) property to configure the bloom filter
used by the bloom datamap, for example:
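
The table, column, and size values below are illustrative; 'bloom_fpp' is the property added by
this change, while 'INDEX_COLUMNS' and 'BLOOM_SIZE' are assumed to be the existing datamap
properties:

  // Hedged sketch (Spark SQL via an existing SparkSession `spark`); fpp must be in (0, 1).
  spark.sql(
    """
      | CREATE DATAMAP dm_bloom ON TABLE sales
      | USING 'bloomfilter'
      | DMPROPERTIES ('INDEX_COLUMNS'='id', 'BLOOM_SIZE'='640000', 'BLOOM_FPP'='0.001')
    """.stripMargin)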

This closes #2279


Project: http://git-wip-us.apache.org/repos/asf/carbondata/repo
Commit: http://git-wip-us.apache.org/repos/asf/carbondata/commit/6b949716
Tree: http://git-wip-us.apache.org/repos/asf/carbondata/tree/6b949716
Diff: http://git-wip-us.apache.org/repos/asf/carbondata/diff/6b949716

Branch: refs/heads/spark-2.3
Commit: 6b949716bc3263dd3c3c80ba5097b3885f0af208
Parents: 2c0fa10
Author: xuchuanyin <xu...@hust.edu.cn>
Authored: Tue May 8 12:00:39 2018 +0800
Committer: Jacky Li <ja...@qq.com>
Committed: Wed May 9 10:23:58 2018 +0800

----------------------------------------------------------------------
 .../bloom/BloomCoarseGrainDataMapFactory.java   | 46 +++++++++++++++++++-
 .../datamap/bloom/BloomDataMapRefresher.java    |  6 ++-
 .../datamap/bloom/BloomDataMapWriter.java       |  7 ++-
 3 files changed, 53 insertions(+), 6 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/carbondata/blob/6b949716/datamap/bloom/src/main/java/org/apache/carbondata/datamap/bloom/BloomCoarseGrainDataMapFactory.java
----------------------------------------------------------------------
diff --git a/datamap/bloom/src/main/java/org/apache/carbondata/datamap/bloom/BloomCoarseGrainDataMapFactory.java b/datamap/bloom/src/main/java/org/apache/carbondata/datamap/bloom/BloomCoarseGrainDataMapFactory.java
index 2d43c40..4e62526 100644
--- a/datamap/bloom/src/main/java/org/apache/carbondata/datamap/bloom/BloomCoarseGrainDataMapFactory.java
+++ b/datamap/bloom/src/main/java/org/apache/carbondata/datamap/bloom/BloomCoarseGrainDataMapFactory.java
@@ -65,9 +65,18 @@ public class BloomCoarseGrainDataMapFactory extends DataMapFactory<CoarseGrainDa
    * and all the indexed value is distinct.
    */
   private static final int DEFAULT_BLOOM_FILTER_SIZE = 32000 * 20;
+  /**
+   * property for fpp(false-positive-probability) of bloom filter
+   */
+  private static final String BLOOM_FPP = "bloom_fpp";
+  /**
+   * default value for fpp of bloom filter
+   */
+  private static final double DEFAULT_BLOOM_FILTER_FPP = 0.00001d;
   private DataMapMeta dataMapMeta;
   private String dataMapName;
   private int bloomFilterSize;
+  private double bloomFilterFpp;
 
   public BloomCoarseGrainDataMapFactory(CarbonTable carbonTable, DataMapSchema dataMapSchema)
       throws MalformedDataMapCommandException {
@@ -79,6 +88,7 @@ public class BloomCoarseGrainDataMapFactory extends DataMapFactory<CoarseGrainDa
 
     List<CarbonColumn> indexedColumns = carbonTable.getIndexedColumns(dataMapSchema);
     this.bloomFilterSize = validateAndGetBloomFilterSize(dataMapSchema);
+    this.bloomFilterFpp = validateAndGetBloomFilterFpp(dataMapSchema);
     List<ExpressionType> optimizedOperations = new ArrayList<ExpressionType>();
     // todo: support more optimize operations
     optimizedOperations.add(ExpressionType.EQUALS);
@@ -118,19 +128,51 @@ public class BloomCoarseGrainDataMapFactory extends DataMapFactory<CoarseGrainDa
     return bloomFilterSize;
   }
 
+  /**
+   * Validate the bloom DataMap BLOOM_FPP property.
+   * 1. BLOOM_FPP is optional; 0.00001 is the default value.
+   * 2. BLOOM_FPP must be in the open range (0, 1).
+   */
+  private double validateAndGetBloomFilterFpp(DataMapSchema dmSchema)
+      throws MalformedDataMapCommandException {
+    String bloomFilterFppStr = dmSchema.getProperties().get(BLOOM_FPP);
+    if (StringUtils.isBlank(bloomFilterFppStr)) {
+      LOGGER.warn(
+          String.format("Bloom filter FPP is not configured for datamap %s, use default value %f",
+              dataMapName, DEFAULT_BLOOM_FILTER_FPP));
+      return DEFAULT_BLOOM_FILTER_FPP;
+    }
+    double bloomFilterFpp;
+    try {
+      bloomFilterFpp = Double.parseDouble(bloomFilterFppStr);
+    } catch (NumberFormatException e) {
+      throw new MalformedDataMapCommandException(
+          String.format("Invalid value of bloom filter fpp '%s', it should be an numeric",
+              bloomFilterFppStr));
+    }
+    if (bloomFilterFpp < 0 || bloomFilterFpp - 1 >= 0) {
+      throw new MalformedDataMapCommandException(
+          String.format("Invalid value of bloom filter fpp '%s', it should be in range 0~1",
+              bloomFilterFppStr));
+    }
+    return bloomFilterFpp;
+  }
+
   @Override
   public DataMapWriter createWriter(Segment segment, String shardName) throws IOException {
     LOGGER.info(
         String.format("Data of BloomCoarseGranDataMap %s for table %s will be written to %s",
             this.dataMapName, getCarbonTable().getTableName() , shardName));
     return new BloomDataMapWriter(getCarbonTable().getTablePath(), this.dataMapName,
-        this.dataMapMeta.getIndexedColumns(), segment, shardName, this.bloomFilterSize);
+        this.dataMapMeta.getIndexedColumns(), segment, shardName,
+        this.bloomFilterSize, this.bloomFilterFpp);
   }
 
   @Override
   public DataMapRefresher createRefresher(Segment segment, String shardName) throws IOException {
     return new BloomDataMapRefresher(getCarbonTable().getTablePath(), this.dataMapName,
-        this.dataMapMeta.getIndexedColumns(), segment, shardName, this.bloomFilterSize);
+        this.dataMapMeta.getIndexedColumns(), segment, shardName,
+        this.bloomFilterSize, this.bloomFilterFpp);
   }
 
   @Override

http://git-wip-us.apache.org/repos/asf/carbondata/blob/6b949716/datamap/bloom/src/main/java/org/apache/carbondata/datamap/bloom/BloomDataMapRefresher.java
----------------------------------------------------------------------
diff --git a/datamap/bloom/src/main/java/org/apache/carbondata/datamap/bloom/BloomDataMapRefresher.java b/datamap/bloom/src/main/java/org/apache/carbondata/datamap/bloom/BloomDataMapRefresher.java
index cb86c39..8e05133 100644
--- a/datamap/bloom/src/main/java/org/apache/carbondata/datamap/bloom/BloomDataMapRefresher.java
+++ b/datamap/bloom/src/main/java/org/apache/carbondata/datamap/bloom/BloomDataMapRefresher.java
@@ -36,8 +36,10 @@ import org.apache.carbondata.core.util.CarbonUtil;
 public class BloomDataMapRefresher extends BloomDataMapWriter implements DataMapRefresher {
 
   BloomDataMapRefresher(String tablePath, String dataMapName, List<CarbonColumn> indexColumns,
-      Segment segment, String shardName, int bloomFilterSize) throws IOException {
-    super(tablePath, dataMapName, indexColumns, segment, shardName, bloomFilterSize);
+      Segment segment, String shardName, int bloomFilterSize, double bloomFilterFpp)
+      throws IOException {
+    super(tablePath, dataMapName, indexColumns, segment, shardName,
+        bloomFilterSize, bloomFilterFpp);
   }
 
   @Override

http://git-wip-us.apache.org/repos/asf/carbondata/blob/6b949716/datamap/bloom/src/main/java/org/apache/carbondata/datamap/bloom/BloomDataMapWriter.java
----------------------------------------------------------------------
diff --git a/datamap/bloom/src/main/java/org/apache/carbondata/datamap/bloom/BloomDataMapWriter.java b/datamap/bloom/src/main/java/org/apache/carbondata/datamap/bloom/BloomDataMapWriter.java
index 4e07182..a55de11 100644
--- a/datamap/bloom/src/main/java/org/apache/carbondata/datamap/bloom/BloomDataMapWriter.java
+++ b/datamap/bloom/src/main/java/org/apache/carbondata/datamap/bloom/BloomDataMapWriter.java
@@ -50,6 +50,7 @@ public class BloomDataMapWriter extends DataMapWriter {
   private static final LogService LOG = LogServiceFactory.getLogService(
       BloomDataMapWriter.class.getCanonicalName());
   private int bloomFilterSize;
+  private double bloomFilterFpp;
   protected int currentBlockletId;
   private List<String> currentDMFiles;
   private List<DataOutputStream> currentDataOutStreams;
@@ -57,9 +58,11 @@ public class BloomDataMapWriter extends DataMapWriter {
   protected List<BloomFilter<byte[]>> indexBloomFilters;
 
   BloomDataMapWriter(String tablePath, String dataMapName, List<CarbonColumn> indexColumns,
-      Segment segment, String shardName, int bloomFilterSize) throws IOException {
+      Segment segment, String shardName, int bloomFilterSize, double bloomFilterFpp)
+      throws IOException {
     super(tablePath, dataMapName, indexColumns, segment, shardName);
     this.bloomFilterSize = bloomFilterSize;
+    this.bloomFilterFpp = bloomFilterFpp;
 
     currentDMFiles = new ArrayList<String>(indexColumns.size());
     currentDataOutStreams = new ArrayList<DataOutputStream>(indexColumns.size());
@@ -86,7 +89,7 @@ public class BloomDataMapWriter extends DataMapWriter {
     List<CarbonColumn> indexColumns = getIndexColumns();
     for (int i = 0; i < indexColumns.size(); i++) {
       indexBloomFilters.add(BloomFilter.create(Funnels.byteArrayFunnel(),
-          bloomFilterSize, 0.00001d));
+          bloomFilterSize, bloomFilterFpp));
     }
   }
 


[49/50] [abbrv] carbondata git commit: [CARBONDATA-2477]Fixed No dictionary Complex type with double/date/decimal data type

Posted by ma...@apache.org.
[CARBONDATA-2477]Fixed No dictionary Complex type with double/date/decimal data type

Problem: SDK table creation with a no-dictionary complex type fails when a complex type child contains a double/date/decimal data type.
Solution: The complex type validation rejected double/date/decimal data; this restriction is removed (see the schema sketch below).
Also changed the no-dictionary complex type storage format: lengths are now stored as short instead of int, to reduce storage space.
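
For instance, an Avro schema along these lines (field and record names are purely illustrative)
has a struct child of double type and can now be written through the SDK without a dictionary:

  // Minimal Avro schema sketch held as a Scala string; not taken from the commit's tests.
  val avroSchema =
    """{
      |  "type": "record", "name": "person",
      |  "fields": [
      |    {"name": "name", "type": "string"},
      |    {"name": "address", "type": {
      |      "type": "record", "name": "address_rec",
      |      "fields": [
      |        {"name": "street", "type": "string"},
      |        {"name": "height", "type": "double"}
      |      ]}}
      |  ]
      |}""".stripMargin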

This closes #2304


Project: http://git-wip-us.apache.org/repos/asf/carbondata/repo
Commit: http://git-wip-us.apache.org/repos/asf/carbondata/commit/6297ea0b
Tree: http://git-wip-us.apache.org/repos/asf/carbondata/tree/6297ea0b
Diff: http://git-wip-us.apache.org/repos/asf/carbondata/diff/6297ea0b

Branch: refs/heads/spark-2.3
Commit: 6297ea0b4092539fa0aa2c6f772d6984850c6110
Parents: cf1b50b
Author: kumarvishal09 <ku...@gmail.com>
Authored: Mon May 14 14:17:38 2018 +0530
Committer: ravipesala <ra...@gmail.com>
Committed: Thu May 17 19:05:30 2018 +0530

----------------------------------------------------------------------
 .../carbondata/core/datastore/ColumnType.java   | 14 ++-
 .../core/datastore/page/ColumnPage.java         | 82 ++++++++++++++++--
 .../core/datastore/page/ComplexColumnPage.java  | 16 +++-
 .../core/datastore/page/LazyColumnPage.java     | 13 ++-
 .../datastore/page/SafeFixLengthColumnPage.java | 25 +++++-
 .../datastore/page/SafeVarLengthColumnPage.java | 21 +++++
 .../page/UnsafeFixLengthColumnPage.java         | 39 ++++++++-
 .../datastore/page/VarLengthColumnPageBase.java | 90 ++++++++++++++++++--
 .../page/encoding/ColumnPageEncoder.java        |  9 +-
 .../scan/complextypes/PrimitiveQueryType.java   |  4 +-
 .../core/scan/complextypes/StructQueryType.java |  8 +-
 .../apache/carbondata/core/util/ByteUtil.java   |  9 ++
 ...ransactionalCarbonTableWithComplexType.scala | 76 ++++++++++++++++-
 .../processing/datatypes/ArrayDataType.java     |  7 ++
 .../processing/datatypes/GenericDataType.java   |  4 +
 .../processing/datatypes/PrimitiveDataType.java | 17 ++--
 .../processing/datatypes/StructDataType.java    | 30 +++----
 .../carbondata/processing/store/TablePage.java  |  6 +-
 .../sdk/file/CarbonWriterBuilder.java           |  9 --
 19 files changed, 407 insertions(+), 72 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/carbondata/blob/6297ea0b/core/src/main/java/org/apache/carbondata/core/datastore/ColumnType.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/datastore/ColumnType.java b/core/src/main/java/org/apache/carbondata/core/datastore/ColumnType.java
index f98307b..8bbf12d 100644
--- a/core/src/main/java/org/apache/carbondata/core/datastore/ColumnType.java
+++ b/core/src/main/java/org/apache/carbondata/core/datastore/ColumnType.java
@@ -31,7 +31,13 @@ public enum ColumnType {
   COMPLEX,
 
   // measure column, numerical data type
-  MEASURE;
+  MEASURE,
+
+  COMPLEX_STRUCT,
+
+  COMPLEX_ARRAY,
+
+  COMPLEX_PRIMITIVE;
 
   public static ColumnType valueOf(int ordinal) {
     if (ordinal == GLOBAL_DICTIONARY.ordinal()) {
@@ -44,6 +50,12 @@ public enum ColumnType {
       return COMPLEX;
     } else if (ordinal == MEASURE.ordinal()) {
       return MEASURE;
+    } else if (ordinal == COMPLEX_STRUCT.ordinal()) {
+      return COMPLEX_STRUCT;
+    } else if (ordinal == COMPLEX_ARRAY.ordinal()) {
+      return COMPLEX_ARRAY;
+    } else if (ordinal == COMPLEX_PRIMITIVE.ordinal()) {
+      return COMPLEX_PRIMITIVE;
     } else {
       throw new RuntimeException("create ColumnType with invalid ordinal: " + ordinal);
     }

http://git-wip-us.apache.org/repos/asf/carbondata/blob/6297ea0b/core/src/main/java/org/apache/carbondata/core/datastore/page/ColumnPage.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/datastore/page/ColumnPage.java b/core/src/main/java/org/apache/carbondata/core/datastore/page/ColumnPage.java
index 68269fb..69ed437 100644
--- a/core/src/main/java/org/apache/carbondata/core/datastore/page/ColumnPage.java
+++ b/core/src/main/java/org/apache/carbondata/core/datastore/page/ColumnPage.java
@@ -22,6 +22,7 @@ import java.math.BigDecimal;
 import java.util.BitSet;
 
 import org.apache.carbondata.core.constants.CarbonCommonConstants;
+import org.apache.carbondata.core.datastore.ColumnType;
 import org.apache.carbondata.core.datastore.TableSpec;
 import org.apache.carbondata.core.datastore.compression.Compressor;
 import org.apache.carbondata.core.datastore.compression.CompressorFactory;
@@ -153,6 +154,19 @@ public abstract class ColumnPage {
     }
   }
 
+  private static ColumnPage createFixLengthByteArrayPage(TableSpec.ColumnSpec columnSpec,
+      DataType dataType, int pageSize, int eachValueSize) {
+    if (unsafe) {
+      try {
+        return new UnsafeFixLengthColumnPage(columnSpec, dataType, pageSize, eachValueSize);
+      } catch (MemoryException e) {
+        throw new RuntimeException(e);
+      }
+    } else {
+      return new SafeFixLengthColumnPage(columnSpec, dataType, pageSize, eachValueSize);
+    }
+  }
+
   private static ColumnPage createPage(TableSpec.ColumnSpec columnSpec, DataType dataType,
       int pageSize) {
     if (DataTypes.isDecimal(dataType)) {
@@ -281,8 +295,31 @@ public abstract class ColumnPage {
   }
 
   private static ColumnPage newLVBytesPage(TableSpec.ColumnSpec columnSpec,
-      byte[] lvEncodedByteArray) throws MemoryException {
-    return VarLengthColumnPageBase.newLVBytesColumnPage(columnSpec, lvEncodedByteArray);
+      byte[] lvEncodedByteArray, int lvLength) throws MemoryException {
+    return VarLengthColumnPageBase.newLVBytesColumnPage(columnSpec, lvEncodedByteArray, lvLength);
+  }
+
+  private static ColumnPage newComplexLVBytesPage(TableSpec.ColumnSpec columnSpec,
+      byte[] lvEncodedByteArray, int lvLength) throws MemoryException {
+    return VarLengthColumnPageBase
+        .newComplexLVBytesColumnPage(columnSpec, lvEncodedByteArray, lvLength);
+  }
+
+  private static ColumnPage newFixedByteArrayPage(TableSpec.ColumnSpec columnSpec,
+      byte[] lvEncodedByteArray, int eachValueSize) throws MemoryException {
+    int pageSize = lvEncodedByteArray.length / eachValueSize;
+    ColumnPage fixLengthByteArrayPage =
+        createFixLengthByteArrayPage(columnSpec, columnSpec.getSchemaDataType(), pageSize,
+            eachValueSize);
+    byte[] data = null;
+    int offset = 0;
+    for (int i = 0; i < pageSize; i++) {
+      data = new byte[eachValueSize];
+      System.arraycopy(lvEncodedByteArray, offset, data, 0, eachValueSize);
+      fixLengthByteArrayPage.putBytes(i, data);
+      offset += eachValueSize;
+    }
+    return fixLengthByteArrayPage;
   }
 
   /**
@@ -607,6 +644,20 @@ public abstract class ColumnPage {
   public abstract byte[] getLVFlattenedBytePage() throws IOException;
 
   /**
+   * For complex type columns
+   * @return
+   * @throws IOException
+   */
+  public abstract byte[] getComplexChildrenLVFlattenedBytePage() throws IOException;
+
+  /**
+   * For complex type columns
+   * @return
+   * @throws IOException
+   */
+  public abstract byte[] getComplexParentFlattenedBytePage() throws IOException;
+
+  /**
    * For decimals
    */
   public abstract byte[] getDecimalPage();
@@ -638,6 +689,13 @@ public abstract class ColumnPage {
       return compressor.compressDouble(getDoublePage());
     } else if (DataTypes.isDecimal(dataType)) {
       return compressor.compressByte(getDecimalPage());
+    } else if (dataType == DataTypes.BYTE_ARRAY
+        && columnSpec.getColumnType() == ColumnType.COMPLEX_PRIMITIVE) {
+      return compressor.compressByte(getComplexChildrenLVFlattenedBytePage());
+    } else if (dataType == DataTypes.BYTE_ARRAY && (
+        columnSpec.getColumnType() == ColumnType.COMPLEX_STRUCT
+            || columnSpec.getColumnType() == ColumnType.COMPLEX_ARRAY)) {
+      return compressor.compressByte(getComplexParentFlattenedBytePage());
     } else if (dataType == DataTypes.BYTE_ARRAY) {
       return compressor.compressByte(getLVFlattenedBytePage());
     } else {
@@ -676,12 +734,26 @@ public abstract class ColumnPage {
     } else if (storeDataType == DataTypes.DOUBLE) {
       double[] doubleData = compressor.unCompressDouble(compressedData, offset, length);
       return newDoublePage(columnSpec, doubleData);
+    } else if (storeDataType == DataTypes.BYTE_ARRAY
+        && columnSpec.getColumnType() == ColumnType.COMPLEX_PRIMITIVE) {
+      byte[] lvVarBytes = compressor.unCompressByte(compressedData, offset, length);
+      return newComplexLVBytesPage(columnSpec, lvVarBytes,
+          CarbonCommonConstants.SHORT_SIZE_IN_BYTE);
+    } else if (storeDataType == DataTypes.BYTE_ARRAY
+        && columnSpec.getColumnType() == ColumnType.COMPLEX_STRUCT) {
+      byte[] lvVarBytes = compressor.unCompressByte(compressedData, offset, length);
+      return newFixedByteArrayPage(columnSpec, lvVarBytes,
+          CarbonCommonConstants.SHORT_SIZE_IN_BYTE);
+    } else if (storeDataType == DataTypes.BYTE_ARRAY
+        && columnSpec.getColumnType() == ColumnType.COMPLEX_ARRAY) {
+      byte[] lvVarBytes = compressor.unCompressByte(compressedData, offset, length);
+      return newFixedByteArrayPage(columnSpec, lvVarBytes, CarbonCommonConstants.LONG_SIZE_IN_BYTE);
     } else if (storeDataType == DataTypes.BYTE_ARRAY) {
       byte[] lvVarBytes = compressor.unCompressByte(compressedData, offset, length);
-      return newLVBytesPage(columnSpec, lvVarBytes);
+      return newLVBytesPage(columnSpec, lvVarBytes, CarbonCommonConstants.INT_SIZE_IN_BYTE);
     } else {
-      throw new UnsupportedOperationException("unsupport uncompress column page: " +
-          meta.getStoreDataType());
+      throw new UnsupportedOperationException(
+          "unsupport uncompress column page: " + meta.getStoreDataType());
     }
   }
 

http://git-wip-us.apache.org/repos/asf/carbondata/blob/6297ea0b/core/src/main/java/org/apache/carbondata/core/datastore/page/ComplexColumnPage.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/datastore/page/ComplexColumnPage.java b/core/src/main/java/org/apache/carbondata/core/datastore/page/ComplexColumnPage.java
index 8cb18e9..07dc837 100644
--- a/core/src/main/java/org/apache/carbondata/core/datastore/page/ComplexColumnPage.java
+++ b/core/src/main/java/org/apache/carbondata/core/datastore/page/ComplexColumnPage.java
@@ -22,6 +22,7 @@ import java.util.Iterator;
 import java.util.List;
 
 import org.apache.carbondata.common.CarbonIterator;
+import org.apache.carbondata.core.datastore.ColumnType;
 
 // Represent a complex column page, e.g. Array, Struct type column
 public class ComplexColumnPage {
@@ -34,17 +35,20 @@ public class ComplexColumnPage {
   private List<ArrayList<byte[]>> complexColumnData;
 
   // depth is the number of column after complex type is expanded. It is from 1 to N
-  private final int depth;
-
   private final int pageSize;
 
-  public ComplexColumnPage(int pageSize, int depth) {
+  private int depth;
+
+  private List<ColumnType> complexColumnType;
+
+  public ComplexColumnPage(int pageSize, List<ColumnType> complexColumnType) {
     this.pageSize = pageSize;
-    this.depth = depth;
+    this.depth = complexColumnType.size();
     complexColumnData = new ArrayList<>(depth);
     for (int i = 0; i < depth; i++) {
       complexColumnData.add(new ArrayList<byte[]>());
     }
+    this.complexColumnType = complexColumnType;
   }
 
   public void putComplexData(int rowId, int depth, List<byte[]> value) {
@@ -79,4 +83,8 @@ public class ComplexColumnPage {
   public int getPageSize() {
     return pageSize;
   }
+
+  public ColumnType getComplexColumnType(int isDepth) {
+    return complexColumnType.get(isDepth);
+  }
 }
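
The constructor change above derives the page depth from a flattened list of ColumnType entries, one entry per expanded column of the complex type (parent first, children after it). Below is a minimal sketch, not part of the patch, of how such a list could be assembled for a simple struct column holding one string child and handed to ComplexColumnPage; the page size of 32000 is only an example, and the getDepth() accessor is assumed to be the existing one used by TablePage.

import java.util.ArrayList;
import java.util.List;

import org.apache.carbondata.core.datastore.ColumnType;
import org.apache.carbondata.core.datastore.page.ComplexColumnPage;

public class ComplexColumnPageSketch {
  public static void main(String[] args) {
    // One entry per expanded column: the struct parent first, then its primitive child.
    List<ColumnType> flattenedTypes = new ArrayList<>();
    flattenedTypes.add(ColumnType.COMPLEX_STRUCT);
    flattenedTypes.add(ColumnType.COMPLEX_PRIMITIVE);

    // Depth is now taken from the list size instead of being passed in explicitly.
    ComplexColumnPage page = new ComplexColumnPage(32000, flattenedTypes);
    System.out.println(page.getDepth());               // 2
    System.out.println(page.getComplexColumnType(0));  // COMPLEX_STRUCT
  }
}

In the real flow, TablePage builds the ColumnType list by calling GenericDataType.getChildrenType on the complex column, as shown later in this commit.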

http://git-wip-us.apache.org/repos/asf/carbondata/blob/6297ea0b/core/src/main/java/org/apache/carbondata/core/datastore/page/LazyColumnPage.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/datastore/page/LazyColumnPage.java b/core/src/main/java/org/apache/carbondata/core/datastore/page/LazyColumnPage.java
index ce8aaae..255e078 100644
--- a/core/src/main/java/org/apache/carbondata/core/datastore/page/LazyColumnPage.java
+++ b/core/src/main/java/org/apache/carbondata/core/datastore/page/LazyColumnPage.java
@@ -17,6 +17,7 @@
 
 package org.apache.carbondata.core.datastore.page;
 
+import java.io.IOException;
 import java.math.BigDecimal;
 
 import org.apache.carbondata.core.metadata.datatype.DataType;
@@ -171,7 +172,17 @@ public class LazyColumnPage extends ColumnPage {
   }
 
   @Override
-  public byte[] getLVFlattenedBytePage() {
+  public byte[] getLVFlattenedBytePage() throws IOException {
+    throw new UnsupportedOperationException("internal error");
+  }
+
+  @Override
+  public byte[] getComplexChildrenLVFlattenedBytePage() {
+    throw new UnsupportedOperationException("internal error");
+  }
+
+  @Override
+  public byte[] getComplexParentFlattenedBytePage() throws IOException {
     throw new UnsupportedOperationException("internal error");
   }
 

http://git-wip-us.apache.org/repos/asf/carbondata/blob/6297ea0b/core/src/main/java/org/apache/carbondata/core/datastore/page/SafeFixLengthColumnPage.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/datastore/page/SafeFixLengthColumnPage.java b/core/src/main/java/org/apache/carbondata/core/datastore/page/SafeFixLengthColumnPage.java
index 1e4445a..2304614 100644
--- a/core/src/main/java/org/apache/carbondata/core/datastore/page/SafeFixLengthColumnPage.java
+++ b/core/src/main/java/org/apache/carbondata/core/datastore/page/SafeFixLengthColumnPage.java
@@ -17,6 +17,7 @@
 
 package org.apache.carbondata.core.datastore.page;
 
+import java.io.IOException;
 import java.math.BigDecimal;
 
 import org.apache.carbondata.core.datastore.TableSpec;
@@ -37,11 +38,19 @@ public class SafeFixLengthColumnPage extends ColumnPage {
   private float[] floatData;
   private double[] doubleData;
   private byte[] shortIntData;
+  private byte[][] fixedLengthdata;
+
 
   SafeFixLengthColumnPage(TableSpec.ColumnSpec columnSpec, DataType dataType, int pageSize) {
     super(columnSpec, dataType, pageSize);
   }
 
+  SafeFixLengthColumnPage(TableSpec.ColumnSpec columnSpec, DataType dataType, int pageSize,
+      int eachRowSize) {
+    super(columnSpec, dataType, pageSize);
+    this.fixedLengthdata = new byte[pageSize][];
+  }
+
   /**
    * Set byte value at rowId
    */
@@ -87,7 +96,7 @@ public class SafeFixLengthColumnPage extends ColumnPage {
    */
   @Override
   public void putBytes(int rowId, byte[] bytes) {
-    throw new UnsupportedOperationException("invalid data type: " + dataType);
+    this.fixedLengthdata[rowId] = bytes;
   }
 
   @Override
@@ -173,7 +182,7 @@ public class SafeFixLengthColumnPage extends ColumnPage {
 
   @Override
   public byte[] getBytes(int rowId) {
-    throw new UnsupportedOperationException("invalid data type: " + dataType);
+    return this.fixedLengthdata[rowId];
   }
 
   /**
@@ -241,10 +250,20 @@ public class SafeFixLengthColumnPage extends ColumnPage {
   }
 
   @Override
-  public byte[] getLVFlattenedBytePage() {
+  public byte[] getLVFlattenedBytePage() throws IOException {
     throw new UnsupportedOperationException("invalid data type: " + dataType);
   }
 
+  @Override
+  public byte[] getComplexChildrenLVFlattenedBytePage() {
+    throw new UnsupportedOperationException("internal error");
+  }
+
+  @Override
+  public byte[] getComplexParentFlattenedBytePage() throws IOException {
+    throw new UnsupportedOperationException("internal error");
+  }
+
   /**
    * Set byte values to page
    */

http://git-wip-us.apache.org/repos/asf/carbondata/blob/6297ea0b/core/src/main/java/org/apache/carbondata/core/datastore/page/SafeVarLengthColumnPage.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/datastore/page/SafeVarLengthColumnPage.java b/core/src/main/java/org/apache/carbondata/core/datastore/page/SafeVarLengthColumnPage.java
index 782b9dc..7b1ad20 100644
--- a/core/src/main/java/org/apache/carbondata/core/datastore/page/SafeVarLengthColumnPage.java
+++ b/core/src/main/java/org/apache/carbondata/core/datastore/page/SafeVarLengthColumnPage.java
@@ -82,6 +82,27 @@ public class SafeVarLengthColumnPage extends VarLengthColumnPageBase {
   }
 
   @Override
+  public byte[] getComplexChildrenLVFlattenedBytePage() throws IOException {
+    ByteArrayOutputStream stream = new ByteArrayOutputStream();
+    DataOutputStream out = new DataOutputStream(stream);
+    for (byte[] byteArrayDatum : byteArrayData) {
+      out.writeShort((short)byteArrayDatum.length);
+      out.write(byteArrayDatum);
+    }
+    return stream.toByteArray();
+  }
+
+  @Override
+  public byte[] getComplexParentFlattenedBytePage() throws IOException {
+    ByteArrayOutputStream stream = new ByteArrayOutputStream();
+    DataOutputStream out = new DataOutputStream(stream);
+    for (byte[] byteArrayDatum : byteArrayData) {
+      out.write(byteArrayDatum);
+    }
+    return stream.toByteArray();
+  }
+
+  @Override
   public byte[][] getByteArrayPage() {
     return byteArrayData;
   }
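
getComplexChildrenLVFlattenedBytePage above flattens every child value with a 2-byte length prefix, where ordinary variable-length pages use a 4-byte prefix. The following self-contained sketch (JDK only, no CarbonData classes) shows that layout being written and read back, under the assumption that every value is shorter than 32 KB so its length fits in a signed short:

import java.io.ByteArrayOutputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

public class ShortLvRoundTrip {
  public static void main(String[] args) throws IOException {
    List<byte[]> values = Arrays.asList(
        "bob".getBytes(StandardCharsets.UTF_8),
        "bangalore".getBytes(StandardCharsets.UTF_8));

    // Encode: 2-byte big-endian length followed by the value bytes, one pair per row.
    ByteArrayOutputStream bos = new ByteArrayOutputStream();
    DataOutputStream out = new DataOutputStream(bos);
    for (byte[] value : values) {
      out.writeShort(value.length);
      out.write(value);
    }
    byte[] flattened = bos.toByteArray();

    // Decode: read a length, then that many bytes, until the buffer is drained.
    List<byte[]> decoded = new ArrayList<>();
    ByteBuffer buffer = ByteBuffer.wrap(flattened);
    while (buffer.hasRemaining()) {
      int length = buffer.getShort();
      byte[] value = new byte[length];
      buffer.get(value);
      decoded.add(value);
    }
    System.out.println(new String(decoded.get(1), StandardCharsets.UTF_8)); // bangalore
  }
}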

http://git-wip-us.apache.org/repos/asf/carbondata/blob/6297ea0b/core/src/main/java/org/apache/carbondata/core/datastore/page/UnsafeFixLengthColumnPage.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/datastore/page/UnsafeFixLengthColumnPage.java b/core/src/main/java/org/apache/carbondata/core/datastore/page/UnsafeFixLengthColumnPage.java
index c88dc0b..6847ab9 100644
--- a/core/src/main/java/org/apache/carbondata/core/datastore/page/UnsafeFixLengthColumnPage.java
+++ b/core/src/main/java/org/apache/carbondata/core/datastore/page/UnsafeFixLengthColumnPage.java
@@ -44,6 +44,8 @@ public class UnsafeFixLengthColumnPage extends ColumnPage {
   // base offset of memoryBlock
   private long baseOffset;
 
+  private int eachRowSize;
+
   private final long taskId = ThreadLocalTaskInfo.getCarbonTaskInfo().getTaskId();
 
   private static final int byteBits = DataTypes.BYTE.getSizeBits();
@@ -77,6 +79,19 @@ public class UnsafeFixLengthColumnPage extends ColumnPage {
     }
   }
 
+  UnsafeFixLengthColumnPage(TableSpec.ColumnSpec columnSpec, DataType dataType, int pageSize,
+      int eachRowSize)
+      throws MemoryException {
+    this(columnSpec, dataType, pageSize);
+    this.eachRowSize = eachRowSize;
+    if (dataType == DataTypes.BYTE_ARRAY) {
+      memoryBlock =
+          UnsafeMemoryManager.allocateMemoryWithRetry(taskId, (long) pageSize * eachRowSize);
+      baseAddress = memoryBlock.getBaseObject();
+      baseOffset = memoryBlock.getBaseOffset();
+    }
+  }
+
   @Override
   public void putByte(int rowId, byte value) {
     long offset = rowId << byteBits;
@@ -118,7 +133,11 @@ public class UnsafeFixLengthColumnPage extends ColumnPage {
 
   @Override
   public void putBytes(int rowId, byte[] bytes) {
-    throw new UnsupportedOperationException("invalid data type: " + dataType);
+    // copy the data to memory
+    long offset = (long)rowId * eachRowSize;
+    CarbonUnsafe.getUnsafe()
+        .copyMemory(bytes, CarbonUnsafe.BYTE_ARRAY_OFFSET, memoryBlock.getBaseObject(),
+            baseOffset + offset, bytes.length);
   }
 
   @Override
@@ -183,7 +202,14 @@ public class UnsafeFixLengthColumnPage extends ColumnPage {
 
   @Override
   public byte[] getBytes(int rowId) {
-    throw new UnsupportedOperationException("invalid data type: " + dataType);
+    // creating a row
+    byte[] data = new byte[eachRowSize];
+    //copy the row from memory block based on offset
+    // offset position will be index * each column value length
+    CarbonUnsafe.getUnsafe().copyMemory(memoryBlock.getBaseObject(),
+        baseOffset + ((long)rowId * eachRowSize), data,
+        CarbonUnsafe.BYTE_ARRAY_OFFSET, eachRowSize);
+    return data;
   }
 
   @Override public byte[] getDecimalPage() {
@@ -267,6 +293,15 @@ public class UnsafeFixLengthColumnPage extends ColumnPage {
   public byte[] getLVFlattenedBytePage() {
     throw new UnsupportedOperationException("invalid data type: " + dataType);
   }
+  @Override
+  public byte[] getComplexChildrenLVFlattenedBytePage() throws IOException {
+    throw new UnsupportedOperationException("invalid data type: " + dataType);
+  }
+
+  @Override
+  public byte[] getComplexParentFlattenedBytePage() {
+    throw new UnsupportedOperationException("internal error");
+  }
 
   @Override
   public void setBytePage(byte[] byteData) {

http://git-wip-us.apache.org/repos/asf/carbondata/blob/6297ea0b/core/src/main/java/org/apache/carbondata/core/datastore/page/VarLengthColumnPageBase.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/datastore/page/VarLengthColumnPageBase.java b/core/src/main/java/org/apache/carbondata/core/datastore/page/VarLengthColumnPageBase.java
index c6062c1..901758a 100644
--- a/core/src/main/java/org/apache/carbondata/core/datastore/page/VarLengthColumnPageBase.java
+++ b/core/src/main/java/org/apache/carbondata/core/datastore/page/VarLengthColumnPageBase.java
@@ -21,6 +21,7 @@ import java.io.IOException;
 import java.util.ArrayList;
 import java.util.List;
 
+import org.apache.carbondata.core.constants.CarbonCommonConstants;
 import org.apache.carbondata.core.datastore.TableSpec;
 import org.apache.carbondata.core.memory.CarbonUnsafe;
 import org.apache.carbondata.core.memory.MemoryBlock;
@@ -115,7 +116,8 @@ public abstract class VarLengthColumnPageBase extends ColumnPage {
     int size = decimalConverter.getSize();
     if (size < 0) {
       return getLVBytesColumnPage(columnSpec, lvEncodedBytes,
-          DataTypes.createDecimalType(columnSpec.getPrecision(), columnSpec.getScale()));
+          DataTypes.createDecimalType(columnSpec.getPrecision(), columnSpec.getScale()),
+          CarbonCommonConstants.INT_SIZE_IN_BYTE);
     } else {
       // Here the size is always fixed.
       return getDecimalColumnPage(columnSpec, lvEncodedBytes, size);
@@ -125,9 +127,17 @@ public abstract class VarLengthColumnPageBase extends ColumnPage {
   /**
    * Create a new column page based on the LV (Length Value) encoded bytes
    */
-  static ColumnPage newLVBytesColumnPage(TableSpec.ColumnSpec columnSpec, byte[] lvEncodedBytes)
-      throws MemoryException {
-    return getLVBytesColumnPage(columnSpec, lvEncodedBytes, DataTypes.BYTE_ARRAY);
+  static ColumnPage newLVBytesColumnPage(TableSpec.ColumnSpec columnSpec, byte[] lvEncodedBytes,
+      int lvLength) throws MemoryException {
+    return getLVBytesColumnPage(columnSpec, lvEncodedBytes, DataTypes.BYTE_ARRAY, lvLength);
+  }
+
+  /**
+   * Create a new column page based on the LV (Length Value) encoded bytes
+   */
+  static ColumnPage newComplexLVBytesColumnPage(TableSpec.ColumnSpec columnSpec,
+      byte[] lvEncodedBytes, int lvLength) throws MemoryException {
+    return getComplexLVBytesColumnPage(columnSpec, lvEncodedBytes, DataTypes.BYTE_ARRAY, lvLength);
   }
 
   private static ColumnPage getDecimalColumnPage(TableSpec.ColumnSpec columnSpec,
@@ -161,7 +171,7 @@ public abstract class VarLengthColumnPageBase extends ColumnPage {
   }
 
   private static ColumnPage getLVBytesColumnPage(TableSpec.ColumnSpec columnSpec,
-      byte[] lvEncodedBytes, DataType dataType)
+      byte[] lvEncodedBytes, DataType dataType, int lvLength)
       throws MemoryException {
     // extract length and data, set them to rowOffset and unsafe memory correspondingly
     int rowId = 0;
@@ -176,11 +186,48 @@ public abstract class VarLengthColumnPageBase extends ColumnPage {
       length = ByteUtil.toInt(lvEncodedBytes, lvEncodedOffset);
       rowOffset.add(offset);
       rowLength.add(length);
-      lvEncodedOffset += 4 + length;
+      lvEncodedOffset += lvLength + length;
+      rowId++;
+    }
+    rowOffset.add(offset);
+    VarLengthColumnPageBase page =
+        getVarLengthColumnPage(columnSpec, lvEncodedBytes, dataType, lvLength, rowId, rowOffset,
+            rowLength, offset);
+    return page;
+  }
+
+  private static ColumnPage getComplexLVBytesColumnPage(TableSpec.ColumnSpec columnSpec,
+      byte[] lvEncodedBytes, DataType dataType, int lvLength)
+      throws MemoryException {
+    // extract length and data, set them to rowOffset and unsafe memory correspondingly
+    int rowId = 0;
+    List<Integer> rowOffset = new ArrayList<>();
+    List<Integer> rowLength = new ArrayList<>();
+    int length;
+    int offset;
+    int lvEncodedOffset = 0;
+
+    // extract Length field in input and calculate total length
+    for (offset = 0; lvEncodedOffset < lvEncodedBytes.length; offset += length) {
+      length = ByteUtil.toShort(lvEncodedBytes, lvEncodedOffset);
+      rowOffset.add(offset);
+      rowLength.add(length);
+      lvEncodedOffset += lvLength + length;
       rowId++;
     }
     rowOffset.add(offset);
 
+    VarLengthColumnPageBase page =
+        getVarLengthColumnPage(columnSpec, lvEncodedBytes, dataType, lvLength, rowId, rowOffset,
+            rowLength, offset);
+    return page;
+  }
+
+  private static VarLengthColumnPageBase getVarLengthColumnPage(TableSpec.ColumnSpec columnSpec,
+      byte[] lvEncodedBytes, DataType dataType, int lvLength, int rowId, List<Integer> rowOffset,
+      List<Integer> rowLength, int offset) throws MemoryException {
+    int lvEncodedOffset;
+    int length;
     int numRows = rowId;
 
     VarLengthColumnPageBase page;
@@ -202,10 +249,9 @@ public abstract class VarLengthColumnPageBase extends ColumnPage {
     lvEncodedOffset = 0;
     for (int i = 0; i < numRows; i++) {
       length = rowLength.get(i);
-      page.putBytes(i, lvEncodedBytes, lvEncodedOffset + 4, length);
-      lvEncodedOffset += 4 + length;
+      page.putBytes(i, lvEncodedBytes, lvEncodedOffset + lvLength, length);
+      lvEncodedOffset += lvLength + length;
     }
-
     return page;
   }
 
@@ -353,6 +399,32 @@ public abstract class VarLengthColumnPageBase extends ColumnPage {
     return data;
   }
 
+  @Override public byte[] getComplexChildrenLVFlattenedBytePage() throws IOException {
+    // output LV encoded byte array
+    int offset = 0;
+    byte[] data = new byte[totalLength + pageSize * 2];
+    for (int rowId = 0; rowId < pageSize; rowId++) {
+      short length = (short) (rowOffset[rowId + 1] - rowOffset[rowId]);
+      ByteUtil.setShort(data, offset, length);
+      copyBytes(rowId, data, offset + 2, length);
+      offset += 2 + length;
+    }
+    return data;
+  }
+
+  @Override
+  public byte[] getComplexParentFlattenedBytePage() throws IOException {
+    // output LV encoded byte array
+    int offset = 0;
+    byte[] data = new byte[totalLength];
+    for (int rowId = 0; rowId < pageSize; rowId++) {
+      short length = (short) (rowOffset[rowId + 1] - rowOffset[rowId]);
+      copyBytes(rowId, data, offset, length);
+      offset += length;
+    }
+    return data;
+  }
+
   @Override
   public void convertValue(ColumnPageValueConverter codec) {
     throw new UnsupportedOperationException("invalid data type: " + dataType);

http://git-wip-us.apache.org/repos/asf/carbondata/blob/6297ea0b/core/src/main/java/org/apache/carbondata/core/datastore/page/encoding/ColumnPageEncoder.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/datastore/page/encoding/ColumnPageEncoder.java b/core/src/main/java/org/apache/carbondata/core/datastore/page/encoding/ColumnPageEncoder.java
index dfdca02..8bff5cc 100644
--- a/core/src/main/java/org/apache/carbondata/core/datastore/page/encoding/ColumnPageEncoder.java
+++ b/core/src/main/java/org/apache/carbondata/core/datastore/page/encoding/ColumnPageEncoder.java
@@ -141,15 +141,16 @@ public abstract class ColumnPageEncoder {
     Iterator<byte[][]> iterator = input.iterator();
     while (iterator.hasNext()) {
       byte[][] subColumnPage = iterator.next();
-      encodedPages[index++] = encodeChildColumn(subColumnPage);
+      encodedPages[index] = encodeChildColumn(subColumnPage, input.getComplexColumnType(index));
+      index++;
     }
     return encodedPages;
   }
 
-  private static EncodedColumnPage encodeChildColumn(byte[][] data)
+  private static EncodedColumnPage encodeChildColumn(byte[][] data, ColumnType complexDataType)
       throws IOException, MemoryException {
-    TableSpec.ColumnSpec spec = TableSpec.ColumnSpec.newInstance("complex_inner_column",
-        DataTypes.BYTE_ARRAY, ColumnType.COMPLEX);
+    TableSpec.ColumnSpec spec = TableSpec.ColumnSpec
+        .newInstance("complex_inner_column", DataTypes.BYTE_ARRAY, complexDataType);
     ColumnPage page = ColumnPage.wrapByteArrayPage(spec, data);
     ColumnPageEncoder encoder = new DirectCompressCodec(DataTypes.BYTE_ARRAY).createEncoder(null);
     return encoder.encode(page);

http://git-wip-us.apache.org/repos/asf/carbondata/blob/6297ea0b/core/src/main/java/org/apache/carbondata/core/scan/complextypes/PrimitiveQueryType.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/scan/complextypes/PrimitiveQueryType.java b/core/src/main/java/org/apache/carbondata/core/scan/complextypes/PrimitiveQueryType.java
index edae4da..899957e 100644
--- a/core/src/main/java/org/apache/carbondata/core/scan/complextypes/PrimitiveQueryType.java
+++ b/core/src/main/java/org/apache/carbondata/core/scan/complextypes/PrimitiveQueryType.java
@@ -96,7 +96,7 @@ public class PrimitiveQueryType extends ComplexQueryType implements GenericQuery
       int pageNumber, DataOutputStream dataOutputStream) throws IOException {
     byte[] currentVal = copyBlockDataChunk(rawColumnChunks, rowNumber, pageNumber);
     if (!this.isDictionary) {
-      dataOutputStream.writeInt(currentVal.length);
+      dataOutputStream.writeShort(currentVal.length);
     }
     dataOutputStream.write(currentVal);
   }
@@ -120,7 +120,7 @@ public class PrimitiveQueryType extends ComplexQueryType implements GenericQuery
       actualData = directDictionaryGenerator.getValueFromSurrogate(surrgateValue);
     } else if (!isDictionary) {
       // No Dictionary Columns
-      int size = dataBuffer.getInt();
+      int size = dataBuffer.getShort();
       byte[] value = new byte[size];
       dataBuffer.get(value, 0, size);
       if (dataType == DataTypes.DATE) {

http://git-wip-us.apache.org/repos/asf/carbondata/blob/6297ea0b/core/src/main/java/org/apache/carbondata/core/scan/complextypes/StructQueryType.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/scan/complextypes/StructQueryType.java b/core/src/main/java/org/apache/carbondata/core/scan/complextypes/StructQueryType.java
index 9ff8252..301eb5a 100644
--- a/core/src/main/java/org/apache/carbondata/core/scan/complextypes/StructQueryType.java
+++ b/core/src/main/java/org/apache/carbondata/core/scan/complextypes/StructQueryType.java
@@ -81,8 +81,8 @@ public class StructQueryType extends ComplexQueryType implements GenericQueryTyp
       int pageNumber, DataOutputStream dataOutputStream) throws IOException {
     byte[] input = copyBlockDataChunk(dimensionColumnDataChunks, rowNumber, pageNumber);
     ByteBuffer byteArray = ByteBuffer.wrap(input);
-    int childElement = byteArray.getInt();
-    dataOutputStream.writeInt(childElement);
+    int childElement = byteArray.getShort();
+    dataOutputStream.writeShort(childElement);
     if (childElement > 0) {
       for (int i = 0; i < childElement; i++) {
         children.get(i)
@@ -102,13 +102,11 @@ public class StructQueryType extends ComplexQueryType implements GenericQueryTyp
   }
 
   @Override public Object getDataBasedOnDataType(ByteBuffer dataBuffer) {
-    int childLength = dataBuffer.getInt();
+    int childLength = dataBuffer.getShort();
     Object[] fields = new Object[childLength];
     for (int i = 0; i < childLength; i++) {
       fields[i] =  children.get(i).getDataBasedOnDataType(dataBuffer);
     }
-
     return DataTypeUtil.getDataTypeConverter().wrapWithGenericRow(fields);
   }
-
 }

http://git-wip-us.apache.org/repos/asf/carbondata/blob/6297ea0b/core/src/main/java/org/apache/carbondata/core/util/ByteUtil.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/util/ByteUtil.java b/core/src/main/java/org/apache/carbondata/core/util/ByteUtil.java
index 661384c..1df60c1 100644
--- a/core/src/main/java/org/apache/carbondata/core/util/ByteUtil.java
+++ b/core/src/main/java/org/apache/carbondata/core/util/ByteUtil.java
@@ -521,6 +521,10 @@ public final class ByteUtil {
         (((int)bytes[offset + 2] & 0xff) << 8) + ((int)bytes[offset + 3] & 0xff);
   }
 
+  public static int toShort(byte[] bytes, int offset) {
+    return (((int)bytes[offset] & 0xff) << 8) + ((int)bytes[offset + 1] & 0xff);
+  }
+
   public static void setInt(byte[] data, int offset, int value) {
     data[offset] = (byte) (value >> 24);
     data[offset + 1] = (byte) (value >> 16);
@@ -528,6 +532,11 @@ public final class ByteUtil {
     data[offset + 3] = (byte) value;
   }
 
+  public static void setShort(byte[] data, int offset, int value) {
+    data[offset] = (byte) (value >> 8);
+    data[offset + 1] = (byte) value;
+  }
+
   /**
    * long => byte[]
    *
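
The two helpers added above pack a value into 2 bytes in big-endian order; toShort reads them back as an unsigned int, so values from 0 to 65535 round-trip. A tiny usage sketch:

import org.apache.carbondata.core.util.ByteUtil;

public class ShortUtilDemo {
  public static void main(String[] args) {
    byte[] buffer = new byte[2];
    ByteUtil.setShort(buffer, 0, 513);          // stores 0x02 0x01
    int restored = ByteUtil.toShort(buffer, 0); // reads the two bytes back as an unsigned value
    System.out.println(restored);               // 513
  }
}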

http://git-wip-us.apache.org/repos/asf/carbondata/blob/6297ea0b/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/createTable/TestNonTransactionalCarbonTableWithComplexType.scala
----------------------------------------------------------------------
diff --git a/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/createTable/TestNonTransactionalCarbonTableWithComplexType.scala b/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/createTable/TestNonTransactionalCarbonTableWithComplexType.scala
index ccfb231..7f9023b 100644
--- a/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/createTable/TestNonTransactionalCarbonTableWithComplexType.scala
+++ b/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/createTable/TestNonTransactionalCarbonTableWithComplexType.scala
@@ -17,9 +17,11 @@
 
 package org.apache.carbondata.spark.testsuite.createTable
 
-import java.io.{File}
+import java.io.File
 import java.util
+import java.util.ArrayList
 
+import scala.collection.mutable.ArrayBuffer
 
 import org.apache.avro
 import org.apache.commons.io.FileUtils
@@ -28,6 +30,7 @@ import org.apache.spark.sql.test.util.QueryTest
 import org.junit.Assert
 import org.scalatest.BeforeAndAfterAll
 import tech.allegro.schema.json2avro.converter.JsonAvroConverter
+
 import org.apache.carbondata.core.constants.CarbonCommonConstants
 import org.apache.carbondata.core.metadata.datatype.{DataTypes, StructField}
 import org.apache.carbondata.core.util.{CarbonProperties, CarbonUtil}
@@ -195,8 +198,8 @@ class TestNonTransactionalCarbonTableWithComplexType extends QueryTest with Befo
 
     val fld = new util.ArrayList[StructField]
     fld.add(new StructField("DoorNum",
-        DataTypes.createArrayType(DataTypes.createStructType(address)),
-        subFld))
+      DataTypes.createArrayType(DataTypes.createStructType(address)),
+      subFld))
     // array of struct of struct
     val doorNum = new util.ArrayList[StructField]
     doorNum.add(new StructField("FloorNum",
@@ -229,4 +232,71 @@ class TestNonTransactionalCarbonTableWithComplexType extends QueryTest with Befo
     // drop table should not delete the files
     cleanTestData()
   }
+
+  test("test multi level support : array of array of array of with Double data type") {
+    cleanTestData()
+    val mySchema =  """ {
+                      |	"name": "address",
+                      |	"type": "record",
+                      |	"fields": [
+                      |		{
+                      |			"name": "name",
+                      |			"type": "string"
+                      |		},
+                      |		{
+                      |			"name": "age",
+                      |			"type": "int"
+                      |		},
+                      |		{
+                      |   "name" :"my_address",
+                      |   "type" :{
+                      |							"name": "my_address",
+                      |							"type": "record",
+                      |							"fields": [
+                      |               {
+                      |									"name": "Temperaturetest",
+                      |									"type": "double"
+                      |								}
+                      |							]
+                      |       }
+                      |			}
+                      |	]
+                      |} """.stripMargin
+
+    val jsonvalue=
+      """{
+        |"name" :"babu",
+        |"age" :12,
+        |"my_address" :{ "Temperaturetest" :123 }
+        |}
+      """.stripMargin
+    val pschema= org.apache.avro.Schema.parse(mySchema)
+
+    val records=new JsonAvroConverter().convertToGenericDataRecord(jsonvalue.getBytes(CharEncoding.UTF_8),pschema)
+
+    val fieds = new Array[Field](3)
+    fieds(0)=new Field("name",DataTypes.STRING);
+    fieds(1)=new Field("age",DataTypes.INT)
+
+    val fld = new util.ArrayList[StructField]
+    fld.add(new StructField("Temperature", DataTypes.DOUBLE))
+    fieds(2) = new Field("my_address", "struct", fld)
+
+
+    val writer=CarbonWriter.builder().withSchema(new Schema(fieds)).outputPath(writerPath).buildWriterForAvroInput()
+    writer.write(records)
+    writer.close()
+    sql("DROP TABLE IF EXISTS sdkOutputTable")
+    sql(
+      s"""CREATE EXTERNAL TABLE sdkOutputTable STORED BY 'carbondata' LOCATION
+         |'$writerPath' """.stripMargin)
+
+    sql("select * from sdkOutputTable").show(false)
+
+    // TODO: Add a validation
+
+    sql("DROP TABLE sdkOutputTable")
+    // drop table should not delete the files
+    cleanTestData()
+  }
 }

http://git-wip-us.apache.org/repos/asf/carbondata/blob/6297ea0b/processing/src/main/java/org/apache/carbondata/processing/datatypes/ArrayDataType.java
----------------------------------------------------------------------
diff --git a/processing/src/main/java/org/apache/carbondata/processing/datatypes/ArrayDataType.java b/processing/src/main/java/org/apache/carbondata/processing/datatypes/ArrayDataType.java
index cc2619e..4ce80a6 100644
--- a/processing/src/main/java/org/apache/carbondata/processing/datatypes/ArrayDataType.java
+++ b/processing/src/main/java/org/apache/carbondata/processing/datatypes/ArrayDataType.java
@@ -23,6 +23,7 @@ import java.nio.ByteBuffer;
 import java.util.ArrayList;
 import java.util.List;
 
+import org.apache.carbondata.core.datastore.ColumnType;
 import org.apache.carbondata.core.devapi.DictionaryGenerationException;
 import org.apache.carbondata.core.keygenerator.KeyGenException;
 import org.apache.carbondata.core.keygenerator.KeyGenerator;
@@ -286,4 +287,10 @@ public class ArrayDataType implements GenericDataType<ArrayObject> {
   public GenericDataType<ArrayObject> deepCopy() {
     return new ArrayDataType(this.outputArrayIndex, this.dataCounter, this.children.deepCopy());
   }
+
+  @Override
+  public void getChildrenType(List<ColumnType> type) {
+    type.add(ColumnType.COMPLEX_ARRAY);
+    children.getChildrenType(type);
+  }
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/carbondata/blob/6297ea0b/processing/src/main/java/org/apache/carbondata/processing/datatypes/GenericDataType.java
----------------------------------------------------------------------
diff --git a/processing/src/main/java/org/apache/carbondata/processing/datatypes/GenericDataType.java b/processing/src/main/java/org/apache/carbondata/processing/datatypes/GenericDataType.java
index f48a91d..8b1ccf2 100644
--- a/processing/src/main/java/org/apache/carbondata/processing/datatypes/GenericDataType.java
+++ b/processing/src/main/java/org/apache/carbondata/processing/datatypes/GenericDataType.java
@@ -23,6 +23,7 @@ import java.nio.ByteBuffer;
 import java.util.ArrayList;
 import java.util.List;
 
+import org.apache.carbondata.core.datastore.ColumnType;
 import org.apache.carbondata.core.devapi.DictionaryGenerationException;
 import org.apache.carbondata.core.keygenerator.KeyGenException;
 import org.apache.carbondata.core.keygenerator.KeyGenerator;
@@ -155,4 +156,7 @@ public interface GenericDataType<T> {
    * clone self for multithread access (for complex type processing in table page)
    */
   GenericDataType<T> deepCopy();
+
+  void getChildrenType(List<ColumnType> type);
+
 }

http://git-wip-us.apache.org/repos/asf/carbondata/blob/6297ea0b/processing/src/main/java/org/apache/carbondata/processing/datatypes/PrimitiveDataType.java
----------------------------------------------------------------------
diff --git a/processing/src/main/java/org/apache/carbondata/processing/datatypes/PrimitiveDataType.java b/processing/src/main/java/org/apache/carbondata/processing/datatypes/PrimitiveDataType.java
index 481c811..7450b82 100644
--- a/processing/src/main/java/org/apache/carbondata/processing/datatypes/PrimitiveDataType.java
+++ b/processing/src/main/java/org/apache/carbondata/processing/datatypes/PrimitiveDataType.java
@@ -33,6 +33,7 @@ import org.apache.carbondata.core.cache.CacheType;
 import org.apache.carbondata.core.cache.dictionary.Dictionary;
 import org.apache.carbondata.core.cache.dictionary.DictionaryColumnUniqueIdentifier;
 import org.apache.carbondata.core.constants.CarbonCommonConstants;
+import org.apache.carbondata.core.datastore.ColumnType;
 import org.apache.carbondata.core.devapi.BiDictionary;
 import org.apache.carbondata.core.devapi.DictionaryGenerationException;
 import org.apache.carbondata.core.dictionary.client.DictionaryClient;
@@ -362,17 +363,17 @@ public class PrimitiveDataType implements GenericDataType<Object> {
 
   private void updateValueToByteStream(DataOutputStream dataOutputStream, byte[] value)
       throws IOException {
-    dataOutputStream.writeInt(value.length);
+    dataOutputStream.writeShort(value.length);
     dataOutputStream.write(value);
   }
 
   private void updateNullValue(DataOutputStream dataOutputStream, BadRecordLogHolder logHolder)
       throws IOException {
     if (this.carbonDimension.getDataType() == DataTypes.STRING) {
-      dataOutputStream.writeInt(CarbonCommonConstants.MEMBER_DEFAULT_VAL_ARRAY.length);
+      dataOutputStream.writeShort(CarbonCommonConstants.MEMBER_DEFAULT_VAL_ARRAY.length);
       dataOutputStream.write(CarbonCommonConstants.MEMBER_DEFAULT_VAL_ARRAY);
     } else {
-      dataOutputStream.writeInt(CarbonCommonConstants.EMPTY_BYTE_ARRAY.length);
+      dataOutputStream.writeShort(CarbonCommonConstants.EMPTY_BYTE_ARRAY.length);
       dataOutputStream.write(CarbonCommonConstants.EMPTY_BYTE_ARRAY);
     }
     String message = logHolder.getColumnMessageMap().get(carbonDimension.getColName());
@@ -396,8 +397,8 @@ public class PrimitiveDataType implements GenericDataType<Object> {
       KeyGenerator[] generator)
       throws IOException, KeyGenException {
     if (!this.isDictionary) {
-      int sizeOfData = byteArrayInput.getInt();
-      dataOutputStream.writeInt(sizeOfData);
+      int sizeOfData = byteArrayInput.getShort();
+      dataOutputStream.writeShort(sizeOfData);
       byte[] bb = new byte[sizeOfData];
       byteArrayInput.get(bb, 0, sizeOfData);
       dataOutputStream.write(bb);
@@ -438,7 +439,7 @@ public class PrimitiveDataType implements GenericDataType<Object> {
   @Override public void getColumnarDataForComplexType(List<ArrayList<byte[]>> columnsArray,
       ByteBuffer inputArray) {
     if (!isDictionary) {
-      byte[] key = new byte[inputArray.getInt()];
+      byte[] key = new byte[inputArray.getShort()];
       inputArray.get(key);
       columnsArray.get(outputArrayIndex).add(key);
     } else {
@@ -506,4 +507,8 @@ public class PrimitiveDataType implements GenericDataType<Object> {
 
     return dataType;
   }
+
+  public void getChildrenType(List<ColumnType> type) {
+    type.add(ColumnType.COMPLEX_PRIMITIVE);
+  }
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/carbondata/blob/6297ea0b/processing/src/main/java/org/apache/carbondata/processing/datatypes/StructDataType.java
----------------------------------------------------------------------
diff --git a/processing/src/main/java/org/apache/carbondata/processing/datatypes/StructDataType.java b/processing/src/main/java/org/apache/carbondata/processing/datatypes/StructDataType.java
index bb3da6c..b66eef7 100644
--- a/processing/src/main/java/org/apache/carbondata/processing/datatypes/StructDataType.java
+++ b/processing/src/main/java/org/apache/carbondata/processing/datatypes/StructDataType.java
@@ -23,6 +23,7 @@ import java.nio.ByteBuffer;
 import java.util.ArrayList;
 import java.util.List;
 
+import org.apache.carbondata.core.datastore.ColumnType;
 import org.apache.carbondata.core.devapi.DictionaryGenerationException;
 import org.apache.carbondata.core.keygenerator.KeyGenException;
 import org.apache.carbondata.core.keygenerator.KeyGenerator;
@@ -153,7 +154,7 @@ public class StructDataType implements GenericDataType<StructObject> {
 
   @Override public void writeByteArray(StructObject input, DataOutputStream dataOutputStream,
       BadRecordLogHolder logHolder) throws IOException, DictionaryGenerationException {
-    dataOutputStream.writeInt(children.size());
+    dataOutputStream.writeShort(children.size());
     if (input == null) {
       for (int i = 0; i < children.size(); i++) {
         children.get(i).writeByteArray(null, dataOutputStream, logHolder);
@@ -191,9 +192,8 @@ public class StructDataType implements GenericDataType<StructObject> {
   @Override public void parseComplexValue(ByteBuffer byteArrayInput,
       DataOutputStream dataOutputStream, KeyGenerator[] generator)
       throws IOException, KeyGenException {
-    int childElement = byteArrayInput.getInt();
-    dataOutputStream.writeInt(childElement);
-
+    short childElement = byteArrayInput.getShort();
+    dataOutputStream.writeShort(childElement);
     for (int i = 0; i < childElement; i++) {
       if (children.get(i) instanceof PrimitiveDataType) {
         if (children.get(i).getIsColumnDictionary()) {
@@ -254,17 +254,10 @@ public class StructDataType implements GenericDataType<StructObject> {
   @Override
   public void getColumnarDataForComplexType(List<ArrayList<byte[]>> columnsArray,
       ByteBuffer inputArray) {
-
-    ByteBuffer b = ByteBuffer.allocate(8);
-    int childElement = inputArray.getInt();
-    b.putInt(childElement);
-    if (childElement == 0) {
-      b.putInt(0);
-    } else {
-      b.putInt(children.get(0).getDataCounter());
-    }
+    ByteBuffer b = ByteBuffer.allocate(2);
+    int childElement = inputArray.getShort();
+    b.putShort((short)childElement);
     columnsArray.get(this.outputArrayIndex).add(b.array());
-
     for (int i = 0; i < childElement; i++) {
       if (children.get(i) instanceof PrimitiveDataType) {
         PrimitiveDataType child = ((PrimitiveDataType) children.get(i));
@@ -301,7 +294,7 @@ public class StructDataType implements GenericDataType<StructObject> {
    */
   @Override
   public void fillBlockKeySize(List<Integer> blockKeySizeWithComplex, int[] primitiveBlockKeySize) {
-    blockKeySizeWithComplex.add(8);
+    blockKeySizeWithComplex.add(2);
     for (int i = 0; i < children.size(); i++) {
       children.get(i).fillBlockKeySize(blockKeySizeWithComplex, primitiveBlockKeySize);
     }
@@ -327,4 +320,11 @@ public class StructDataType implements GenericDataType<StructObject> {
     }
     return new StructDataType(childrenClone, this.outputArrayIndex, this.dataCounter);
   }
+
+  public void getChildrenType(List<ColumnType> type) {
+    type.add(ColumnType.COMPLEX_STRUCT);
+    for (int i = 0; i < children.size(); i++) {
+      children.get(i).getChildrenType(type);
+    }
+  }
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/carbondata/blob/6297ea0b/processing/src/main/java/org/apache/carbondata/processing/store/TablePage.java
----------------------------------------------------------------------
diff --git a/processing/src/main/java/org/apache/carbondata/processing/store/TablePage.java b/processing/src/main/java/org/apache/carbondata/processing/store/TablePage.java
index 26a634b..5408193 100644
--- a/processing/src/main/java/org/apache/carbondata/processing/store/TablePage.java
+++ b/processing/src/main/java/org/apache/carbondata/processing/store/TablePage.java
@@ -50,7 +50,6 @@ import org.apache.carbondata.core.metadata.datatype.DataTypes;
 import org.apache.carbondata.core.util.DataTypeUtil;
 import org.apache.carbondata.processing.datatypes.GenericDataType;
 
-
 /**
  * Represent a page data for all columns, we store its data in columnar layout, so that
  * all processing apply to TablePage can be done in vectorized fashion.
@@ -205,8 +204,9 @@ public class TablePage {
 
     // initialize the page if first row
     if (rowId == 0) {
-      int depthInComplexColumn = complexDataType.getColsCount();
-      complexDimensionPages[index] = new ComplexColumnPage(pageSize, depthInComplexColumn);
+      List<ColumnType> complexColumnType = new ArrayList<>();
+      complexDataType.getChildrenType(complexColumnType);
+      complexDimensionPages[index] = new ComplexColumnPage(pageSize, complexColumnType);
     }
 
     int depthInComplexColumn = complexDimensionPages[index].getDepth();

http://git-wip-us.apache.org/repos/asf/carbondata/blob/6297ea0b/store/sdk/src/main/java/org/apache/carbondata/sdk/file/CarbonWriterBuilder.java
----------------------------------------------------------------------
diff --git a/store/sdk/src/main/java/org/apache/carbondata/sdk/file/CarbonWriterBuilder.java b/store/sdk/src/main/java/org/apache/carbondata/sdk/file/CarbonWriterBuilder.java
index a01f0d7..36be65f 100644
--- a/store/sdk/src/main/java/org/apache/carbondata/sdk/file/CarbonWriterBuilder.java
+++ b/store/sdk/src/main/java/org/apache/carbondata/sdk/file/CarbonWriterBuilder.java
@@ -459,7 +459,6 @@ public class CarbonWriterBuilder {
         }
         if (field.getChildren() != null && field.getChildren().size() > 0) {
           if (field.getDataType().getName().equalsIgnoreCase("ARRAY")) {
-            checkForUnsupportedDataTypes(field.getChildren().get(0).getDataType());
             // Loop through the inner columns and for a StructData
             DataType complexType =
                 DataTypes.createArrayType(field.getChildren().get(0).getDataType());
@@ -470,7 +469,6 @@ public class CarbonWriterBuilder {
             List<StructField> structFieldsArray =
                 new ArrayList<StructField>(field.getChildren().size());
             for (StructField childFld : field.getChildren()) {
-              checkForUnsupportedDataTypes(childFld.getDataType());
               structFieldsArray
                   .add(new StructField(childFld.getFieldName(), childFld.getDataType()));
             }
@@ -496,13 +494,6 @@ public class CarbonWriterBuilder {
     }
   }
 
-  private void checkForUnsupportedDataTypes(DataType dataType) {
-    if (dataType == DataTypes.DOUBLE || dataType == DataTypes.DATE || DataTypes
-        .isDecimal(dataType)) {
-      throw new RuntimeException("Unsupported data type: " + dataType.getName());
-    }
-  }
-
   /**
    * Save the schema of the {@param table} to {@param persistFilePath}
    * @param table table object containing schema


[30/50] [abbrv] carbondata git commit: [CARBONDATA-2459][DataMap] Add cache for bloom filter datamap

Posted by ma...@apache.org.
[CARBONDATA-2459][DataMap] Add cache for bloom filter datamap

Loading the bloom filter from the bloomindex file is slow. Adding a cache for this step should significantly improve query performance.

This closes #2300
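
A minimal sketch, not taken from the patch, of the caching pattern this commit describes: a Guava LoadingCache keyed by shard path plus indexed column, loading the corresponding bloomindex file on a miss and expiring entries that have not been accessed for a while. All class and method names below are hypothetical stand-ins; the actual implementation is the BloomDataMapCache class added further down.

import java.util.List;
import java.util.concurrent.TimeUnit;

import com.google.common.cache.CacheBuilder;
import com.google.common.cache.CacheLoader;
import com.google.common.cache.LoadingCache;

public class BloomIndexCacheSketch {

  // Stand-in for the per-blocklet bloom filter model held in the real cache.
  static final class BloomModel { }

  // Hypothetical cache key: one bloomindex file, i.e. shard path plus indexed column name.
  static final class Key {
    final String shardPath;
    final String column;
    Key(String shardPath, String column) { this.shardPath = shardPath; this.column = column; }
    @Override public boolean equals(Object o) {
      if (!(o instanceof Key)) return false;
      Key other = (Key) o;
      return shardPath.equals(other.shardPath) && column.equals(other.column);
    }
    @Override public int hashCode() { return 31 * shardPath.hashCode() + column.hashCode(); }
  }

  private final LoadingCache<Key, List<BloomModel>> cache = CacheBuilder.newBuilder()
      .maximumSize(1024)                      // bound the entry count; the real patch sizes by MB
      .expireAfterAccess(2, TimeUnit.HOURS)   // evict entries no query has touched recently
      .build(new CacheLoader<Key, List<BloomModel>>() {
        @Override public List<BloomModel> load(Key key) throws Exception {
          // Hypothetical loader: deserialize <column>.bloomindex under the shard path.
          return readBloomIndexFile(key);
        }
      });

  List<BloomModel> lookup(Key key) {
    return cache.getUnchecked(key);           // loads on first access, then serves from memory
  }

  private static List<BloomModel> readBloomIndexFile(Key key) {
    throw new UnsupportedOperationException("sketch only");
  }
}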


Project: http://git-wip-us.apache.org/repos/asf/carbondata/repo
Commit: http://git-wip-us.apache.org/repos/asf/carbondata/commit/d14c403f
Tree: http://git-wip-us.apache.org/repos/asf/carbondata/tree/d14c403f
Diff: http://git-wip-us.apache.org/repos/asf/carbondata/diff/d14c403f

Branch: refs/heads/spark-2.3
Commit: d14c403f6282ca8b574dae2fa5ab77caa5cf3c18
Parents: ffddba7
Author: xuchuanyin <xu...@hust.edu.cn>
Authored: Fri May 11 21:49:43 2018 +0800
Committer: Jacky Li <ja...@qq.com>
Committed: Sun May 13 02:05:30 2018 +0800

----------------------------------------------------------------------
 .../core/constants/CarbonCommonConstants.java   |  13 ++
 .../datamap/bloom/BloomCoarseGrainDataMap.java  | 108 +++------
 .../bloom/BloomCoarseGrainDataMapFactory.java   |   4 +
 .../datamap/bloom/BloomDataMapCache.java        | 232 +++++++++++++++++++
 .../datamap/bloom/BloomDataMapWriter.java       |   5 +-
 5 files changed, 283 insertions(+), 79 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/carbondata/blob/d14c403f/core/src/main/java/org/apache/carbondata/core/constants/CarbonCommonConstants.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/constants/CarbonCommonConstants.java b/core/src/main/java/org/apache/carbondata/core/constants/CarbonCommonConstants.java
index 56607b9..f3a821b 100644
--- a/core/src/main/java/org/apache/carbondata/core/constants/CarbonCommonConstants.java
+++ b/core/src/main/java/org/apache/carbondata/core/constants/CarbonCommonConstants.java
@@ -1729,6 +1729,19 @@ public final class CarbonCommonConstants {
   // Property to enable parallel datamap loading for a table
   public static final String CARBON_LOAD_DATAMAPS_PARALLEL = "carbon.load.datamaps.parallel.";
 
+  /**
+   * Cache size in MB for the bloom filter datamap. It is an integer greater than 0
+   * and is used during query.
+   */
+  @CarbonProperty
+  public static final String CARBON_QUERY_DATAMAP_BLOOM_CACHE_SIZE =
+      "carbon.query.datamap.bloom.cache.size";
+
+  /**
+   * Default cache size in MB for the bloom filter datamap.
+   */
+  public static final String CARBON_QUERY_DATAMAP_BLOOM_CACHE_SIZE_DEFAULT_VAL = "512";
+
   private CarbonCommonConstants() {
   }
 }
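
A short usage sketch for the new property, assuming the usual CarbonProperties accessors (getInstance, addProperty, getProperty); the 1024 MB value is only an example:

import org.apache.carbondata.core.constants.CarbonCommonConstants;
import org.apache.carbondata.core.util.CarbonProperties;

public class BloomCacheSizeConfigDemo {
  public static void main(String[] args) {
    // Raise the bloom datamap cache to 1 GB before running queries.
    CarbonProperties.getInstance()
        .addProperty(CarbonCommonConstants.CARBON_QUERY_DATAMAP_BLOOM_CACHE_SIZE, "1024");

    // Read it back, falling back to the 512 MB default if it was never set.
    String sizeInMb = CarbonProperties.getInstance().getProperty(
        CarbonCommonConstants.CARBON_QUERY_DATAMAP_BLOOM_CACHE_SIZE,
        CarbonCommonConstants.CARBON_QUERY_DATAMAP_BLOOM_CACHE_SIZE_DEFAULT_VAL);
    System.out.println("bloom filter datamap cache size (MB): " + sizeInMb);
  }
}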

http://git-wip-us.apache.org/repos/asf/carbondata/blob/d14c403f/datamap/bloom/src/main/java/org/apache/carbondata/datamap/bloom/BloomCoarseGrainDataMap.java
----------------------------------------------------------------------
diff --git a/datamap/bloom/src/main/java/org/apache/carbondata/datamap/bloom/BloomCoarseGrainDataMap.java b/datamap/bloom/src/main/java/org/apache/carbondata/datamap/bloom/BloomCoarseGrainDataMap.java
index 725d5cd..09de25e 100644
--- a/datamap/bloom/src/main/java/org/apache/carbondata/datamap/bloom/BloomCoarseGrainDataMap.java
+++ b/datamap/bloom/src/main/java/org/apache/carbondata/datamap/bloom/BloomCoarseGrainDataMap.java
@@ -17,13 +17,10 @@
 
 package org.apache.carbondata.datamap.bloom;
 
-import java.io.DataInputStream;
-import java.io.EOFException;
+import java.io.File;
 import java.io.IOException;
-import java.io.ObjectInputStream;
 import java.io.UnsupportedEncodingException;
 import java.util.ArrayList;
-import java.util.HashSet;
 import java.util.List;
 import java.util.Set;
 
@@ -45,13 +42,8 @@ import org.apache.carbondata.core.scan.expression.conditional.EqualToExpression;
 import org.apache.carbondata.core.scan.filter.resolver.FilterResolverIntf;
 import org.apache.carbondata.core.util.CarbonUtil;
 
-import com.google.common.collect.ArrayListMultimap;
-import com.google.common.collect.Multimap;
-import org.apache.commons.lang3.StringUtils;
-import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.PathFilter;
 
 /**
  * BloomDataCoarseGrainMap is constructed in blocklet level. For each indexed column,
@@ -62,15 +54,16 @@ import org.apache.hadoop.fs.PathFilter;
 public class BloomCoarseGrainDataMap extends CoarseGrainDataMap {
   private static final LogService LOGGER =
       LogServiceFactory.getLogService(BloomCoarseGrainDataMap.class.getName());
+  public static final String BLOOM_INDEX_SUFFIX = ".bloomindex";
   private Set<String> indexedColumn;
   private List<BloomDMModel> bloomIndexList;
-  private Multimap<String, List<BloomDMModel>> indexCol2BloomDMList;
-  public static final String BLOOM_INDEX_SUFFIX = ".bloomindex";
   private String shardName;
+  private BloomDataMapCache bloomDataMapCache;
+  private Path indexPath;
 
   @Override
   public void init(DataMapModel dataMapModel) throws IOException {
-    Path indexPath = FileFactory.getPath(dataMapModel.getFilePath());
+    this.indexPath = FileFactory.getPath(dataMapModel.getFilePath());
     this.shardName = indexPath.getName();
     FileSystem fs = FileFactory.getFileSystem(indexPath);
     if (!fs.exists(indexPath)) {
@@ -81,52 +74,11 @@ public class BloomCoarseGrainDataMap extends CoarseGrainDataMap {
       throw new IOException(
           String.format("Path %s for Bloom index dataMap must be a directory", indexPath));
     }
-
-    FileStatus[] indexFileStatus = fs.listStatus(indexPath, new PathFilter() {
-      @Override public boolean accept(Path path) {
-        return path.getName().endsWith(BLOOM_INDEX_SUFFIX);
-      }
-    });
-    indexedColumn = new HashSet<String>();
-    bloomIndexList = new ArrayList<BloomDMModel>();
-    indexCol2BloomDMList = ArrayListMultimap.create();
-    for (int i = 0; i < indexFileStatus.length; i++) {
-      String indexfilename = indexFileStatus[i].getPath().getName();
-      String indexCol =
-          indexfilename.substring(0, indexfilename.length() - BLOOM_INDEX_SUFFIX.length());
-      indexedColumn.add(indexCol);
-      List<BloomDMModel> models = readBloomIndex(indexFileStatus[i].getPath().toString());
-      bloomIndexList.addAll(models);
-      indexCol2BloomDMList.put(indexCol, models);
-    }
-    LOGGER.info("find bloom index datamap for column: "
-        + StringUtils.join(indexedColumn, ", "));
+    this.bloomDataMapCache = BloomDataMapCache.getInstance();
   }
 
-  private List<BloomDMModel> readBloomIndex(String indexFile) throws IOException {
-    LOGGER.info("read bloom index from file: " + indexFile);
-    List<BloomDMModel> bloomDMModelList = new ArrayList<BloomDMModel>();
-    DataInputStream dataInStream = null;
-    ObjectInputStream objectInStream = null;
-    try {
-      dataInStream = FileFactory.getDataInputStream(indexFile, FileFactory.getFileType(indexFile));
-      objectInStream = new ObjectInputStream(dataInStream);
-      try {
-        BloomDMModel model = null;
-        while ((model = (BloomDMModel) objectInStream.readObject()) != null) {
-          LOGGER.info("read bloom index: " + model);
-          bloomDMModelList.add(model);
-        }
-      } catch (EOFException e) {
-        LOGGER.info("read " + bloomDMModelList.size() + " bloom indices from " + indexFile);
-      }
-      return bloomDMModelList;
-    } catch (ClassNotFoundException e) {
-      LOGGER.error("Error occrus while reading bloom index");
-      throw new RuntimeException("Error occrus while reading bloom index", e);
-    } finally {
-      CarbonUtil.closeStreams(objectInStream, dataInStream);
-    }
+  public void setIndexedColumn(Set<String> indexedColumn) {
+    this.indexedColumn = indexedColumn;
   }
 
   @Override
@@ -139,26 +91,22 @@ public class BloomCoarseGrainDataMap extends CoarseGrainDataMap {
     }
 
     List<BloomQueryModel> bloomQueryModels = getQueryValue(filterExp.getFilterExpression());
-
     for (BloomQueryModel bloomQueryModel : bloomQueryModels) {
-      LOGGER.info("prune blocklet for query: " + bloomQueryModel);
-      for (List<BloomDMModel> bloomDMModels : indexCol2BloomDMList.get(
-          bloomQueryModel.columnName)) {
-        for (BloomDMModel bloomDMModel : bloomDMModels) {
-          boolean scanRequired = bloomDMModel.getBloomFilter().mightContain(
-              convertValueToBytes(bloomQueryModel.dataType, bloomQueryModel.filterValue));
-          if (scanRequired) {
-            LOGGER.info(String.format(
-                "BloomCoarseGrainDataMap: Need to scan -> blocklet#%s",
-                String.valueOf(bloomDMModel.getBlockletNo())));
-            Blocklet blocklet =
-                new Blocklet(shardName, String.valueOf(bloomDMModel.getBlockletNo()));
-            hitBlocklets.add(blocklet);
-          } else {
-            LOGGER.info(String.format(
-                "BloomCoarseGrainDataMap: Skip scan -> blocklet#%s",
-                String.valueOf(bloomDMModel.getBlockletNo())));
-          }
+      LOGGER.debug("prune blocklet for query: " + bloomQueryModel);
+      BloomDataMapCache.CacheKey cacheKey = new BloomDataMapCache.CacheKey(
+          this.indexPath.toString(), bloomQueryModel.columnName);
+      List<BloomDMModel> bloomDMModels = this.bloomDataMapCache.getBloomDMModelByKey(cacheKey);
+      for (BloomDMModel bloomDMModel : bloomDMModels) {
+        boolean scanRequired = bloomDMModel.getBloomFilter().mightContain(
+            convertValueToBytes(bloomQueryModel.dataType, bloomQueryModel.filterValue));
+        if (scanRequired) {
+          LOGGER.debug(String.format("BloomCoarseGrainDataMap: Need to scan -> blocklet#%s",
+              String.valueOf(bloomDMModel.getBlockletNo())));
+          Blocklet blocklet = new Blocklet(shardName, String.valueOf(bloomDMModel.getBlockletNo()));
+          hitBlocklets.add(blocklet);
+        } else {
+          LOGGER.debug(String.format("BloomCoarseGrainDataMap: Skip scan -> blocklet#%s",
+              String.valueOf(bloomDMModel.getBlockletNo())));
         }
       }
     }
@@ -228,12 +176,20 @@ public class BloomCoarseGrainDataMap extends CoarseGrainDataMap {
     bloomIndexList = null;
   }
 
+  /**
+   * get bloom index file
+   * @param shardPath path for the shard
+   * @param colName index column name
+   */
+  public static String getBloomIndexFile(String shardPath, String colName) {
+    return shardPath.concat(File.separator).concat(colName).concat(BLOOM_INDEX_SUFFIX);
+  }
   static class BloomQueryModel {
     private String columnName;
     private DataType dataType;
     private Object filterValue;
 
-    public BloomQueryModel(String columnName, DataType dataType, Object filterValue) {
+    private BloomQueryModel(String columnName, DataType dataType, Object filterValue) {
       this.columnName = columnName;
       this.dataType = dataType;
       this.filterValue = filterValue;

http://git-wip-us.apache.org/repos/asf/carbondata/blob/d14c403f/datamap/bloom/src/main/java/org/apache/carbondata/datamap/bloom/BloomCoarseGrainDataMapFactory.java
----------------------------------------------------------------------
diff --git a/datamap/bloom/src/main/java/org/apache/carbondata/datamap/bloom/BloomCoarseGrainDataMapFactory.java b/datamap/bloom/src/main/java/org/apache/carbondata/datamap/bloom/BloomCoarseGrainDataMapFactory.java
index 95c21fa..581c3a6 100644
--- a/datamap/bloom/src/main/java/org/apache/carbondata/datamap/bloom/BloomCoarseGrainDataMapFactory.java
+++ b/datamap/bloom/src/main/java/org/apache/carbondata/datamap/bloom/BloomCoarseGrainDataMapFactory.java
@@ -20,6 +20,7 @@ import java.io.File;
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Arrays;
+import java.util.HashSet;
 import java.util.List;
 import java.util.Objects;
 
@@ -189,6 +190,7 @@ public class BloomCoarseGrainDataMapFactory extends DataMapFactory<CoarseGrainDa
       for (CarbonFile carbonFile : carbonFiles) {
         BloomCoarseGrainDataMap bloomDM = new BloomCoarseGrainDataMap();
         bloomDM.init(new DataMapModel(carbonFile.getAbsolutePath()));
+        bloomDM.setIndexedColumn(new HashSet<String>(dataMapMeta.getIndexedColumnNames()));
         dataMaps.add(bloomDM);
       }
     } catch (Exception e) {
@@ -204,6 +206,8 @@ public class BloomCoarseGrainDataMapFactory extends DataMapFactory<CoarseGrainDa
     BloomCoarseGrainDataMap bloomCoarseGrainDataMap = new BloomCoarseGrainDataMap();
     String indexPath = ((BloomDataMapDistributable) distributable).getIndexPath();
     bloomCoarseGrainDataMap.init(new DataMapModel(indexPath));
+    bloomCoarseGrainDataMap.setIndexedColumn(
+        new HashSet<String>(dataMapMeta.getIndexedColumnNames()));
     coarseGrainDataMaps.add(bloomCoarseGrainDataMap);
     return coarseGrainDataMaps;
   }

http://git-wip-us.apache.org/repos/asf/carbondata/blob/d14c403f/datamap/bloom/src/main/java/org/apache/carbondata/datamap/bloom/BloomDataMapCache.java
----------------------------------------------------------------------
diff --git a/datamap/bloom/src/main/java/org/apache/carbondata/datamap/bloom/BloomDataMapCache.java b/datamap/bloom/src/main/java/org/apache/carbondata/datamap/bloom/BloomDataMapCache.java
new file mode 100644
index 0000000..fc23f33
--- /dev/null
+++ b/datamap/bloom/src/main/java/org/apache/carbondata/datamap/bloom/BloomDataMapCache.java
@@ -0,0 +1,232 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.carbondata.datamap.bloom;
+
+import java.io.DataInputStream;
+import java.io.EOFException;
+import java.io.IOException;
+import java.io.ObjectInputStream;
+import java.io.Serializable;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Objects;
+import java.util.concurrent.TimeUnit;
+
+import org.apache.carbondata.common.annotations.InterfaceAudience;
+import org.apache.carbondata.common.logging.LogService;
+import org.apache.carbondata.common.logging.LogServiceFactory;
+import org.apache.carbondata.core.constants.CarbonCommonConstants;
+import org.apache.carbondata.core.datastore.impl.FileFactory;
+import org.apache.carbondata.core.util.CarbonProperties;
+import org.apache.carbondata.core.util.CarbonUtil;
+
+import com.google.common.cache.CacheBuilder;
+import com.google.common.cache.CacheLoader;
+import com.google.common.cache.CacheStats;
+import com.google.common.cache.LoadingCache;
+import com.google.common.cache.RemovalListener;
+import com.google.common.cache.RemovalNotification;
+
+/**
+ * This class adds a cache for the bloomfilter datamap to accelerate queries through it.
+ * The cache is implemented using guava cache and is a singleton which will be shared by all the
+ * bloomfilter datamaps.
+ * The cache key is the bloomindex file of a shard and the value is the list of bloomfilters
+ * for the blocklets in that shard.
+ * The size of the cache is configurable through CarbonProperties, and a cached entry expires
+ * if it has not been accessed for 2 hours.
+ */
+@InterfaceAudience.Internal
+public class BloomDataMapCache implements Serializable {
+  private static final LogService LOGGER = LogServiceFactory.getLogService(
+      BloomDataMapCache.class.getName());
+  private static final long serialVersionUID = 20160822L;
+  private static final int DEFAULT_CACHE_EXPIRED_HOURS = 2;
+  private LoadingCache<CacheKey, List<BloomDMModel>> bloomDMCache = null;
+
+  private BloomDataMapCache() {
+    RemovalListener<CacheKey, List<BloomDMModel>> listener =
+        new RemovalListener<CacheKey, List<BloomDMModel>>() {
+      @Override
+      public void onRemoval(RemovalNotification<CacheKey, List<BloomDMModel>> notification) {
+        LOGGER.info(
+            String.format("Remove bloom datamap entry %s from cache due to %s",
+                notification.getKey(), notification.getCause()));
+      }
+    };
+    CacheLoader<CacheKey, List<BloomDMModel>> cacheLoader =
+        new CacheLoader<CacheKey, List<BloomDMModel>>() {
+      @Override
+      public List<BloomDMModel> load(CacheKey key) throws Exception {
+        LOGGER.info(String.format("Load bloom datamap entry %s to cache", key));
+        return loadBloomDataMapModel(key);
+      }
+    };
+
+    int cacheSizeInBytes = validateAndGetCacheSize()
+        * CarbonCommonConstants.BYTE_TO_KB_CONVERSION_FACTOR
+        * CarbonCommonConstants.BYTE_TO_KB_CONVERSION_FACTOR;
+    this.bloomDMCache = CacheBuilder.newBuilder()
+        .recordStats()
+        .maximumSize(cacheSizeInBytes)
+        .expireAfterAccess(DEFAULT_CACHE_EXPIRED_HOURS, TimeUnit.HOURS)
+        .removalListener(listener)
+        .build(cacheLoader);
+  }
+
+  private static class SingletonHolder {
+    private static final BloomDataMapCache INSTANCE = new BloomDataMapCache();
+  }
+
+  /**
+   * get instance
+   */
+  public static BloomDataMapCache getInstance() {
+    return SingletonHolder.INSTANCE;
+  }
+
+  /**
+   * resolve to the singleton instance when deserialized
+   */
+  protected Object readResolve() {
+    return getInstance();
+  }
+
+  private int validateAndGetCacheSize() {
+    String cacheSizeStr = CarbonProperties.getInstance().getProperty(
+        CarbonCommonConstants.CARBON_QUERY_DATAMAP_BLOOM_CACHE_SIZE,
+        CarbonCommonConstants.CARBON_QUERY_DATAMAP_BLOOM_CACHE_SIZE_DEFAULT_VAL);
+    int cacheSize;
+    try {
+      cacheSize = Integer.parseInt(cacheSizeStr);
+      if (cacheSize <= 0) {
+        throw new NumberFormatException("Value should be greater than 0: " + cacheSize);
+      }
+    } catch (NumberFormatException ex) {
+      LOGGER.error(String.format(
+          "The value '%s' for '%s' is invalid, it must be an Integer that greater than 0."
+              + " Use default value '%s' instead.", cacheSizeStr,
+          CarbonCommonConstants.CARBON_QUERY_DATAMAP_BLOOM_CACHE_SIZE,
+          CarbonCommonConstants.CARBON_QUERY_DATAMAP_BLOOM_CACHE_SIZE_DEFAULT_VAL));
+      cacheSize = Integer.parseInt(
+          CarbonCommonConstants.CARBON_QUERY_DATAMAP_BLOOM_CACHE_SIZE_DEFAULT_VAL);
+    }
+    return cacheSize;
+  }
+
+  /**
+   * load datamap from bloomindex file
+   */
+  private List<BloomDMModel> loadBloomDataMapModel(CacheKey cacheKey) {
+    DataInputStream dataInStream = null;
+    ObjectInputStream objectInStream = null;
+    List<BloomDMModel> bloomDMModels = new ArrayList<BloomDMModel>();
+    try {
+      String indexFile = getIndexFileFromCacheKey(cacheKey);
+      dataInStream = FileFactory.getDataInputStream(indexFile, FileFactory.getFileType(indexFile));
+      objectInStream = new ObjectInputStream(dataInStream);
+      try {
+        BloomDMModel model = null;
+        while ((model = (BloomDMModel) objectInStream.readObject()) != null) {
+          bloomDMModels.add(model);
+        }
+      } catch (EOFException e) {
+        LOGGER.info(String.format("Read %d bloom indices from %s",
+            bloomDMModels.size(), indexFile));
+      }
+      this.bloomDMCache.put(cacheKey, bloomDMModels);
+      return bloomDMModels;
+    } catch (ClassNotFoundException | IOException e) {
+      LOGGER.error(e, "Error occurs while reading bloom index");
+      throw new RuntimeException("Error occurs while reading bloom index", e);
+    } finally {
+      clear();
+      CarbonUtil.closeStreams(objectInStream, dataInStream);
+    }
+  }
+
+  /**
+   * get bloom index file name from cachekey
+   */
+  private String getIndexFileFromCacheKey(CacheKey cacheKey) {
+    return BloomCoarseGrainDataMap.getBloomIndexFile(cacheKey.shardPath, cacheKey.indexColumn);
+  }
+
+  /**
+   * get bloom datamap from cache
+   */
+  public List<BloomDMModel> getBloomDMModelByKey(CacheKey cacheKey) {
+    return this.bloomDMCache.getUnchecked(cacheKey);
+  }
+
+  /**
+   * get cache status
+   */
+  private String getCacheStatus() {
+    StringBuilder sb = new StringBuilder();
+    CacheStats stats = this.bloomDMCache.stats();
+    sb.append("hitCount: ").append(stats.hitCount()).append(System.lineSeparator())
+        .append("hitRate: ").append(stats.hitCount()).append(System.lineSeparator())
+        .append("loadCount: ").append(stats.loadCount()).append(System.lineSeparator())
+        .append("averageLoadPenalty: ").append(stats.averageLoadPenalty())
+        .append(System.lineSeparator())
+        .append("evictionCount: ").append(stats.evictionCount());
+    return sb.toString();
+  }
+
+  /**
+   * clear this cache
+   */
+  private void clear() {
+    LOGGER.info(String.format("Current meta cache statistic: %s", getCacheStatus()));
+    LOGGER.info("Trigger invalid all the cache for bloom datamap");
+    this.bloomDMCache.invalidateAll();
+  }
+
+  public static class CacheKey {
+    private String shardPath;
+    private String indexColumn;
+
+    CacheKey(String shardPath, String indexColumn) {
+      this.shardPath = shardPath;
+      this.indexColumn = indexColumn;
+    }
+
+    @Override
+    public String toString() {
+      final StringBuilder sb = new StringBuilder("CacheKey{");
+      sb.append("shardPath='").append(shardPath).append('\'');
+      sb.append(", indexColumn='").append(indexColumn).append('\'');
+      sb.append('}');
+      return sb.toString();
+    }
+
+    @Override
+    public boolean equals(Object o) {
+      if (this == o) return true;
+      if (!(o instanceof CacheKey)) return false;
+      CacheKey cacheKey = (CacheKey) o;
+      return Objects.equals(shardPath, cacheKey.shardPath)
+          && Objects.equals(indexColumn, cacheKey.indexColumn);
+    }
+
+    @Override
+    public int hashCode() {
+      return Objects.hash(shardPath, indexColumn);
+    }
+  }
+}
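
For context, a minimal Scala sketch of how a bloom datamap instance could look up cached
bloomfilters through this class. It is illustrative only: it assumes code running inside the
org.apache.carbondata.datamap.bloom package (the CacheKey constructor is package-private), and
the shard path and column name are placeholders.

    // obtain the shared cache and ask for the bloomfilters of one shard/column pair
    val cache = BloomDataMapCache.getInstance()
    val key = new BloomDataMapCache.CacheKey("/store/db/tbl/dm/shard_0", "name")
    // the first access loads the .bloomindex file through the CacheLoader;
    // later accesses are served from memory until the entry expires or is evicted
    val models: java.util.List[BloomDMModel] = cache.getBloomDMModelByKey(key)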

http://git-wip-us.apache.org/repos/asf/carbondata/blob/d14c403f/datamap/bloom/src/main/java/org/apache/carbondata/datamap/bloom/BloomDataMapWriter.java
----------------------------------------------------------------------
diff --git a/datamap/bloom/src/main/java/org/apache/carbondata/datamap/bloom/BloomDataMapWriter.java b/datamap/bloom/src/main/java/org/apache/carbondata/datamap/bloom/BloomDataMapWriter.java
index a55de11..f6eb331 100644
--- a/datamap/bloom/src/main/java/org/apache/carbondata/datamap/bloom/BloomDataMapWriter.java
+++ b/datamap/bloom/src/main/java/org/apache/carbondata/datamap/bloom/BloomDataMapWriter.java
@@ -25,7 +25,6 @@ import java.util.List;
 import org.apache.carbondata.common.annotations.InterfaceAudience;
 import org.apache.carbondata.common.logging.LogService;
 import org.apache.carbondata.common.logging.LogServiceFactory;
-import org.apache.carbondata.core.constants.CarbonCommonConstants;
 import org.apache.carbondata.core.datamap.Segment;
 import org.apache.carbondata.core.datamap.dev.DataMapWriter;
 import org.apache.carbondata.core.datastore.impl.FileFactory;
@@ -138,8 +137,8 @@ public class BloomDataMapWriter extends DataMapWriter {
     }
     List<CarbonColumn> indexColumns = getIndexColumns();
     for (int indexColId = 0; indexColId < indexColumns.size(); indexColId++) {
-      String dmFile = dataMapPath + CarbonCommonConstants.FILE_SEPARATOR +
-          indexColumns.get(indexColId).getColName() + BloomCoarseGrainDataMap.BLOOM_INDEX_SUFFIX;
+      String dmFile = BloomCoarseGrainDataMap.getBloomIndexFile(dataMapPath,
+          indexColumns.get(indexColId).getColName());
       DataOutputStream dataOutStream = null;
       ObjectOutputStream objectOutStream = null;
       try {


[22/50] [abbrv] carbondata git commit: [CARBONDATA-2455]Fix _System Folder creation and lucene AND, OR, NOT Filter fix

Posted by ma...@apache.org.
[CARBONDATA-2455]Fix _System Folder creation and lucene AND,OR,NOT Filter fix

This closes #2281
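
The practical effect is that lucene AND, OR and NOT operators are now combined inside a single
TEXT_MATCH string instead of chaining several TEXT_MATCH calls. A short sketch drawn from the
test changes below (the table and column names are the ones used in those tests):

    sql("SELECT * FROM datamap_main WHERE TEXT_MATCH('country:ch* AND name:aa*')")
    sql("SELECT * FROM datamap_main WHERE TEXT_MATCH('country:u* OR name:aa*')")
    sql("SELECT * FROM datamap_test_table WHERE TEXT_MATCH('name:n1* AND city:c01* NOT c02*')")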


Project: http://git-wip-us.apache.org/repos/asf/carbondata/repo
Commit: http://git-wip-us.apache.org/repos/asf/carbondata/commit/61afa42d
Tree: http://git-wip-us.apache.org/repos/asf/carbondata/tree/61afa42d
Diff: http://git-wip-us.apache.org/repos/asf/carbondata/diff/61afa42d

Branch: refs/heads/spark-2.3
Commit: 61afa42daafa03a7e8e0eb980cb6d667d6304785
Parents: 6b70b7e
Author: Indhumathi27 <in...@gmail.com>
Authored: Tue May 8 15:36:47 2018 +0530
Committer: Jacky Li <ja...@qq.com>
Committed: Fri May 11 09:29:19 2018 +0800

----------------------------------------------------------------------
 .../schema/table/DiskBasedDMSchemaStorageProvider.java   |  2 +-
 .../carbondata/datamap/lucene/LuceneDataMapWriter.java   |  1 -
 .../datamap/lucene/LuceneFineGrainDataMap.java           |  3 +--
 .../cluster/sdv/generated/LuceneTestCase.scala           |  7 +++----
 .../datamap/lucene/LuceneFineGrainDataMapSuite.scala     | 11 +++++------
 5 files changed, 10 insertions(+), 14 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/carbondata/blob/61afa42d/core/src/main/java/org/apache/carbondata/core/metadata/schema/table/DiskBasedDMSchemaStorageProvider.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/metadata/schema/table/DiskBasedDMSchemaStorageProvider.java b/core/src/main/java/org/apache/carbondata/core/metadata/schema/table/DiskBasedDMSchemaStorageProvider.java
index e1a929c..4ebbcd0 100644
--- a/core/src/main/java/org/apache/carbondata/core/metadata/schema/table/DiskBasedDMSchemaStorageProvider.java
+++ b/core/src/main/java/org/apache/carbondata/core/metadata/schema/table/DiskBasedDMSchemaStorageProvider.java
@@ -55,7 +55,7 @@ public class DiskBasedDMSchemaStorageProvider implements DataMapSchemaStoragePro
   private Set<DataMapSchema> dataMapSchemas = new HashSet<>();
 
   public DiskBasedDMSchemaStorageProvider(String storePath) {
-    this.storePath = storePath;
+    this.storePath = CarbonUtil.checkAndAppendHDFSUrl(storePath);
     this.mdtFilePath = storePath + CarbonCommonConstants.FILE_SEPARATOR + "datamap.mdtfile";
   }
 

http://git-wip-us.apache.org/repos/asf/carbondata/blob/61afa42d/datamap/lucene/src/main/java/org/apache/carbondata/datamap/lucene/LuceneDataMapWriter.java
----------------------------------------------------------------------
diff --git a/datamap/lucene/src/main/java/org/apache/carbondata/datamap/lucene/LuceneDataMapWriter.java b/datamap/lucene/src/main/java/org/apache/carbondata/datamap/lucene/LuceneDataMapWriter.java
index 12fa1ef..3615936 100644
--- a/datamap/lucene/src/main/java/org/apache/carbondata/datamap/lucene/LuceneDataMapWriter.java
+++ b/datamap/lucene/src/main/java/org/apache/carbondata/datamap/lucene/LuceneDataMapWriter.java
@@ -274,7 +274,6 @@ public class LuceneDataMapWriter extends DataMapWriter {
       }
     } else if (type == DataTypes.STRING) {
       byte[] value = (byte[]) data;
-      // TODO: how to get string value
       String strValue = null;
       try {
         strValue = new String(value, 2, value.length - 2, "UTF-8");

http://git-wip-us.apache.org/repos/asf/carbondata/blob/61afa42d/datamap/lucene/src/main/java/org/apache/carbondata/datamap/lucene/LuceneFineGrainDataMap.java
----------------------------------------------------------------------
diff --git a/datamap/lucene/src/main/java/org/apache/carbondata/datamap/lucene/LuceneFineGrainDataMap.java b/datamap/lucene/src/main/java/org/apache/carbondata/datamap/lucene/LuceneFineGrainDataMap.java
index 8e457b7..f8d1b12 100644
--- a/datamap/lucene/src/main/java/org/apache/carbondata/datamap/lucene/LuceneFineGrainDataMap.java
+++ b/datamap/lucene/src/main/java/org/apache/carbondata/datamap/lucene/LuceneFineGrainDataMap.java
@@ -205,8 +205,7 @@ public class LuceneFineGrainDataMap extends FineGrainDataMap {
     queryParser.setAllowLeadingWildcard(true);
     Query query;
     try {
-      // always send lowercase string to lucene as it is case sensitive
-      query = queryParser.parse(strQuery.toLowerCase());
+      query = queryParser.parse(strQuery);
     } catch (ParseException e) {
       String errorMessage = String.format(
           "failed to filter block with query %s, detail is %s", strQuery, e.getMessage());

http://git-wip-us.apache.org/repos/asf/carbondata/blob/61afa42d/integration/spark-common-cluster-test/src/test/scala/org/apache/carbondata/cluster/sdv/generated/LuceneTestCase.scala
----------------------------------------------------------------------
diff --git a/integration/spark-common-cluster-test/src/test/scala/org/apache/carbondata/cluster/sdv/generated/LuceneTestCase.scala b/integration/spark-common-cluster-test/src/test/scala/org/apache/carbondata/cluster/sdv/generated/LuceneTestCase.scala
index d111594..54d0d11 100644
--- a/integration/spark-common-cluster-test/src/test/scala/org/apache/carbondata/cluster/sdv/generated/LuceneTestCase.scala
+++ b/integration/spark-common-cluster-test/src/test/scala/org/apache/carbondata/cluster/sdv/generated/LuceneTestCase.scala
@@ -110,14 +110,13 @@ class LuceneTestCase extends QueryTest with BeforeAndAfterAll {
     checkAnswer(sql("SELECT * FROM datamap_main WHERE TEXT_MATCH('country:ch*')"),
       sql("select * from datamap_main where country like 'ch%'"))
     checkAnswer(sql(
-      "SELECT * FROM datamap_main WHERE TEXT_MATCH('country:ch*') AND TEXT_MATCH('name:aa*')"),
+      "SELECT * FROM datamap_main WHERE TEXT_MATCH('country:ch* AND name:aa*')"),
       sql("select * from datamap_main where country like 'ch%' and name like 'aa%'"))
     checkAnswer(sql(
-      "SELECT * FROM datamap_main WHERE TEXT_MATCH('country:u* or name:aa*')"),
+      "SELECT * FROM datamap_main WHERE TEXT_MATCH('country:u* OR name:aa*')"),
       sql("select * from datamap_main where country like 'u%'or name like 'aa%'"))
     checkAnswer(sql(
-      "SELECT * FROM datamap_main WHERE TEXT_MATCH('country:u*') OR TEXT_MATCH('name:aaa1*') AND " +
-      "TEXT_MATCH('name:aaa2*')"),
+      "SELECT * FROM datamap_main WHERE TEXT_MATCH('country:u* OR (name:aaa1* AND name:aaa2*)')"),
       sql(
         "select * from datamap_main where country like 'u%' OR name like 'aaa1%' AND name like " +
         "'aaa2%'"))

http://git-wip-us.apache.org/repos/asf/carbondata/blob/61afa42d/integration/spark-common-test/src/test/scala/org/apache/carbondata/datamap/lucene/LuceneFineGrainDataMapSuite.scala
----------------------------------------------------------------------
diff --git a/integration/spark-common-test/src/test/scala/org/apache/carbondata/datamap/lucene/LuceneFineGrainDataMapSuite.scala b/integration/spark-common-test/src/test/scala/org/apache/carbondata/datamap/lucene/LuceneFineGrainDataMapSuite.scala
index 9981ff5..6d2eb3f 100644
--- a/integration/spark-common-test/src/test/scala/org/apache/carbondata/datamap/lucene/LuceneFineGrainDataMapSuite.scala
+++ b/integration/spark-common-test/src/test/scala/org/apache/carbondata/datamap/lucene/LuceneFineGrainDataMapSuite.scala
@@ -290,7 +290,7 @@ class LuceneFineGrainDataMapSuite extends QueryTest with BeforeAndAfterAll {
       """.stripMargin)
     sql(s"LOAD DATA LOCAL INPATH '$file2' INTO TABLE datamap_test_table OPTIONS('header'='false')")
     checkAnswer(sql(
-      "SELECT * FROM datamap_test_table WHERE TEXT_MATCH('name:n0*') AND TEXT_MATCH(' city:c0*')"),
+      "SELECT * FROM datamap_test_table WHERE TEXT_MATCH('name:n0* AND city:c0*')"),
       sql("select * from datamap_test_table where name like 'n0%' and city like 'c0%'"))
     sql("drop datamap if exists dm on table datamap_test_table")
   }
@@ -311,7 +311,7 @@ class LuceneFineGrainDataMapSuite extends QueryTest with BeforeAndAfterAll {
       """.stripMargin)
     sql(s"LOAD DATA LOCAL INPATH '$file2' INTO TABLE datamap_test_table OPTIONS('header'='false')")
     checkAnswer(sql(
-      "SELECT * FROM datamap_test_table WHERE TEXT_MATCH('name:n1*') or TEXT_MATCH('city:c01*')"),
+      "SELECT * FROM datamap_test_table WHERE TEXT_MATCH('name:n1* OR city:c01*')"),
       sql("select * from datamap_test_table where name like 'n1%' or city like 'c01%'"))
     sql("drop datamap if exists dm on table datamap_test_table")
   }
@@ -332,8 +332,7 @@ class LuceneFineGrainDataMapSuite extends QueryTest with BeforeAndAfterAll {
       """.stripMargin)
     sql(s"LOAD DATA LOCAL INPATH '$file2' INTO TABLE datamap_test_table OPTIONS('header'='false')")
     checkAnswer(sql(
-      "SELECT * FROM datamap_test_table WHERE TEXT_MATCH('name:n1*') OR TEXT_MATCH ('city:c01*') " +
-      "AND TEXT_MATCH('city:C02*')"),
+      "SELECT * FROM datamap_test_table WHERE TEXT_MATCH('name:n1* OR (city:c01* AND city:c02*)')"),
       sql(
         "select * from datamap_test_table where name like 'n1%' OR city like 'c01%' and city like" +
         " 'c02%'"))
@@ -488,7 +487,7 @@ class LuceneFineGrainDataMapSuite extends QueryTest with BeforeAndAfterAll {
       sql("select *from datamap_test_table where name like'n1%' AND not name like 'n2%'"))
     //check NOT filter with TEXTMATCH wildcard-search using AND on different columns
     checkAnswer(sql(
-      "select *from datamap_test_table where TEXT_MATCH('name:n1*')AND TEXT_MATCH('city:c01* NOT " +
+      "select *from datamap_test_table where TEXT_MATCH('name:n1* AND city:c01* NOT " +
       "c02*')"),
       sql("select *from datamap_test_table where name like'n1%' AND not city='c02%'"))
     sql("drop datamap if exists dm on table datamap_test_table")
@@ -716,7 +715,7 @@ class LuceneFineGrainDataMapSuite extends QueryTest with BeforeAndAfterAll {
       sql(s"SELECT * FROM datamap_test5 WHERE city='c020'"))
     sql("DROP TABLE IF EXISTS datamap_test5")
   }
-
+  
   test("test text_match on normal table") {
     sql("DROP TABLE IF EXISTS table1")
     sql(


[10/50] [abbrv] carbondata git commit: [CARBONDATA-2416] Support DEFERRED REBUILD when creating DataMap

Posted by ma...@apache.org.
[CARBONDATA-2416] Support DEFERRED REBUILD when creating DataMap

1. REFRESH DATAMAP is changed to the REBUILD DATAMAP command
2. When creating a datamap, the user can choose to load it immediately or later by manually triggering the REBUILD DATAMAP command (see the sketch below)

This closes #2255
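
A hedged sketch of the resulting user-facing syntax, written in the style of the Scala test
suites; the datamap, table and DMPROPERTIES names here are illustrative, not taken from this
commit:

    // create the datamap but defer loading of its index data
    sql("CREATE DATAMAP dm ON TABLE main_table USING 'bloomfilter' " +
        "WITH DEFERRED REBUILD DMPROPERTIES('index_columns'='name')")
    // later, load the index data for the existing segments manually
    sql("REBUILD DATAMAP dm ON TABLE main_table")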


Project: http://git-wip-us.apache.org/repos/asf/carbondata/repo
Commit: http://git-wip-us.apache.org/repos/asf/carbondata/commit/747be9b1
Tree: http://git-wip-us.apache.org/repos/asf/carbondata/tree/747be9b1
Diff: http://git-wip-us.apache.org/repos/asf/carbondata/diff/747be9b1

Branch: refs/heads/spark-2.3
Commit: 747be9b111ab0a12e7550124a9facccacf8ad861
Parents: fb12897
Author: Jacky Li <ja...@qq.com>
Authored: Tue May 1 17:17:33 2018 +0800
Committer: QiangCai <qi...@qq.com>
Committed: Wed May 9 18:59:03 2018 +0800

----------------------------------------------------------------------
 .../carbondata/core/datamap/DataMapChooser.java |  75 +++--
 .../core/datamap/DataMapProvider.java           |   3 +-
 .../core/datamap/DataMapStoreManager.java       |  19 +-
 .../core/datamap/dev/DataMapBuilder.java        |  38 +++
 .../core/datamap/dev/DataMapFactory.java        |   5 +-
 .../core/datamap/dev/DataMapRefresher.java      |  36 ---
 .../datamap/status/DataMapStatusDetail.java     |   4 +
 .../datamap/status/DataMapStatusManager.java    |  30 +-
 .../blockletindex/BlockletDataMapFactory.java   |   4 +-
 .../schema/datamap/DataMapProperty.java         |  39 +++
 .../metadata/schema/table/DataMapSchema.java    |  16 +-
 .../bloom/BloomCoarseGrainDataMapFactory.java   |   6 +-
 .../datamap/bloom/BloomDataMapBuilder.java      |  91 ++++++
 .../datamap/bloom/BloomDataMapRefresher.java    |  91 ------
 .../examples/MinMaxIndexDataMapFactory.java     |  30 +-
 .../datamap/lucene/LuceneDataMapBuilder.java    | 224 +++++++++++++
 .../lucene/LuceneDataMapFactoryBase.java        |   9 +-
 .../datamap/lucene/LuceneDataMapRefresher.java  | 224 -------------
 .../hadoop/api/CarbonInputFormat.java           |  32 +-
 .../hadoop/api/CarbonOutputCommitter.java       |   2 +-
 .../lucene/LuceneFineGrainDataMapSuite.scala    |  68 ++--
 .../preaggregate/TestPreAggCreateCommand.scala  |  10 +
 .../testsuite/datamap/CGDataMapTestCase.scala   |   6 +-
 .../testsuite/datamap/DataMapWriterSuite.scala  |   6 +-
 .../testsuite/datamap/FGDataMapTestCase.scala   |   8 +-
 .../testsuite/datamap/TestDataMapStatus.scala   |  71 ++++-
 .../detailquery/SearchModeTestCase.scala        |   2 -
 .../TestInsertAndOtherCommandConcurrent.scala   |  59 ++--
 .../spark/sql/catalyst/CarbonDDLSqlParser.scala |   3 +-
 .../carbondata/datamap/DataMapProperty.java     |  33 --
 .../datamap/IndexDataMapProvider.java           |   2 +-
 .../datamap/PreAggregateDataMapProvider.java    |  12 +-
 .../datamap/IndexDataMapRebuildRDD.scala        | 318 +++++++++++++++++++
 .../datamap/IndexDataMapRefreshRDD.scala        | 317 ------------------
 .../spark/rdd/CarbonDataRDDFactory.scala        |   4 +-
 .../org/apache/spark/sql/CarbonSession.scala    |  11 +
 .../datamap/CarbonCreateDataMapCommand.scala    |  29 +-
 .../datamap/CarbonDataMapRebuildCommand.scala   |  56 ++++
 .../datamap/CarbonDataMapRefreshCommand.scala   |  56 ----
 .../datasources/SparkCarbonFileFormat.scala     |   5 +-
 .../sql/parser/CarbonSpark2SqlParser.scala      |  20 +-
 .../bloom/BloomCoarseGrainDataMapSuite.scala    | 187 +++++++++--
 .../datamap/DataMapWriterListener.java          |   8 +-
 .../store/worker/SearchRequestHandler.java      |  56 ++--
 .../scala/org/apache/spark/rpc/Master.scala     |  18 +-
 .../org/apache/spark/search/Searcher.scala      |   8 +-
 46 files changed, 1318 insertions(+), 1033 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/carbondata/blob/747be9b1/core/src/main/java/org/apache/carbondata/core/datamap/DataMapChooser.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/datamap/DataMapChooser.java b/core/src/main/java/org/apache/carbondata/core/datamap/DataMapChooser.java
index 478254d..7cdabd6 100644
--- a/core/src/main/java/org/apache/carbondata/core/datamap/DataMapChooser.java
+++ b/core/src/main/java/org/apache/carbondata/core/datamap/DataMapChooser.java
@@ -21,7 +21,7 @@ import java.util.ArrayList;
 import java.util.Collections;
 import java.util.HashSet;
 import java.util.List;
-import java.util.Objects;
+import java.util.Map;
 import java.util.Set;
 
 import org.apache.carbondata.common.annotations.InterfaceAudience;
@@ -29,6 +29,8 @@ import org.apache.carbondata.core.datamap.dev.expr.AndDataMapExprWrapper;
 import org.apache.carbondata.core.datamap.dev.expr.DataMapExprWrapper;
 import org.apache.carbondata.core.datamap.dev.expr.DataMapExprWrapperImpl;
 import org.apache.carbondata.core.datamap.dev.expr.OrDataMapExprWrapper;
+import org.apache.carbondata.core.datamap.status.DataMapStatusDetail;
+import org.apache.carbondata.core.datamap.status.DataMapStatusManager;
 import org.apache.carbondata.core.metadata.schema.table.CarbonTable;
 import org.apache.carbondata.core.scan.expression.ColumnExpression;
 import org.apache.carbondata.core.scan.expression.Expression;
@@ -56,34 +58,42 @@ import org.apache.carbondata.core.scan.filter.resolver.FilterResolverIntf;
 @InterfaceAudience.Internal
 public class DataMapChooser {
 
-  private static DataMapChooser INSTANCE;
+  private CarbonTable carbonTable;
+  private List<TableDataMap> cgDataMaps;
+  private List<TableDataMap> fgDataMaps;
 
-  private DataMapChooser() { }
-
-  public static DataMapChooser get() {
-    if (INSTANCE == null) {
-      INSTANCE = new DataMapChooser();
+  public DataMapChooser(CarbonTable carbonTable) throws IOException {
+    this.carbonTable = carbonTable;
+    // read all datamaps for this table and populate CG and FG datamap list
+    List<TableDataMap> visibleDataMaps =
+        DataMapStoreManager.getInstance().getAllVisibleDataMap(carbonTable);
+    Map<String, DataMapStatusDetail> map = DataMapStatusManager.readDataMapStatusMap();
+    cgDataMaps = new ArrayList<>(visibleDataMaps.size());
+    fgDataMaps = new ArrayList<>(visibleDataMaps.size());
+    for (TableDataMap visibleDataMap : visibleDataMaps) {
+      DataMapStatusDetail status = map.get(visibleDataMap.getDataMapSchema().getDataMapName());
+      if (status != null && status.isEnabled()) {
+        DataMapLevel level = visibleDataMap.getDataMapFactory().getDataMapLevel();
+        if (level == DataMapLevel.CG) {
+          cgDataMaps.add(visibleDataMap);
+        } else {
+          fgDataMaps.add(visibleDataMap);
+        }
+      }
     }
-    return INSTANCE;
   }
 
   /**
    * Return a chosen datamap based on input filter. See {@link DataMapChooser}
    */
-  public DataMapExprWrapper choose(CarbonTable carbonTable, FilterResolverIntf filter)
-      throws IOException {
-    Objects.requireNonNull(carbonTable);
+  public DataMapExprWrapper choose(FilterResolverIntf filter) {
     if (filter != null) {
       Expression expression = filter.getFilterExpression();
       // First check for FG datamaps if any exist
-      List<TableDataMap> allDataMapFG =
-          DataMapStoreManager.getInstance().getAllVisibleDataMap(carbonTable, DataMapLevel.FG);
-      ExpressionTuple tuple = selectDataMap(expression, allDataMapFG, filter);
+      ExpressionTuple tuple = selectDataMap(expression, fgDataMaps, filter);
       if (tuple.dataMapExprWrapper == null) {
         // Check for CG datamap
-        List<TableDataMap> allDataMapCG =
-            DataMapStoreManager.getInstance().getAllVisibleDataMap(carbonTable, DataMapLevel.CG);
-        tuple = selectDataMap(expression, allDataMapCG, filter);
+        tuple = selectDataMap(expression, cgDataMaps, filter);
       }
       if (tuple.dataMapExprWrapper != null) {
         return tuple.dataMapExprWrapper;
@@ -97,33 +107,22 @@ public class DataMapChooser {
   /**
    * Return a chosen FG datamap based on input filter. See {@link DataMapChooser}
    */
-  public DataMapExprWrapper chooseFGDataMap(CarbonTable carbonTable,
-      FilterResolverIntf resolverIntf) throws IOException {
-    if (resolverIntf != null) {
-      Expression expression = resolverIntf.getFilterExpression();
-      // First check for FG datamaps if any exist
-      List<TableDataMap> allDataMapFG =
-          DataMapStoreManager.getInstance().getAllVisibleDataMap(carbonTable, DataMapLevel.FG);
-      ExpressionTuple tuple = selectDataMap(expression, allDataMapFG, resolverIntf);
-      if (tuple.dataMapExprWrapper != null) {
-        return tuple.dataMapExprWrapper;
-      }
-    }
-    // Return the default datamap if no other datamap exists.
-    return null;
+  public DataMapExprWrapper chooseFGDataMap(FilterResolverIntf resolverIntf) {
+    return chooseDataMap(DataMapLevel.FG, resolverIntf);
   }
 
   /**
    * Return a chosen CG datamap based on input filter. See {@link DataMapChooser}
    */
-  public DataMapExprWrapper chooseCGDataMap(CarbonTable carbonTable,
-      FilterResolverIntf resolverIntf) throws IOException {
+  public DataMapExprWrapper chooseCGDataMap(FilterResolverIntf resolverIntf) {
+    return chooseDataMap(DataMapLevel.CG, resolverIntf);
+  }
+
+  private DataMapExprWrapper chooseDataMap(DataMapLevel level, FilterResolverIntf resolverIntf) {
     if (resolverIntf != null) {
       Expression expression = resolverIntf.getFilterExpression();
-      // Check for CG datamap
-      List<TableDataMap> allDataMapCG =
-          DataMapStoreManager.getInstance().getAllVisibleDataMap(carbonTable, DataMapLevel.CG);
-      ExpressionTuple tuple = selectDataMap(expression, allDataMapCG, resolverIntf);
+      List<TableDataMap> datamaps = level == DataMapLevel.CG ? cgDataMaps : fgDataMaps;
+      ExpressionTuple tuple = selectDataMap(expression, datamaps, resolverIntf);
       if (tuple.dataMapExprWrapper != null) {
         return tuple.dataMapExprWrapper;
       }
@@ -137,7 +136,7 @@ public class DataMapChooser {
    * @param resolverIntf
    * @return
    */
-  public DataMapExprWrapper getDefaultDataMap(CarbonTable carbonTable,
+  public static DataMapExprWrapper getDefaultDataMap(CarbonTable carbonTable,
       FilterResolverIntf resolverIntf) {
     // Return the default datamap if no other datamap exists.
     return new DataMapExprWrapperImpl(
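
A rough usage sketch of the reworked chooser, in Scala; carbonTable and filterResolver are
assumed to be in scope, and only datamaps whose status is ENABLED are considered, as populated
in the constructor above:

    // one chooser per table; it reads the datamap status details when constructed
    val chooser = new DataMapChooser(carbonTable)
    // prefer a fine-grain datamap, fall back to coarse-grain, then to the default datamap
    val exprWrapper = Option(chooser.chooseFGDataMap(filterResolver))
      .orElse(Option(chooser.chooseCGDataMap(filterResolver)))
      .getOrElse(DataMapChooser.getDefaultDataMap(carbonTable, filterResolver))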

http://git-wip-us.apache.org/repos/asf/carbondata/blob/747be9b1/core/src/main/java/org/apache/carbondata/core/datamap/DataMapProvider.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/datamap/DataMapProvider.java b/core/src/main/java/org/apache/carbondata/core/datamap/DataMapProvider.java
index 775b912..05ba7cf 100644
--- a/core/src/main/java/org/apache/carbondata/core/datamap/DataMapProvider.java
+++ b/core/src/main/java/org/apache/carbondata/core/datamap/DataMapProvider.java
@@ -104,13 +104,12 @@ public abstract class DataMapProvider {
    * Rebuild the datamap by loading all existing data from mainTable
    * This is called when refreshing the datamap when
    * 1. after datamap creation and if `autoRefreshDataMap` is set to true
-   * 2. user manually trigger refresh datamap command
+   * 2. user manually triggers the REBUILD DATAMAP command
    */
   public abstract void rebuild() throws IOException, NoSuchDataMapException;
 
   /**
    * Build the datamap incrementally by loading specified segment data
-   * This is called when user manually trigger refresh datamap
    */
   public abstract void incrementalBuild(String[] segmentIds) throws IOException;
 

http://git-wip-us.apache.org/repos/asf/carbondata/blob/747be9b1/core/src/main/java/org/apache/carbondata/core/datamap/DataMapStoreManager.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/datamap/DataMapStoreManager.java b/core/src/main/java/org/apache/carbondata/core/datamap/DataMapStoreManager.java
index 29a1106..9f7af2b 100644
--- a/core/src/main/java/org/apache/carbondata/core/datamap/DataMapStoreManager.java
+++ b/core/src/main/java/org/apache/carbondata/core/datamap/DataMapStoreManager.java
@@ -79,26 +79,9 @@ public final class DataMapStoreManager {
   }
 
   /**
-   * It gives all visible datamaps of type @mapType except the default datamap.
-   */
-  public List<TableDataMap> getAllVisibleDataMap(CarbonTable carbonTable, DataMapLevel mapType)
-      throws IOException {
-    List<TableDataMap> dataMaps = new ArrayList<>();
-    List<TableDataMap> tableIndices = getAllVisibleDataMap(carbonTable);
-    if (tableIndices != null) {
-      for (TableDataMap dataMap : tableIndices) {
-        if (mapType == dataMap.getDataMapFactory().getDataMapLevel()) {
-          dataMaps.add(dataMap);
-        }
-      }
-    }
-    return dataMaps;
-  }
-
-  /**
    * It only gives the visible datamaps
    */
-  private List<TableDataMap> getAllVisibleDataMap(CarbonTable carbonTable) throws IOException {
+  List<TableDataMap> getAllVisibleDataMap(CarbonTable carbonTable) throws IOException {
     CarbonSessionInfo sessionInfo = ThreadLocalSessionInfo.getCarbonSessionInfo();
     List<TableDataMap> allDataMaps = getAllDataMap(carbonTable);
     Iterator<TableDataMap> dataMapIterator = allDataMaps.iterator();

http://git-wip-us.apache.org/repos/asf/carbondata/blob/747be9b1/core/src/main/java/org/apache/carbondata/core/datamap/dev/DataMapBuilder.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/datamap/dev/DataMapBuilder.java b/core/src/main/java/org/apache/carbondata/core/datamap/dev/DataMapBuilder.java
new file mode 100644
index 0000000..570a1ce
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/core/datamap/dev/DataMapBuilder.java
@@ -0,0 +1,38 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.carbondata.core.datamap.dev;
+
+import java.io.IOException;
+
+import org.apache.carbondata.common.annotations.InterfaceAudience;
+
+/**
+ * DataMapBuilder is used to implement the REBUILD DATAMAP command: it reads all existing
+ * data in the main table and loads it into the DataMap. Any index data already present in
+ * the datamap will be deleted first.
+ */
+@InterfaceAudience.Developer("DataMap")
+public interface DataMapBuilder {
+  void initialize() throws IOException;
+
+  void addRow(int blockletId, int pageId, int rowId, Object[] values) throws IOException;
+
+  void finish() throws IOException;
+
+  void close() throws IOException;
+}
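
A hedged sketch of the expected call sequence for a builder during REBUILD, in Scala;
dataMapFactory, segment, shardName and the rows iterator are assumed to be in scope, and the
IndexDataMapRebuildRDD added in this commit is the real driver of this loop:

    // one builder per segment/shard, obtained from the datamap factory
    val builder = dataMapFactory.createBuilder(segment, shardName)
    builder.initialize()
    rows.foreach { case (blockletId, pageId, rowId, values) =>
      // values holds the indexed column values of one row
      builder.addRow(blockletId, pageId, rowId, values)
    }
    builder.finish()
    builder.close()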

http://git-wip-us.apache.org/repos/asf/carbondata/blob/747be9b1/core/src/main/java/org/apache/carbondata/core/datamap/dev/DataMapFactory.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/datamap/dev/DataMapFactory.java b/core/src/main/java/org/apache/carbondata/core/datamap/dev/DataMapFactory.java
index ae34be7..ad709a0 100644
--- a/core/src/main/java/org/apache/carbondata/core/datamap/dev/DataMapFactory.java
+++ b/core/src/main/java/org/apache/carbondata/core/datamap/dev/DataMapFactory.java
@@ -61,10 +61,11 @@ public abstract class DataMapFactory<T extends DataMap> {
       throws IOException;
 
   /**
-   * Create a new Refresher for this datamap, to rebuild the specified
+   * Create a new DataMapBuilder for this datamap, to rebuild the specified
    * segment and shard data in the main table.
+   * TODO: refactor to unify with DataMapWriter
    */
-  public abstract DataMapRefresher createRefresher(Segment segment, String shardName)
+  public abstract DataMapBuilder createBuilder(Segment segment, String shardName)
       throws IOException;
 
   /**

http://git-wip-us.apache.org/repos/asf/carbondata/blob/747be9b1/core/src/main/java/org/apache/carbondata/core/datamap/dev/DataMapRefresher.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/datamap/dev/DataMapRefresher.java b/core/src/main/java/org/apache/carbondata/core/datamap/dev/DataMapRefresher.java
deleted file mode 100644
index 770ceca..0000000
--- a/core/src/main/java/org/apache/carbondata/core/datamap/dev/DataMapRefresher.java
+++ /dev/null
@@ -1,36 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.carbondata.core.datamap.dev;
-
-import java.io.IOException;
-
-import org.apache.carbondata.common.annotations.InterfaceAudience;
-
-/**
- * Interface to rebuild the datamap for main table with existing data
- */
-@InterfaceAudience.Developer("DataMap")
-public interface DataMapRefresher {
-  void initialize() throws IOException;
-
-  void addRow(int blockletId, int pageId, int rowId, Object[] values) throws IOException;
-
-  void finish() throws IOException;
-
-  void close() throws IOException;
-}

http://git-wip-us.apache.org/repos/asf/carbondata/blob/747be9b1/core/src/main/java/org/apache/carbondata/core/datamap/status/DataMapStatusDetail.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/datamap/status/DataMapStatusDetail.java b/core/src/main/java/org/apache/carbondata/core/datamap/status/DataMapStatusDetail.java
index 1ecb1b1..d1e5921 100644
--- a/core/src/main/java/org/apache/carbondata/core/datamap/status/DataMapStatusDetail.java
+++ b/core/src/main/java/org/apache/carbondata/core/datamap/status/DataMapStatusDetail.java
@@ -51,6 +51,10 @@ public class DataMapStatusDetail implements Serializable {
     return status;
   }
 
+  public boolean isEnabled() {
+    return status == DataMapStatus.ENABLED;
+  }
+
   public void setStatus(DataMapStatus status) {
     this.status = status;
   }

http://git-wip-us.apache.org/repos/asf/carbondata/blob/747be9b1/core/src/main/java/org/apache/carbondata/core/datamap/status/DataMapStatusManager.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/datamap/status/DataMapStatusManager.java b/core/src/main/java/org/apache/carbondata/core/datamap/status/DataMapStatusManager.java
index dcad80b..b540146 100644
--- a/core/src/main/java/org/apache/carbondata/core/datamap/status/DataMapStatusManager.java
+++ b/core/src/main/java/org/apache/carbondata/core/datamap/status/DataMapStatusManager.java
@@ -18,7 +18,9 @@ package org.apache.carbondata.core.datamap.status;
 
 import java.io.IOException;
 import java.util.ArrayList;
+import java.util.HashMap;
 import java.util.List;
+import java.util.Map;
 
 import org.apache.carbondata.common.exceptions.sql.NoSuchDataMapException;
 import org.apache.carbondata.core.datamap.DataMapStoreManager;
@@ -51,6 +53,15 @@ public class DataMapStatusManager {
     return storageProvider.getDataMapStatusDetails();
   }
 
+  public static Map<String, DataMapStatusDetail> readDataMapStatusMap() throws IOException {
+    DataMapStatusDetail[] details = storageProvider.getDataMapStatusDetails();
+    Map<String, DataMapStatusDetail> map = new HashMap<>(details.length);
+    for (DataMapStatusDetail detail : details) {
+      map.put(detail.getDataMapName(), detail);
+    }
+    return map;
+  }
+
   public static void disableDataMap(String dataMapName) throws IOException, NoSuchDataMapException {
     DataMapSchema dataMapSchema = getDataMapSchema(dataMapName);
     if (dataMapSchema != null) {
@@ -60,10 +71,19 @@ public class DataMapStatusManager {
     }
   }
 
-  public static void disableDataMapsOfTable(CarbonTable table) throws IOException {
+  /**
+   * This method will disable all lazy (DEFERRED REBUILD) datamaps in the given table
+   */
+  public static void disableAllLazyDataMaps(CarbonTable table) throws IOException {
     List<DataMapSchema> allDataMapSchemas =
         DataMapStoreManager.getInstance().getDataMapSchemasOfTable(table);
-    storageProvider.updateDataMapStatus(allDataMapSchemas, DataMapStatus.DISABLED);
+    List<DataMapSchema> dataMapToBeDisabled = new ArrayList<>(allDataMapSchemas.size());
+    for (DataMapSchema dataMap : allDataMapSchemas) {
+      if (dataMap.isLazy()) {
+        dataMapToBeDisabled.add(dataMap);
+      }
+    }
+    storageProvider.updateDataMapStatus(dataMapToBeDisabled, DataMapStatus.DISABLED);
   }
 
   public static void enableDataMap(String dataMapName) throws IOException, NoSuchDataMapException {
@@ -75,12 +95,6 @@ public class DataMapStatusManager {
     }
   }
 
-  public static void enableDataMapsOfTable(CarbonTable table) throws IOException {
-    List<DataMapSchema> allDataMapSchemas =
-        DataMapStoreManager.getInstance().getDataMapSchemasOfTable(table);
-    storageProvider.updateDataMapStatus(allDataMapSchemas, DataMapStatus.ENABLED);
-  }
-
   public static void dropDataMap(String dataMapName) throws IOException, NoSuchDataMapException {
     DataMapSchema dataMapSchema = getDataMapSchema(dataMapName);
     if (dataMapSchema != null) {

http://git-wip-us.apache.org/repos/asf/carbondata/blob/747be9b1/core/src/main/java/org/apache/carbondata/core/indexstore/blockletindex/BlockletDataMapFactory.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/indexstore/blockletindex/BlockletDataMapFactory.java b/core/src/main/java/org/apache/carbondata/core/indexstore/blockletindex/BlockletDataMapFactory.java
index c3df721..e502251 100644
--- a/core/src/main/java/org/apache/carbondata/core/indexstore/blockletindex/BlockletDataMapFactory.java
+++ b/core/src/main/java/org/apache/carbondata/core/indexstore/blockletindex/BlockletDataMapFactory.java
@@ -32,7 +32,7 @@ import org.apache.carbondata.core.datamap.DataMapMeta;
 import org.apache.carbondata.core.datamap.Segment;
 import org.apache.carbondata.core.datamap.dev.CacheableDataMap;
 import org.apache.carbondata.core.datamap.dev.DataMap;
-import org.apache.carbondata.core.datamap.dev.DataMapRefresher;
+import org.apache.carbondata.core.datamap.dev.DataMapBuilder;
 import org.apache.carbondata.core.datamap.dev.DataMapWriter;
 import org.apache.carbondata.core.datamap.dev.cgdatamap.CoarseGrainDataMap;
 import org.apache.carbondata.core.datamap.dev.cgdatamap.CoarseGrainDataMapFactory;
@@ -91,7 +91,7 @@ public class BlockletDataMapFactory extends CoarseGrainDataMapFactory
   }
 
   @Override
-  public DataMapRefresher createRefresher(Segment segment, String shardName) {
+  public DataMapBuilder createBuilder(Segment segment, String shardName) {
     throw new UnsupportedOperationException("not implemented");
   }
 

http://git-wip-us.apache.org/repos/asf/carbondata/blob/747be9b1/core/src/main/java/org/apache/carbondata/core/metadata/schema/datamap/DataMapProperty.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/metadata/schema/datamap/DataMapProperty.java b/core/src/main/java/org/apache/carbondata/core/metadata/schema/datamap/DataMapProperty.java
new file mode 100644
index 0000000..9bd78da
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/core/metadata/schema/datamap/DataMapProperty.java
@@ -0,0 +1,39 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.carbondata.core.metadata.schema.datamap;
+
+import org.apache.carbondata.common.annotations.InterfaceAudience;
+
+/**
+ * Property that can be specified when creating DataMap
+ */
+@InterfaceAudience.Internal
+public class DataMapProperty {
+
+  /**
+   * Used to specify the store location of the datamap
+   */
+  public static final String PARTITIONING = "partitioning";
+  public static final String PATH = "path";
+
+  /**
+   * For datamap created with 'WITH DEFERRED REBUILD' syntax, we will add this
+   * property internally
+   */
+  public static final String DEFERRED_REBUILD = "_internal.deferred.rebuild";
+}

http://git-wip-us.apache.org/repos/asf/carbondata/blob/747be9b1/core/src/main/java/org/apache/carbondata/core/metadata/schema/table/DataMapSchema.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/metadata/schema/table/DataMapSchema.java b/core/src/main/java/org/apache/carbondata/core/metadata/schema/table/DataMapSchema.java
index b22a3d4..611f298 100644
--- a/core/src/main/java/org/apache/carbondata/core/metadata/schema/table/DataMapSchema.java
+++ b/core/src/main/java/org/apache/carbondata/core/metadata/schema/table/DataMapSchema.java
@@ -28,6 +28,8 @@ import java.util.Objects;
 
 import org.apache.carbondata.common.exceptions.sql.MalformedDataMapCommandException;
 import org.apache.carbondata.core.metadata.schema.datamap.DataMapClassProvider;
+import org.apache.carbondata.core.metadata.schema.datamap.DataMapProperty;
+
 import static org.apache.carbondata.core.constants.CarbonCommonConstants.INDEX_COLUMNS;
 
 import com.google.gson.Gson;
@@ -157,7 +159,16 @@ public class DataMapSchema implements Serializable, Writable {
     }
   }
 
-  @Override public void write(DataOutput out) throws IOException {
+  /**
+   * Return true if this datamap is lazy (created with DEFERRED REBUILD syntax)
+   */
+  public boolean isLazy() {
+    String deferredRebuild = getProperties().get(DataMapProperty.DEFERRED_REBUILD);
+    return deferredRebuild != null && deferredRebuild.equalsIgnoreCase("true");
+  }
+
+  @Override
+  public void write(DataOutput out) throws IOException {
     out.writeUTF(dataMapName);
     out.writeUTF(providerName);
     boolean isRelationIdentifierExists = null != relationIdentifier;
@@ -181,7 +192,8 @@ public class DataMapSchema implements Serializable, Writable {
     }
   }
 
-  @Override public void readFields(DataInput in) throws IOException {
+  @Override
+  public void readFields(DataInput in) throws IOException {
     this.dataMapName = in.readUTF();
     this.providerName = in.readUTF();
     boolean isRelationIdnentifierExists = in.readBoolean();

http://git-wip-us.apache.org/repos/asf/carbondata/blob/747be9b1/datamap/bloom/src/main/java/org/apache/carbondata/datamap/bloom/BloomCoarseGrainDataMapFactory.java
----------------------------------------------------------------------
diff --git a/datamap/bloom/src/main/java/org/apache/carbondata/datamap/bloom/BloomCoarseGrainDataMapFactory.java b/datamap/bloom/src/main/java/org/apache/carbondata/datamap/bloom/BloomCoarseGrainDataMapFactory.java
index 1d6eab7..95c21fa 100644
--- a/datamap/bloom/src/main/java/org/apache/carbondata/datamap/bloom/BloomCoarseGrainDataMapFactory.java
+++ b/datamap/bloom/src/main/java/org/apache/carbondata/datamap/bloom/BloomCoarseGrainDataMapFactory.java
@@ -33,9 +33,9 @@ import org.apache.carbondata.core.datamap.DataMapMeta;
 import org.apache.carbondata.core.datamap.DataMapStoreManager;
 import org.apache.carbondata.core.datamap.Segment;
 import org.apache.carbondata.core.datamap.TableDataMap;
+import org.apache.carbondata.core.datamap.dev.DataMapBuilder;
 import org.apache.carbondata.core.datamap.dev.DataMapFactory;
 import org.apache.carbondata.core.datamap.dev.DataMapModel;
-import org.apache.carbondata.core.datamap.dev.DataMapRefresher;
 import org.apache.carbondata.core.datamap.dev.DataMapWriter;
 import org.apache.carbondata.core.datamap.dev.cgdatamap.CoarseGrainDataMap;
 import org.apache.carbondata.core.datastore.filesystem.CarbonFile;
@@ -173,8 +173,8 @@ public class BloomCoarseGrainDataMapFactory extends DataMapFactory<CoarseGrainDa
   }
 
   @Override
-  public DataMapRefresher createRefresher(Segment segment, String shardName) throws IOException {
-    return new BloomDataMapRefresher(getCarbonTable().getTablePath(), this.dataMapName,
+  public DataMapBuilder createBuilder(Segment segment, String shardName) throws IOException {
+    return new BloomDataMapBuilder(getCarbonTable().getTablePath(), this.dataMapName,
         this.dataMapMeta.getIndexedColumns(), segment, shardName,
         this.bloomFilterSize, this.bloomFilterFpp);
   }

http://git-wip-us.apache.org/repos/asf/carbondata/blob/747be9b1/datamap/bloom/src/main/java/org/apache/carbondata/datamap/bloom/BloomDataMapBuilder.java
----------------------------------------------------------------------
diff --git a/datamap/bloom/src/main/java/org/apache/carbondata/datamap/bloom/BloomDataMapBuilder.java b/datamap/bloom/src/main/java/org/apache/carbondata/datamap/bloom/BloomDataMapBuilder.java
new file mode 100644
index 0000000..fa1aef7
--- /dev/null
+++ b/datamap/bloom/src/main/java/org/apache/carbondata/datamap/bloom/BloomDataMapBuilder.java
@@ -0,0 +1,91 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.carbondata.datamap.bloom;
+
+import java.io.IOException;
+import java.util.List;
+
+import org.apache.carbondata.common.annotations.InterfaceAudience;
+import org.apache.carbondata.core.constants.CarbonCommonConstants;
+import org.apache.carbondata.core.datamap.Segment;
+import org.apache.carbondata.core.datamap.dev.DataMapBuilder;
+import org.apache.carbondata.core.metadata.datatype.DataType;
+import org.apache.carbondata.core.metadata.datatype.DataTypes;
+import org.apache.carbondata.core.metadata.schema.table.column.CarbonColumn;
+import org.apache.carbondata.core.util.CarbonUtil;
+
+/**
+ * Implementation for BloomFilter DataMap to rebuild the datamap for main table with existing data
+ */
+@InterfaceAudience.Internal
+public class BloomDataMapBuilder extends BloomDataMapWriter implements DataMapBuilder {
+
+  BloomDataMapBuilder(String tablePath, String dataMapName, List<CarbonColumn> indexColumns,
+      Segment segment, String shardName, int bloomFilterSize, double bloomFilterFpp)
+      throws IOException {
+    super(tablePath, dataMapName, indexColumns, segment, shardName,
+        bloomFilterSize, bloomFilterFpp);
+  }
+
+  @Override
+  public void initialize() throws IOException {
+    super.resetBloomFilters();
+  }
+
+  @Override
+  public void addRow(int blockletId, int pageId, int rowId, Object[] values) {
+    if (currentBlockletId != blockletId) {
+      // new blocklet started, flush bloom filter to datamap file
+      super.writeBloomDataMapFile();
+      currentBlockletId = blockletId;
+    }
+    // for each indexed column, add the data to bloom filter
+    List<CarbonColumn> indexColumns = getIndexColumns();
+    for (int i = 0; i < indexColumns.size(); i++) {
+      Object data = values[i];
+      DataType dataType = indexColumns.get(i).getDataType();
+      byte[] indexValue;
+      if (DataTypes.STRING == dataType) {
+        indexValue = getStringData(data);
+      } else if (DataTypes.BYTE_ARRAY == dataType) {
+        byte[] originValue = (byte[]) data;
+        // String and byte array is LV encoded, L is short type
+        indexValue = new byte[originValue.length - 2];
+        System.arraycopy(originValue, 2, indexValue, 0, originValue.length - 2);
+      } else {
+        indexValue = CarbonUtil.getValueAsBytes(dataType, data);
+      }
+      indexBloomFilters.get(i).put(indexValue);
+    }
+  }
+
+  @Override
+  public void finish() throws IOException {
+    super.finish();
+  }
+
+  @Override
+  public void close() throws IOException {
+    releaseResouce();
+  }
+
+  @Override
+  protected byte[] getStringData(Object data) {
+    return ((String) data).getBytes(CarbonCommonConstants.DEFAULT_CHARSET_CLASS);
+  }
+}

http://git-wip-us.apache.org/repos/asf/carbondata/blob/747be9b1/datamap/bloom/src/main/java/org/apache/carbondata/datamap/bloom/BloomDataMapRefresher.java
----------------------------------------------------------------------
diff --git a/datamap/bloom/src/main/java/org/apache/carbondata/datamap/bloom/BloomDataMapRefresher.java b/datamap/bloom/src/main/java/org/apache/carbondata/datamap/bloom/BloomDataMapRefresher.java
deleted file mode 100644
index 8e05133..0000000
--- a/datamap/bloom/src/main/java/org/apache/carbondata/datamap/bloom/BloomDataMapRefresher.java
+++ /dev/null
@@ -1,91 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.carbondata.datamap.bloom;
-
-import java.io.IOException;
-import java.util.List;
-
-import org.apache.carbondata.common.annotations.InterfaceAudience;
-import org.apache.carbondata.core.constants.CarbonCommonConstants;
-import org.apache.carbondata.core.datamap.Segment;
-import org.apache.carbondata.core.datamap.dev.DataMapRefresher;
-import org.apache.carbondata.core.metadata.datatype.DataType;
-import org.apache.carbondata.core.metadata.datatype.DataTypes;
-import org.apache.carbondata.core.metadata.schema.table.column.CarbonColumn;
-import org.apache.carbondata.core.util.CarbonUtil;
-
-/**
- * Implementation for BloomFilter DataMap to rebuild the datamap for main table with existing data
- */
-@InterfaceAudience.Internal
-public class BloomDataMapRefresher extends BloomDataMapWriter implements DataMapRefresher {
-
-  BloomDataMapRefresher(String tablePath, String dataMapName, List<CarbonColumn> indexColumns,
-      Segment segment, String shardName, int bloomFilterSize, double bloomFilterFpp)
-      throws IOException {
-    super(tablePath, dataMapName, indexColumns, segment, shardName,
-        bloomFilterSize, bloomFilterFpp);
-  }
-
-  @Override
-  public void initialize() throws IOException {
-    super.resetBloomFilters();
-  }
-
-  @Override
-  public void addRow(int blockletId, int pageId, int rowId, Object[] values) {
-    if (currentBlockletId != blockletId) {
-      // new blocklet started, flush bloom filter to datamap fileh
-      super.writeBloomDataMapFile();
-      currentBlockletId = blockletId;
-    }
-    // for each indexed column, add the data to bloom filter
-    List<CarbonColumn> indexColumns = getIndexColumns();
-    for (int i = 0; i < indexColumns.size(); i++) {
-      Object data = values[i];
-      DataType dataType = indexColumns.get(i).getDataType();
-      byte[] indexValue;
-      if (DataTypes.STRING == dataType) {
-        indexValue = getStringData(data);
-      } else if (DataTypes.BYTE_ARRAY == dataType) {
-        byte[] originValue = (byte[]) data;
-        // String and byte array is LV encoded, L is short type
-        indexValue = new byte[originValue.length - 2];
-        System.arraycopy(originValue, 2, indexValue, 0, originValue.length - 2);
-      } else {
-        indexValue = CarbonUtil.getValueAsBytes(dataType, data);
-      }
-      indexBloomFilters.get(i).put(indexValue);
-    }
-  }
-
-  @Override
-  public void finish() throws IOException {
-    super.finish();
-  }
-
-  @Override
-  public void close() throws IOException {
-    releaseResouce();
-  }
-
-  @Override
-  protected byte[] getStringData(Object data) {
-    return ((String) data).getBytes(CarbonCommonConstants.DEFAULT_CHARSET_CLASS);
-  }
-}

http://git-wip-us.apache.org/repos/asf/carbondata/blob/747be9b1/datamap/examples/src/minmaxdatamap/main/java/org/apache/carbondata/datamap/examples/MinMaxIndexDataMapFactory.java
----------------------------------------------------------------------
diff --git a/datamap/examples/src/minmaxdatamap/main/java/org/apache/carbondata/datamap/examples/MinMaxIndexDataMapFactory.java b/datamap/examples/src/minmaxdatamap/main/java/org/apache/carbondata/datamap/examples/MinMaxIndexDataMapFactory.java
index 4197b79..84b9e65 100644
--- a/datamap/examples/src/minmaxdatamap/main/java/org/apache/carbondata/datamap/examples/MinMaxIndexDataMapFactory.java
+++ b/datamap/examples/src/minmaxdatamap/main/java/org/apache/carbondata/datamap/examples/MinMaxIndexDataMapFactory.java
@@ -21,14 +21,13 @@ import java.io.IOException;
 import java.util.ArrayList;
 import java.util.List;
 
-import org.apache.carbondata.common.exceptions.sql.MalformedDataMapCommandException;
 import org.apache.carbondata.common.logging.LogService;
 import org.apache.carbondata.common.logging.LogServiceFactory;
 import org.apache.carbondata.core.datamap.DataMapDistributable;
 import org.apache.carbondata.core.datamap.DataMapMeta;
 import org.apache.carbondata.core.datamap.Segment;
+import org.apache.carbondata.core.datamap.dev.DataMapBuilder;
 import org.apache.carbondata.core.datamap.dev.DataMapModel;
-import org.apache.carbondata.core.datamap.dev.DataMapRefresher;
 import org.apache.carbondata.core.datamap.dev.DataMapWriter;
 import org.apache.carbondata.core.datamap.dev.cgdatamap.CoarseGrainDataMap;
 import org.apache.carbondata.core.datamap.dev.cgdatamap.CoarseGrainDataMapFactory;
@@ -42,8 +41,6 @@ import org.apache.carbondata.core.scan.filter.intf.ExpressionType;
 import org.apache.carbondata.core.util.path.CarbonTablePath;
 import org.apache.carbondata.events.Event;
 
-import org.apache.commons.collections.CollectionUtils;
-import org.apache.commons.collections.Transformer;
 import org.apache.commons.lang3.StringUtils;
 
 /**
@@ -52,25 +49,18 @@ import org.apache.commons.lang3.StringUtils;
 public class MinMaxIndexDataMapFactory extends CoarseGrainDataMapFactory {
   private static final LogService LOGGER = LogServiceFactory.getLogService(
       MinMaxIndexDataMapFactory.class.getName());
-  private DataMapSchema dataMapSchema;
   private DataMapMeta dataMapMeta;
   private String dataMapName;
   private AbsoluteTableIdentifier identifier;
 
-  public MinMaxIndexDataMapFactory(CarbonTable carbonTable) {
-    super(carbonTable);
-  }
+  public MinMaxIndexDataMapFactory(CarbonTable carbonTable, DataMapSchema dataMapSchema) {
+    super(carbonTable, dataMapSchema);
 
-  // this is an example for datamap, we can choose the columns and operations that
-  // will be supported by this datamap. Furthermore, we can add cache-support for this datamap.
-  @Override public void init(DataMapSchema dataMapSchema)
-      throws IOException, MalformedDataMapCommandException {
-    this.dataMapSchema = dataMapSchema;
-    this.identifier = carbonTable.getAbsoluteTableIdentifier();
-    this.dataMapName = dataMapSchema.getDataMapName();
+    // this is an example for datamap, we can choose the columns and operations that
+    // will be supported by this datamap. Furthermore, we can add cache-support for this datamap.
 
     // columns that will be indexed
-    List<CarbonColumn> allColumns = carbonTable.getCreateOrderColumn(identifier.getTableName());
+    List<CarbonColumn> allColumns = getCarbonTable().getCreateOrderColumn(identifier.getTableName());
 
     // operations that will be supported on the indexed columns
     List<ExpressionType> optOperations = new ArrayList<>();
@@ -91,12 +81,14 @@ public class MinMaxIndexDataMapFactory extends CoarseGrainDataMapFactory {
    * @param shardName
    * @return
    */
-  @Override public DataMapWriter createWriter(Segment segment, String shardName) {
-    return new MinMaxDataWriter(carbonTable, dataMapSchema, segment, shardName,
+  @Override
+  public DataMapWriter createWriter(Segment segment, String shardName) {
+    return new MinMaxDataWriter(getCarbonTable(), getDataMapSchema(), segment, shardName,
         dataMapMeta.getIndexedColumns());
   }
 
-  @Override public DataMapRefresher createRefresher(Segment segment, String shardName)
+  @Override
+  public DataMapBuilder createBuilder(Segment segment, String shardName)
       throws IOException {
     return null;
   }

http://git-wip-us.apache.org/repos/asf/carbondata/blob/747be9b1/datamap/lucene/src/main/java/org/apache/carbondata/datamap/lucene/LuceneDataMapBuilder.java
----------------------------------------------------------------------
diff --git a/datamap/lucene/src/main/java/org/apache/carbondata/datamap/lucene/LuceneDataMapBuilder.java b/datamap/lucene/src/main/java/org/apache/carbondata/datamap/lucene/LuceneDataMapBuilder.java
new file mode 100644
index 0000000..35c07f0
--- /dev/null
+++ b/datamap/lucene/src/main/java/org/apache/carbondata/datamap/lucene/LuceneDataMapBuilder.java
@@ -0,0 +1,224 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.carbondata.datamap.lucene;
+
+import java.io.IOException;
+import java.util.List;
+
+import org.apache.carbondata.common.logging.LogService;
+import org.apache.carbondata.common.logging.LogServiceFactory;
+import org.apache.carbondata.core.constants.CarbonCommonConstants;
+import org.apache.carbondata.core.datamap.Segment;
+import org.apache.carbondata.core.datamap.dev.DataMapBuilder;
+import org.apache.carbondata.core.datastore.impl.FileFactory;
+import org.apache.carbondata.core.metadata.datatype.DataType;
+import org.apache.carbondata.core.metadata.datatype.DataTypes;
+import org.apache.carbondata.core.metadata.schema.table.column.CarbonColumn;
+import org.apache.carbondata.core.util.CarbonProperties;
+import org.apache.carbondata.core.util.path.CarbonTablePath;
+
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.lucene.analysis.Analyzer;
+import org.apache.lucene.analysis.standard.StandardAnalyzer;
+import org.apache.lucene.codecs.lucene50.Lucene50StoredFieldsFormat;
+import org.apache.lucene.codecs.lucene62.Lucene62Codec;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.DoublePoint;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.document.FloatPoint;
+import org.apache.lucene.document.IntPoint;
+import org.apache.lucene.document.IntRangeField;
+import org.apache.lucene.document.LongPoint;
+import org.apache.lucene.document.StoredField;
+import org.apache.lucene.document.TextField;
+import org.apache.lucene.index.IndexWriter;
+import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.store.RAMDirectory;
+import org.apache.solr.store.hdfs.HdfsDirectory;
+
+public class LuceneDataMapBuilder implements DataMapBuilder {
+
+  private static final LogService LOGGER =
+      LogServiceFactory.getLogService(LuceneDataMapWriter.class.getName());
+
+  private String dataMapPath;
+
+  private List<CarbonColumn> indexColumns;
+
+  private int columnsCount;
+
+  private IndexWriter indexWriter = null;
+
+  private IndexWriter pageIndexWriter = null;
+
+  private Analyzer analyzer = null;
+
+  LuceneDataMapBuilder(String tablePath, String dataMapName,
+      Segment segment, String shardName, List<CarbonColumn> indexColumns) {
+    this.dataMapPath = CarbonTablePath.getDataMapStorePathOnShardName(
+        tablePath, segment.getSegmentNo(), dataMapName, shardName);
+    this.indexColumns = indexColumns;
+    this.columnsCount = indexColumns.size();
+  }
+
+  @Override
+  public void initialize() throws IOException {
+    // get index path, put index data into segment's path
+    Path indexPath = FileFactory.getPath(dataMapPath);
+    FileSystem fs = FileFactory.getFileSystem(indexPath);
+
+    // if index path exists, should delete it because we are
+    // rebuilding the whole datamap for all segments
+    if (fs.exists(indexPath)) {
+      fs.delete(indexPath, true);
+    }
+    if (!fs.mkdirs(indexPath)) {
+      LOGGER.error("Failed to create directory " + indexPath);
+    }
+
+    if (null == analyzer) {
+      analyzer = new StandardAnalyzer();
+    }
+
+    // create an index writer
+    Directory indexDir = new HdfsDirectory(indexPath, FileFactory.getConfiguration());
+
+    IndexWriterConfig indexWriterConfig = new IndexWriterConfig(analyzer);
+    if (CarbonProperties.getInstance()
+        .getProperty(CarbonCommonConstants.CARBON_LUCENE_COMPRESSION_MODE,
+            CarbonCommonConstants.CARBON_LUCENE_COMPRESSION_MODE_DEFAULT)
+        .equalsIgnoreCase(CarbonCommonConstants.CARBON_LUCENE_COMPRESSION_MODE_DEFAULT)) {
+      indexWriterConfig.setCodec(new Lucene62Codec(Lucene50StoredFieldsFormat.Mode.BEST_SPEED));
+    } else {
+      indexWriterConfig
+          .setCodec(new Lucene62Codec(Lucene50StoredFieldsFormat.Mode.BEST_COMPRESSION));
+    }
+
+    indexWriter = new IndexWriter(indexDir, indexWriterConfig);
+  }
+
+  private IndexWriter createPageIndexWriter() throws IOException {
+    // save index data into RAM, write it to disk after one page is finished
+    RAMDirectory ramDir = new RAMDirectory();
+    return new IndexWriter(ramDir, new IndexWriterConfig(analyzer));
+  }
+
+  private void addPageIndex(IndexWriter pageIndexWriter) throws IOException {
+
+    Directory directory = pageIndexWriter.getDirectory();
+
+    // close ram writer
+    pageIndexWriter.close();
+
+    // add ram index data into disk
+    indexWriter.addIndexes(directory);
+
+    // delete this ram data
+    directory.close();
+  }
+
+  @Override
+  public void addRow(int blockletId, int pageId, int rowId, Object[] values) throws IOException {
+    if (rowId == 0) {
+      if (pageIndexWriter != null) {
+        addPageIndex(pageIndexWriter);
+      }
+      pageIndexWriter = createPageIndexWriter();
+    }
+
+    // create a new document
+    Document doc = new Document();
+
+    // add blocklet Id
+    doc.add(new IntPoint(LuceneDataMapWriter.BLOCKLETID_NAME, (int) values[columnsCount]));
+    doc.add(new StoredField(LuceneDataMapWriter.BLOCKLETID_NAME, (int) values[columnsCount]));
+
+    // add page id
+    doc.add(new IntPoint(LuceneDataMapWriter.PAGEID_NAME, (int) values[columnsCount + 1]));
+    doc.add(new StoredField(LuceneDataMapWriter.PAGEID_NAME, (int) values[columnsCount + 1]));
+
+    // add row id
+    doc.add(new IntPoint(LuceneDataMapWriter.ROWID_NAME, rowId));
+    doc.add(new StoredField(LuceneDataMapWriter.ROWID_NAME, rowId));
+
+    // add other fields
+    for (int colIdx = 0; colIdx < columnsCount; colIdx++) {
+      CarbonColumn column = indexColumns.get(colIdx);
+      addField(doc, column.getColName(), column.getDataType(), values[colIdx]);
+    }
+
+    pageIndexWriter.addDocument(doc);
+  }
+
+  private boolean addField(Document doc, String fieldName, DataType type, Object value) {
+    if (type == DataTypes.STRING) {
+      doc.add(new TextField(fieldName, (String) value, Field.Store.NO));
+    } else if (type == DataTypes.BYTE) {
+      // byte type , use int range to deal with byte, lucene has no byte type
+      IntRangeField field =
+          new IntRangeField(fieldName, new int[] { Byte.MIN_VALUE }, new int[] { Byte.MAX_VALUE });
+      field.setIntValue((int) value);
+      doc.add(field);
+    } else if (type == DataTypes.SHORT) {
+      // short type , use int range to deal with short type, lucene has no short type
+      IntRangeField field = new IntRangeField(fieldName, new int[] { Short.MIN_VALUE },
+          new int[] { Short.MAX_VALUE });
+      field.setShortValue((short) value);
+      doc.add(field);
+    } else if (type == DataTypes.INT) {
+      // int type , use int point to deal with int type
+      doc.add(new IntPoint(fieldName, (int) value));
+    } else if (type == DataTypes.LONG) {
+      // long type , use long point to deal with long type
+      doc.add(new LongPoint(fieldName, (long) value));
+    } else if (type == DataTypes.FLOAT) {
+      doc.add(new FloatPoint(fieldName, (float) value));
+    } else if (type == DataTypes.DOUBLE) {
+      doc.add(new DoublePoint(fieldName, (double) value));
+    } else if (type == DataTypes.DATE) {
+      // TODO: how to get data value
+    } else if (type == DataTypes.TIMESTAMP) {
+      // TODO: how to get
+    } else if (type == DataTypes.BOOLEAN) {
+      IntRangeField field = new IntRangeField(fieldName, new int[] { 0 }, new int[] { 1 });
+      field.setIntValue((boolean) value ? 1 : 0);
+      doc.add(field);
+    } else {
+      LOGGER.error("unsupported data type " + type);
+      throw new RuntimeException("unsupported data type " + type);
+    }
+    return true;
+  }
+
+  @Override
+  public void finish() throws IOException {
+    if (indexWriter != null && pageIndexWriter != null) {
+      addPageIndex(pageIndexWriter);
+    }
+  }
+
+  @Override
+  public void close() throws IOException {
+    if (indexWriter != null) {
+      indexWriter.close();
+    }
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/carbondata/blob/747be9b1/datamap/lucene/src/main/java/org/apache/carbondata/datamap/lucene/LuceneDataMapFactoryBase.java
----------------------------------------------------------------------
diff --git a/datamap/lucene/src/main/java/org/apache/carbondata/datamap/lucene/LuceneDataMapFactoryBase.java b/datamap/lucene/src/main/java/org/apache/carbondata/datamap/lucene/LuceneDataMapFactoryBase.java
index d52cef9..4c6aec3 100644
--- a/datamap/lucene/src/main/java/org/apache/carbondata/datamap/lucene/LuceneDataMapFactoryBase.java
+++ b/datamap/lucene/src/main/java/org/apache/carbondata/datamap/lucene/LuceneDataMapFactoryBase.java
@@ -34,8 +34,8 @@ import org.apache.carbondata.core.datamap.DataMapStoreManager;
 import org.apache.carbondata.core.datamap.Segment;
 import org.apache.carbondata.core.datamap.TableDataMap;
 import org.apache.carbondata.core.datamap.dev.DataMap;
+import org.apache.carbondata.core.datamap.dev.DataMapBuilder;
 import org.apache.carbondata.core.datamap.dev.DataMapFactory;
-import org.apache.carbondata.core.datamap.dev.DataMapRefresher;
 import org.apache.carbondata.core.datamap.dev.DataMapWriter;
 import org.apache.carbondata.core.datastore.filesystem.CarbonFile;
 import org.apache.carbondata.core.datastore.filesystem.CarbonFileFilter;
@@ -153,15 +153,16 @@ abstract class LuceneDataMapFactoryBase<T extends DataMap> extends DataMapFactor
   }
 
   @Override
-  public DataMapRefresher createRefresher(Segment segment, String shardName) {
-    return new LuceneDataMapRefresher(getCarbonTable().getTablePath(), dataMapName,
+  public DataMapBuilder createBuilder(Segment segment, String shardName) {
+    return new LuceneDataMapBuilder(getCarbonTable().getTablePath(), dataMapName,
         segment, shardName, dataMapMeta.getIndexedColumns());
   }
 
   /**
    * Get all distributable objects of a segmentid
    */
-  @Override public List<DataMapDistributable> toDistributable(Segment segment) {
+  @Override
+  public List<DataMapDistributable> toDistributable(Segment segment) {
     List<DataMapDistributable> lstDataMapDistribute = new ArrayList<>();
     CarbonFile[] indexDirs =
         getAllIndexDirs(tableIdentifier.getTablePath(), segment.getSegmentNo());

http://git-wip-us.apache.org/repos/asf/carbondata/blob/747be9b1/datamap/lucene/src/main/java/org/apache/carbondata/datamap/lucene/LuceneDataMapRefresher.java
----------------------------------------------------------------------
diff --git a/datamap/lucene/src/main/java/org/apache/carbondata/datamap/lucene/LuceneDataMapRefresher.java b/datamap/lucene/src/main/java/org/apache/carbondata/datamap/lucene/LuceneDataMapRefresher.java
deleted file mode 100644
index ee500ef..0000000
--- a/datamap/lucene/src/main/java/org/apache/carbondata/datamap/lucene/LuceneDataMapRefresher.java
+++ /dev/null
@@ -1,224 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.carbondata.datamap.lucene;
-
-import java.io.IOException;
-import java.util.List;
-
-import org.apache.carbondata.common.logging.LogService;
-import org.apache.carbondata.common.logging.LogServiceFactory;
-import org.apache.carbondata.core.constants.CarbonCommonConstants;
-import org.apache.carbondata.core.datamap.Segment;
-import org.apache.carbondata.core.datamap.dev.DataMapRefresher;
-import org.apache.carbondata.core.datastore.impl.FileFactory;
-import org.apache.carbondata.core.metadata.datatype.DataType;
-import org.apache.carbondata.core.metadata.datatype.DataTypes;
-import org.apache.carbondata.core.metadata.schema.table.column.CarbonColumn;
-import org.apache.carbondata.core.util.CarbonProperties;
-import org.apache.carbondata.core.util.path.CarbonTablePath;
-
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.lucene.analysis.Analyzer;
-import org.apache.lucene.analysis.standard.StandardAnalyzer;
-import org.apache.lucene.codecs.lucene50.Lucene50StoredFieldsFormat;
-import org.apache.lucene.codecs.lucene62.Lucene62Codec;
-import org.apache.lucene.document.Document;
-import org.apache.lucene.document.DoublePoint;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.document.FloatPoint;
-import org.apache.lucene.document.IntPoint;
-import org.apache.lucene.document.IntRangeField;
-import org.apache.lucene.document.LongPoint;
-import org.apache.lucene.document.StoredField;
-import org.apache.lucene.document.TextField;
-import org.apache.lucene.index.IndexWriter;
-import org.apache.lucene.index.IndexWriterConfig;
-import org.apache.lucene.store.Directory;
-import org.apache.lucene.store.RAMDirectory;
-import org.apache.solr.store.hdfs.HdfsDirectory;
-
-public class LuceneDataMapRefresher implements DataMapRefresher {
-
-  private static final LogService LOGGER =
-      LogServiceFactory.getLogService(LuceneDataMapWriter.class.getName());
-
-  private String dataMapPath;
-
-  private List<CarbonColumn> indexColumns;
-
-  private int columnsCount;
-
-  private IndexWriter indexWriter = null;
-
-  private IndexWriter pageIndexWriter = null;
-
-  private Analyzer analyzer = null;
-
-  LuceneDataMapRefresher(String tablePath, String dataMapName,
-      Segment segment, String shardName, List<CarbonColumn> indexColumns) {
-    this.dataMapPath = CarbonTablePath.getDataMapStorePathOnShardName(
-        tablePath, segment.getSegmentNo(), dataMapName, shardName);
-    this.indexColumns = indexColumns;
-    this.columnsCount = indexColumns.size();
-  }
-
-  @Override
-  public void initialize() throws IOException {
-    // get index path, put index data into segment's path
-    Path indexPath = FileFactory.getPath(dataMapPath);
-    FileSystem fs = FileFactory.getFileSystem(indexPath);
-
-    // if index path exists, should delete it because we are
-    // rebuilding the whole datamap for all segments
-    if (fs.exists(indexPath)) {
-      fs.delete(indexPath, true);
-    }
-    if (!fs.mkdirs(indexPath)) {
-      LOGGER.error("Failed to create directory " + indexPath);
-    }
-
-    if (null == analyzer) {
-      analyzer = new StandardAnalyzer();
-    }
-
-    // create a index writer
-    Directory indexDir = new HdfsDirectory(indexPath, FileFactory.getConfiguration());
-
-    IndexWriterConfig indexWriterConfig = new IndexWriterConfig(analyzer);
-    if (CarbonProperties.getInstance()
-        .getProperty(CarbonCommonConstants.CARBON_LUCENE_COMPRESSION_MODE,
-            CarbonCommonConstants.CARBON_LUCENE_COMPRESSION_MODE_DEFAULT)
-        .equalsIgnoreCase(CarbonCommonConstants.CARBON_LUCENE_COMPRESSION_MODE_DEFAULT)) {
-      indexWriterConfig.setCodec(new Lucene62Codec(Lucene50StoredFieldsFormat.Mode.BEST_SPEED));
-    } else {
-      indexWriterConfig
-          .setCodec(new Lucene62Codec(Lucene50StoredFieldsFormat.Mode.BEST_COMPRESSION));
-    }
-
-    indexWriter = new IndexWriter(indexDir, new IndexWriterConfig(analyzer));
-  }
-
-  private IndexWriter createPageIndexWriter() throws IOException {
-    // save index data into ram, write into disk after one page finished
-    RAMDirectory ramDir = new RAMDirectory();
-    return new IndexWriter(ramDir, new IndexWriterConfig(analyzer));
-  }
-
-  private void addPageIndex(IndexWriter pageIndexWriter) throws IOException {
-
-    Directory directory = pageIndexWriter.getDirectory();
-
-    // close ram writer
-    pageIndexWriter.close();
-
-    // add ram index data into disk
-    indexWriter.addIndexes(directory);
-
-    // delete this ram data
-    directory.close();
-  }
-
-  @Override
-  public void addRow(int blockletId, int pageId, int rowId, Object[] values) throws IOException {
-    if (rowId == 0) {
-      if (pageIndexWriter != null) {
-        addPageIndex(pageIndexWriter);
-      }
-      pageIndexWriter = createPageIndexWriter();
-    }
-
-    // create a new document
-    Document doc = new Document();
-
-    // add blocklet Id
-    doc.add(new IntPoint(LuceneDataMapWriter.BLOCKLETID_NAME, (int) values[columnsCount]));
-    doc.add(new StoredField(LuceneDataMapWriter.BLOCKLETID_NAME, (int) values[columnsCount]));
-
-    // add page id
-    doc.add(new IntPoint(LuceneDataMapWriter.PAGEID_NAME, (int) values[columnsCount + 1]));
-    doc.add(new StoredField(LuceneDataMapWriter.PAGEID_NAME, (int) values[columnsCount + 1]));
-
-    // add row id
-    doc.add(new IntPoint(LuceneDataMapWriter.ROWID_NAME, rowId));
-    doc.add(new StoredField(LuceneDataMapWriter.ROWID_NAME, rowId));
-
-    // add other fields
-    for (int colIdx = 0; colIdx < columnsCount; colIdx++) {
-      CarbonColumn column = indexColumns.get(colIdx);
-      addField(doc, column.getColName(), column.getDataType(), values[colIdx]);
-    }
-
-    pageIndexWriter.addDocument(doc);
-  }
-
-  private boolean addField(Document doc, String fieldName, DataType type, Object value) {
-    if (type == DataTypes.STRING) {
-      doc.add(new TextField(fieldName, (String) value, Field.Store.NO));
-    } else if (type == DataTypes.BYTE) {
-      // byte type , use int range to deal with byte, lucene has no byte type
-      IntRangeField field =
-          new IntRangeField(fieldName, new int[] { Byte.MIN_VALUE }, new int[] { Byte.MAX_VALUE });
-      field.setIntValue((int) value);
-      doc.add(field);
-    } else if (type == DataTypes.SHORT) {
-      // short type , use int range to deal with short type, lucene has no short type
-      IntRangeField field = new IntRangeField(fieldName, new int[] { Short.MIN_VALUE },
-          new int[] { Short.MAX_VALUE });
-      field.setShortValue((short) value);
-      doc.add(field);
-    } else if (type == DataTypes.INT) {
-      // int type , use int point to deal with int type
-      doc.add(new IntPoint(fieldName, (int) value));
-    } else if (type == DataTypes.LONG) {
-      // long type , use long point to deal with long type
-      doc.add(new LongPoint(fieldName, (long) value));
-    } else if (type == DataTypes.FLOAT) {
-      doc.add(new FloatPoint(fieldName, (float) value));
-    } else if (type == DataTypes.DOUBLE) {
-      doc.add(new DoublePoint(fieldName, (double) value));
-    } else if (type == DataTypes.DATE) {
-      // TODO: how to get data value
-    } else if (type == DataTypes.TIMESTAMP) {
-      // TODO: how to get
-    } else if (type == DataTypes.BOOLEAN) {
-      IntRangeField field = new IntRangeField(fieldName, new int[] { 0 }, new int[] { 1 });
-      field.setIntValue((boolean) value ? 1 : 0);
-      doc.add(field);
-    } else {
-      LOGGER.error("unsupport data type " + type);
-      throw new RuntimeException("unsupported data type " + type);
-    }
-    return true;
-  }
-
-  @Override
-  public void finish() throws IOException {
-    if (indexWriter != null && pageIndexWriter != null) {
-      addPageIndex(pageIndexWriter);
-    }
-  }
-
-  @Override
-  public void close() throws IOException {
-    if (indexWriter != null) {
-      indexWriter.close();
-    }
-  }
-
-}

http://git-wip-us.apache.org/repos/asf/carbondata/blob/747be9b1/hadoop/src/main/java/org/apache/carbondata/hadoop/api/CarbonInputFormat.java
----------------------------------------------------------------------
diff --git a/hadoop/src/main/java/org/apache/carbondata/hadoop/api/CarbonInputFormat.java b/hadoop/src/main/java/org/apache/carbondata/hadoop/api/CarbonInputFormat.java
index a5d4df2..c5365d5 100644
--- a/hadoop/src/main/java/org/apache/carbondata/hadoop/api/CarbonInputFormat.java
+++ b/hadoop/src/main/java/org/apache/carbondata/hadoop/api/CarbonInputFormat.java
@@ -28,7 +28,6 @@ import java.util.List;
 import org.apache.carbondata.core.constants.CarbonCommonConstants;
 import org.apache.carbondata.core.constants.CarbonCommonConstantsInternal;
 import org.apache.carbondata.core.datamap.DataMapChooser;
-import org.apache.carbondata.core.datamap.DataMapLevel;
 import org.apache.carbondata.core.datamap.Segment;
 import org.apache.carbondata.core.datamap.dev.expr.DataMapExprWrapper;
 import org.apache.carbondata.core.exception.InvalidConfigurationException;
@@ -420,7 +419,7 @@ m filterExpression
     DataMapJob dataMapJob = getDataMapJob(job.getConfiguration());
     List<PartitionSpec> partitionsToPrune = getPartitionsToPrune(job.getConfiguration());
     // First prune using default datamap on driver side.
-    DataMapExprWrapper dataMapExprWrapper = DataMapChooser.get()
+    DataMapExprWrapper dataMapExprWrapper = DataMapChooser
         .getDefaultDataMap(getOrCreateCarbonTable(job.getConfiguration()), resolver);
     List<ExtendedBlocklet> prunedBlocklets =
         dataMapExprWrapper.prune(segmentIds, partitionsToPrune);
@@ -428,9 +427,10 @@ m filterExpression
     ExplainCollector.recordDefaultDataMapPruning(
         dataMapExprWrapper.getDataMapSchema(), prunedBlocklets.size());
 
+    DataMapChooser chooser = new DataMapChooser(getOrCreateCarbonTable(job.getConfiguration()));
+
     // Get the available CG datamaps and prune further.
-    DataMapExprWrapper cgDataMapExprWrapper = DataMapChooser.get()
-        .chooseCGDataMap(getOrCreateCarbonTable(job.getConfiguration()), resolver);
+    DataMapExprWrapper cgDataMapExprWrapper = chooser.chooseCGDataMap(resolver);
     if (cgDataMapExprWrapper != null) {
       // Prune segments from already pruned blocklets
       pruneSegments(segmentIds, prunedBlocklets);
@@ -447,19 +447,19 @@ m filterExpression
           cgDataMapExprWrapper.getDataMapSchema(), prunedBlocklets.size());
     }
     // Now try to prune with FG DataMap.
-    dataMapExprWrapper = DataMapChooser.get()
-        .chooseFGDataMap(getOrCreateCarbonTable(job.getConfiguration()), resolver);
-    if (dataMapExprWrapper != null && dataMapExprWrapper.getDataMapLevel() == DataMapLevel.FG
-        && isFgDataMapPruningEnable(job.getConfiguration()) && dataMapJob != null) {
-      // Prune segments from already pruned blocklets
-      pruneSegments(segmentIds, prunedBlocklets);
-      prunedBlocklets =
-          executeDataMapJob(carbonTable, resolver, segmentIds, dataMapExprWrapper, dataMapJob,
-              partitionsToPrune);
+    if (isFgDataMapPruningEnable(job.getConfiguration()) && dataMapJob != null) {
+      DataMapExprWrapper fgDataMapExprWrapper = chooser.chooseFGDataMap(resolver);
+      if (fgDataMapExprWrapper != null) {
+        // Prune segments from already pruned blocklets
+        pruneSegments(segmentIds, prunedBlocklets);
+        prunedBlocklets =
+            executeDataMapJob(carbonTable, resolver, segmentIds, fgDataMapExprWrapper, dataMapJob,
+                partitionsToPrune);
 
-      ExplainCollector.recordFGDataMapPruning(
-          dataMapExprWrapper.getDataMapSchema(), prunedBlocklets.size());
-    }
+        ExplainCollector.recordFGDataMapPruning(
+            fgDataMapExprWrapper.getDataMapSchema(), prunedBlocklets.size());
+      }
+    } // TODO: add a else branch to push FGDataMap pruning to reader side
     return prunedBlocklets;
   }
 

http://git-wip-us.apache.org/repos/asf/carbondata/blob/747be9b1/hadoop/src/main/java/org/apache/carbondata/hadoop/api/CarbonOutputCommitter.java
----------------------------------------------------------------------
diff --git a/hadoop/src/main/java/org/apache/carbondata/hadoop/api/CarbonOutputCommitter.java b/hadoop/src/main/java/org/apache/carbondata/hadoop/api/CarbonOutputCommitter.java
index 2851de2..0bcb188 100644
--- a/hadoop/src/main/java/org/apache/carbondata/hadoop/api/CarbonOutputCommitter.java
+++ b/hadoop/src/main/java/org/apache/carbondata/hadoop/api/CarbonOutputCommitter.java
@@ -168,7 +168,7 @@ public class CarbonOutputCommitter extends FileOutputCommitter {
       } else {
         CarbonLoaderUtil.recordNewLoadMetadata(newMetaEntry, loadModel, false, false, uuid);
       }
-      DataMapStatusManager.disableDataMapsOfTable(carbonTable);
+      DataMapStatusManager.disableAllLazyDataMaps(carbonTable);
       if (operationContext != null) {
         LoadEvents.LoadTablePostStatusUpdateEvent postStatusUpdateEvent =
             new LoadEvents.LoadTablePostStatusUpdateEvent(loadModel);

http://git-wip-us.apache.org/repos/asf/carbondata/blob/747be9b1/integration/spark-common-test/src/test/scala/org/apache/carbondata/datamap/lucene/LuceneFineGrainDataMapSuite.scala
----------------------------------------------------------------------
diff --git a/integration/spark-common-test/src/test/scala/org/apache/carbondata/datamap/lucene/LuceneFineGrainDataMapSuite.scala b/integration/spark-common-test/src/test/scala/org/apache/carbondata/datamap/lucene/LuceneFineGrainDataMapSuite.scala
index 406ee41..9981ff5 100644
--- a/integration/spark-common-test/src/test/scala/org/apache/carbondata/datamap/lucene/LuceneFineGrainDataMapSuite.scala
+++ b/integration/spark-common-test/src/test/scala/org/apache/carbondata/datamap/lucene/LuceneFineGrainDataMapSuite.scala
@@ -30,7 +30,7 @@ import org.apache.carbondata.common.exceptions.sql.{MalformedCarbonCommandExcept
 import org.apache.carbondata.core.constants.CarbonCommonConstants
 import org.apache.carbondata.core.util.CarbonProperties
 import org.apache.carbondata.core.datamap.DataMapStoreManager
-import org.apache.carbondata.core.scan.expression.exception.FilterUnsupportedException
+import org.apache.carbondata.core.datamap.status.DataMapStatusManager
 
 class LuceneFineGrainDataMapSuite extends QueryTest with BeforeAndAfterAll {
 
@@ -62,14 +62,7 @@ class LuceneFineGrainDataMapSuite extends QueryTest with BeforeAndAfterAll {
         | TBLPROPERTIES('SORT_COLUMNS'='city,name', 'SORT_SCOPE'='LOCAL_SORT')
       """.stripMargin)
 
-    sql("DROP TABLE IF EXISTS datamap_test4")
-
-    sql(
-      """
-        | CREATE TABLE datamap_test4(id INT, name STRING, city STRING, age INT)
-        | STORED BY 'carbondata'
-        | TBLPROPERTIES('SORT_COLUMNS'='city,name', 'SORT_SCOPE'='LOCAL_SORT', 'autorefreshdatamap' = 'false')
-      """.stripMargin)
+    sql(s"LOAD DATA LOCAL INPATH '$file2' INTO TABLE datamap_test OPTIONS('header'='false')")
   }
 
   test("validate INDEX_COLUMNS DataMap property") {
@@ -132,38 +125,38 @@ class LuceneFineGrainDataMapSuite extends QueryTest with BeforeAndAfterAll {
          | DMProperties('INDEX_COLUMNS'='Name , cIty')
       """.stripMargin)
 
-    sql(s"LOAD DATA LOCAL INPATH '$file2' INTO TABLE datamap_test OPTIONS('header'='false')")
     checkAnswer(sql("SELECT * FROM datamap_test WHERE TEXT_MATCH('name:n10')"), sql(s"select * from datamap_test where name='n10'"))
     checkAnswer(sql("SELECT * FROM datamap_test WHERE TEXT_MATCH('city:c020')"), sql(s"SELECT * FROM datamap_test WHERE city='c020'"))
 
     sql("drop datamap dm on table datamap_test")
   }
 
-  test("test lucene refresh data map") {
-
+  test("test lucene rebuild data map") {
+    sql("DROP TABLE IF EXISTS datamap_test4")
+    sql(
+      """
+        | CREATE TABLE datamap_test4(id INT, name STRING, city STRING, age INT)
+        | STORED BY 'carbondata'
+        | TBLPROPERTIES('SORT_COLUMNS'='city,name', 'SORT_SCOPE'='LOCAL_SORT', 'autorefreshdatamap' = 'false')
+      """.stripMargin)
     sql(s"LOAD DATA LOCAL INPATH '$file2' INTO TABLE datamap_test4 OPTIONS('header'='false')")
 
-    //sql(s"LOAD DATA LOCAL INPATH '$file2' INTO TABLE datamap_test4 OPTIONS('header'='false')")
-
     sql(
       s"""
          | CREATE DATAMAP dm4 ON TABLE datamap_test4
          | USING 'lucene'
-         | DMProperties('INDEX_COLUMNS'='Name , cIty')
+         | WITH DEFERRED REBUILD
+         | DMProperties('INDEX_COLUMNS'='name , city')
       """.stripMargin)
 
-    //sql(s"LOAD DATA LOCAL INPATH '$file2' INTO TABLE datamap_test4 OPTIONS('header'='false')")
+    sql("REBUILD DATAMAP dm4 ON TABLE datamap_test4")
 
-    sql("refresh datamap dm4 ON TABLE datamap_test4")
-
-    checkAnswer(sql("SELECT * FROM datamap_test4 WHERE TEXT_MATCH('name:n10')"), sql(s"select * from datamap_test4 where name='n10'"))
+    checkAnswer(sql("SELECT * FROM datamap_test4 WHERE TEXT_MATCH('name:n10')"), sql(s"select * from datamap_test where name='n10'"))
     checkAnswer(sql("SELECT * FROM datamap_test4 WHERE TEXT_MATCH('city:c020')"), sql(s"SELECT * FROM datamap_test4 WHERE city='c020'"))
 
-    sql("drop datamap dm4 on table datamap_test4")
-
+    sql("drop table datamap_test4")
   }
 
-
   test("test lucene fine grain data map drop") {
     sql("DROP TABLE IF EXISTS datamap_test1")
     sql(
@@ -549,6 +542,7 @@ class LuceneFineGrainDataMapSuite extends QueryTest with BeforeAndAfterAll {
   }
 
   test("test lucene fine grain data map with text-match limit") {
+
     sql(
       s"""
          | CREATE DATAMAP dm ON TABLE datamap_test
@@ -556,7 +550,6 @@ class LuceneFineGrainDataMapSuite extends QueryTest with BeforeAndAfterAll {
          | DMProperties('INDEX_COLUMNS'='name , city')
       """.stripMargin)
 
-    sql(s"LOAD DATA LOCAL INPATH '$file2' INTO TABLE datamap_test OPTIONS('header'='false')")
     checkAnswer(sql("select count(*) from datamap_test where TEXT_MATCH_WITH_LIMIT('name:n10*',10)"),Seq(Row(10)))
     checkAnswer(sql("select count(*) from datamap_test where TEXT_MATCH_WITH_LIMIT('name:n10*',50)"),Seq(Row(50)))
     sql("drop datamap dm on table datamap_test")
@@ -570,17 +563,16 @@ class LuceneFineGrainDataMapSuite extends QueryTest with BeforeAndAfterAll {
          | DMProperties('INDEX_COLUMNS'='name , city')
       """.stripMargin)
 
-    sql(s"LOAD DATA LOCAL INPATH '$file2' INTO TABLE datamap_test OPTIONS('header'='false')")
-    sql("DROP TABLE IF EXISTS tabl1")
     sql(
       """
         | CREATE TABLE table1(id INT, name STRING, city STRING, age INT)
         | STORED BY 'carbondata'
         | TBLPROPERTIES('SORT_COLUMNS'='city,name', 'SORT_SCOPE'='LOCAL_SORT')
       """.stripMargin)
-    sql("INSERT OVERWRITE TABLE table1 select *from datamap_test where TEXT_MATCH('name:n*')")
+    sql("INSERT OVERWRITE TABLE table1 select * from datamap_test where TEXT_MATCH('name:n*')")
     checkAnswer(sql("select count(*) from table1"),Seq(Row(10000)))
     sql("drop datamap dm on table datamap_test")
+    sql("drop table table1")
   }
 
   test("explain query with lucene datamap") {
@@ -698,7 +690,31 @@ class LuceneFineGrainDataMapSuite extends QueryTest with BeforeAndAfterAll {
       sql(s"select * from datamap_test5 where name='n10'"))
     checkAnswer(sql("SELECT * FROM datamap_test5 WHERE TEXT_MATCH('city:c020')"),
       sql(s"SELECT * FROM datamap_test5 WHERE city='c020'"))
+    sql("DROP TABLE IF EXISTS datamap_test5")
+  }
 
+  test("test lucene fine grain datamap rebuild") {
+    sql("DROP TABLE IF EXISTS datamap_test5")
+    sql(
+      """
+        | CREATE TABLE datamap_test5(id INT, name STRING, city STRING, age INT)
+        | STORED BY 'carbondata'
+        | TBLPROPERTIES('SORT_COLUMNS'='city,name', 'SORT_SCOPE'='LOCAL_SORT')
+      """.stripMargin)
+    sql(
+      s"""
+         | CREATE DATAMAP dm ON TABLE datamap_test5
+         | USING 'lucene'
+         | WITH DEFERRED REBUILD
+         | DMProperties('INDEX_COLUMNS'='city')
+      """.stripMargin)
+    sql(s"LOAD DATA LOCAL INPATH '$file2' INTO TABLE datamap_test5 OPTIONS('header'='false')")
+    val map = DataMapStatusManager.readDataMapStatusMap()
+    assert(!map.get("dm").isEnabled)
+    sql("REBUILD DATAMAP dm ON TABLE datamap_test5")
+    checkAnswer(sql("SELECT * FROM datamap_test5 WHERE TEXT_MATCH('city:c020')"),
+      sql(s"SELECT * FROM datamap_test5 WHERE city='c020'"))
+    sql("DROP TABLE IF EXISTS datamap_test5")
   }
 
   test("test text_match on normal table") {

http://git-wip-us.apache.org/repos/asf/carbondata/blob/747be9b1/integration/spark-common-test/src/test/scala/org/apache/carbondata/integration/spark/testsuite/preaggregate/TestPreAggCreateCommand.scala
----------------------------------------------------------------------
diff --git a/integration/spark-common-test/src/test/scala/org/apache/carbondata/integration/spark/testsuite/preaggregate/TestPreAggCreateCommand.scala b/integration/spark-common-test/src/test/scala/org/apache/carbondata/integration/spark/testsuite/preaggregate/TestPreAggCreateCommand.scala
index 629e9d9..fb25141 100644
--- a/integration/spark-common-test/src/test/scala/org/apache/carbondata/integration/spark/testsuite/preaggregate/TestPreAggCreateCommand.scala
+++ b/integration/spark-common-test/src/test/scala/org/apache/carbondata/integration/spark/testsuite/preaggregate/TestPreAggCreateCommand.scala
@@ -436,6 +436,16 @@ class TestPreAggCreateCommand extends QueryTest with BeforeAndAfterAll {
     }
   }
 
+  test("test pre agg datamap with deferred rebuild") {
+    val e = intercept[MalformedDataMapCommandException] {
+      sql("create datamap failure on table PreAggMain1 " +
+          "using 'preaggregate' " +
+          "with deferred rebuild " +
+          "as select a as a1,sum(b) as sum from PreAggMain1 group by a")
+    }
+    assert(e.getMessage.contains("DEFERRED REBUILD is not supported on this DataMap"))
+  }
+
   // TODO: Need to Fix
   ignore("test creation of multiple preaggregate of same name concurrently") {
     sql("DROP TABLE IF EXISTS tbl_concurr")

http://git-wip-us.apache.org/repos/asf/carbondata/blob/747be9b1/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/datamap/CGDataMapTestCase.scala
----------------------------------------------------------------------
diff --git a/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/datamap/CGDataMapTestCase.scala b/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/datamap/CGDataMapTestCase.scala
index b441bb4..d8fc46f 100644
--- a/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/datamap/CGDataMapTestCase.scala
+++ b/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/datamap/CGDataMapTestCase.scala
@@ -28,7 +28,7 @@ import org.scalatest.BeforeAndAfterAll
 
 import org.apache.carbondata.core.constants.CarbonCommonConstants
 import org.apache.carbondata.core.datamap.{DataMapDistributable, DataMapMeta, Segment}
-import org.apache.carbondata.core.datamap.dev.{DataMapModel, DataMapRefresher, DataMapWriter}
+import org.apache.carbondata.core.datamap.dev.{DataMapModel, DataMapBuilder, DataMapWriter}
 import org.apache.carbondata.core.datamap.dev.cgdatamap.{CoarseGrainDataMap, CoarseGrainDataMapFactory}
 import org.apache.carbondata.core.datastore.FileReader
 import org.apache.carbondata.core.datastore.block.SegmentProperties
@@ -147,8 +147,8 @@ class CGDataMapFactory(
     false
   }
 
-  override def createRefresher(segment: Segment,
-      shardName: String): DataMapRefresher = {
+  override def createBuilder(segment: Segment,
+      shardName: String): DataMapBuilder = {
     ???
   }
 }

http://git-wip-us.apache.org/repos/asf/carbondata/blob/747be9b1/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/datamap/DataMapWriterSuite.scala
----------------------------------------------------------------------
diff --git a/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/datamap/DataMapWriterSuite.scala b/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/datamap/DataMapWriterSuite.scala
index 5e0c10a..ffbcf67 100644
--- a/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/datamap/DataMapWriterSuite.scala
+++ b/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/datamap/DataMapWriterSuite.scala
@@ -27,7 +27,7 @@ import org.scalatest.BeforeAndAfterAll
 
 import org.apache.carbondata.core.constants.CarbonCommonConstants
 import org.apache.carbondata.core.datamap.{DataMapDistributable, DataMapMeta, Segment}
-import org.apache.carbondata.core.datamap.dev.{DataMapRefresher, DataMapWriter}
+import org.apache.carbondata.core.datamap.dev.{DataMapBuilder, DataMapWriter}
 import org.apache.carbondata.core.datamap.dev.cgdatamap.{CoarseGrainDataMap, CoarseGrainDataMapFactory}
 import org.apache.carbondata.core.datastore.page.ColumnPage
 import org.apache.carbondata.core.features.TableOperation
@@ -83,8 +83,8 @@ class C2DataMapFactory(
     false
   }
 
-  override def createRefresher(segment: Segment,
-      shardName: String): DataMapRefresher = {
+  override def createBuilder(segment: Segment,
+      shardName: String): DataMapBuilder = {
     ???
   }
 }

http://git-wip-us.apache.org/repos/asf/carbondata/blob/747be9b1/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/datamap/FGDataMapTestCase.scala
----------------------------------------------------------------------
diff --git a/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/datamap/FGDataMapTestCase.scala b/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/datamap/FGDataMapTestCase.scala
index 976e580..535a112 100644
--- a/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/datamap/FGDataMapTestCase.scala
+++ b/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/datamap/FGDataMapTestCase.scala
@@ -27,7 +27,9 @@ import org.scalatest.BeforeAndAfterAll
 
 import org.apache.carbondata.core.datamap.{DataMapDistributable, DataMapMeta}
 import org.apache.carbondata.core.datamap.Segment
-import org.apache.carbondata.core.datamap.dev.{DataMapModel, DataMapRefresher, DataMapWriter}
+import org.apache.carbondata.core.datamap.dev.{DataMapModel, DataMapBuilder, DataMapWriter}
+import org.apache.carbondata.core.datamap.{DataMapDistributable, DataMapMeta, Segment}
+import org.apache.carbondata.core.datamap.dev.{DataMapModel, DataMapBuilder, DataMapWriter}
 import org.apache.carbondata.core.datamap.dev.fgdatamap.{FineGrainBlocklet, FineGrainDataMap, FineGrainDataMapFactory}
 import org.apache.carbondata.core.datastore.FileReader
 import org.apache.carbondata.core.datastore.block.SegmentProperties
@@ -141,8 +143,8 @@ class FGDataMapFactory(carbonTable: CarbonTable,
     false
   }
 
-  override def createRefresher(segment: Segment,
-      shardName: String): DataMapRefresher = {
+  override def createBuilder(segment: Segment,
+      shardName: String): DataMapBuilder = {
     ???
   }
 }


[17/50] [abbrv] carbondata git commit: [CARBONDATA-2443][SDK] Multi level complex type support for AVRO based SDK

Posted by ma...@apache.org.
[CARBONDATA-2443][SDK] Multi level complex type support for AVRO based SDK

Problem:
Inferring the complex type schema fails for a boolean array type when reading a store created using the SDK writer

Analysis:
When we create an external table and infer the schema from a store created using the SDK writer, the operation fails for a complex type field with a boolean array dataType. During schema creation the SDK writer adds a child column named val for each array type child.
While parsing, the logic that appends the parent name to the child column name is missing for the boolean type, which causes this problem.

Solution:
Handle the parsing for boolean type

This closes #2294
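
For illustration, here is a minimal stand-alone sketch of the counter-based naming scheme this change introduces for nested array children (the class and method names below are hypothetical, not the CarbonData API): a shared AtomicInteger hands out val0, val1, ... as each array level is visited, so every array child gets a unique column name without parsing the parent column name at all.

    import java.util.concurrent.atomic.AtomicInteger;

    // Hypothetical sketch: unique child-column names for nested array levels.
    public class ArrayChildNaming {

      // one shared counter per table-schema build
      private final AtomicInteger valIndex = new AtomicInteger(0);

      // each array level asks for the next unique child column name
      String nextArrayChildName() {
        return "val" + valIndex.getAndIncrement();
      }

      public static void main(String[] args) {
        ArrayChildNaming naming = new ArrayChildNaming();
        // array<array<boolean>> -> outer child named val0, inner child named val1
        System.out.println(naming.nextArrayChildName());  // prints val0
        System.out.println(naming.nextArrayChildName());  // prints val1
      }
    }

The counter replaces the earlier approach of deriving the next name from the parent column's own "val" suffix, which is why the TableSchemaBuilder diff below removes the string-splitting in getColNameForArray.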


Project: http://git-wip-us.apache.org/repos/asf/carbondata/repo
Commit: http://git-wip-us.apache.org/repos/asf/carbondata/commit/35a7b5e9
Tree: http://git-wip-us.apache.org/repos/asf/carbondata/tree/35a7b5e9
Diff: http://git-wip-us.apache.org/repos/asf/carbondata/diff/35a7b5e9

Branch: refs/heads/spark-2.3
Commit: 35a7b5e9af5cabe85794274e07cab9a6bbbbc53f
Parents: ff5166e
Author: manishgupta88 <to...@gmail.com>
Authored: Thu May 10 17:09:17 2018 +0530
Committer: ravipesala <ra...@gmail.com>
Committed: Thu May 10 20:49:28 2018 +0530

----------------------------------------------------------------------
 .../schema/table/TableSchemaBuilder.java        | 33 ++++++++------------
 .../schema/table/TableSchemaBuilderSuite.java   | 13 +++++---
 .../spark/sql/catalyst/CarbonDDLSqlParser.scala |  5 +++
 .../sdk/file/CarbonWriterBuilder.java           | 13 ++++++--
 4 files changed, 36 insertions(+), 28 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/carbondata/blob/35a7b5e9/core/src/main/java/org/apache/carbondata/core/metadata/schema/table/TableSchemaBuilder.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/metadata/schema/table/TableSchemaBuilder.java b/core/src/main/java/org/apache/carbondata/core/metadata/schema/table/TableSchemaBuilder.java
index ca082e1..b078400 100644
--- a/core/src/main/java/org/apache/carbondata/core/metadata/schema/table/TableSchemaBuilder.java
+++ b/core/src/main/java/org/apache/carbondata/core/metadata/schema/table/TableSchemaBuilder.java
@@ -24,6 +24,7 @@ import java.util.List;
 import java.util.Map;
 import java.util.Objects;
 import java.util.UUID;
+import java.util.concurrent.atomic.AtomicInteger;
 
 import org.apache.carbondata.core.constants.CarbonCommonConstants;
 import org.apache.carbondata.core.constants.CarbonV3DataFormatConstants;
@@ -114,12 +115,12 @@ public class TableSchemaBuilder {
     this.sortColumns = sortColumns;
   }
 
-  public ColumnSchema addColumn(StructField field, boolean isSortColumn) {
-    return addColumn(field, null, isSortColumn, false);
+  public ColumnSchema addColumn(StructField field, AtomicInteger valIndex, boolean isSortColumn) {
+    return addColumn(field, null, valIndex, isSortColumn, false);
   }
 
-  private ColumnSchema addColumn(StructField field, String parentName, boolean isSortColumn,
-      boolean isComplexChild) {
+  private ColumnSchema addColumn(StructField field, String parentName, AtomicInteger valIndex,
+      boolean isSortColumn, boolean isComplexChild) {
     Objects.requireNonNull(field);
     checkRepeatColumnName(field);
     ColumnSchema newColumn = new ColumnSchema();
@@ -184,33 +185,25 @@ public class TableSchemaBuilder {
     if (field.getDataType().isComplexType()) {
       String parentFieldName = newColumn.getColumnName();
       if (field.getDataType().getName().equalsIgnoreCase("ARRAY")) {
-        String colName = getColNameForArray(parentFieldName);
-        addColumn(new StructField(colName,
-            ((ArrayType) field.getDataType()).getElementType()), field.getFieldName(), false, true);
+        String colName = getColNameForArray(valIndex);
+        addColumn(new StructField(colName, ((ArrayType) field.getDataType()).getElementType()),
+            field.getFieldName(), valIndex, false, true);
       } else if (field.getDataType().getName().equalsIgnoreCase("STRUCT")
           && ((StructType) field.getDataType()).getFields().size() > 0) {
         // This field has children.
         List<StructField> fields = ((StructType) field.getDataType()).getFields();
         for (int i = 0; i < fields.size(); i++) {
-          addColumn(fields.get(i), parentFieldName, false, true);
+          addColumn(fields.get(i), parentFieldName, valIndex, false, true);
         }
       }
     }
     return newColumn;
   }
 
-  private String getColNameForArray(String parentFieldName) {
-    if (!parentFieldName.endsWith(".val")) {
-      return "val";
-    } else {
-      String[] splits = parentFieldName.split("val");
-      if (splits.length == 1) {
-        return "val" + 1;
-      } else {
-        return "val" + (Integer.parseInt(parentFieldName
-            .substring(parentFieldName.lastIndexOf("val") + 3, parentFieldName.length())) + 1);
-      }
-    }
+  private String getColNameForArray(AtomicInteger valIndex) {
+    String colName = "val" + valIndex.get();
+    valIndex.incrementAndGet();
+    return colName;
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/carbondata/blob/35a7b5e9/core/src/test/java/org/apache/carbondata/core/metadata/schema/table/TableSchemaBuilderSuite.java
----------------------------------------------------------------------
diff --git a/core/src/test/java/org/apache/carbondata/core/metadata/schema/table/TableSchemaBuilderSuite.java b/core/src/test/java/org/apache/carbondata/core/metadata/schema/table/TableSchemaBuilderSuite.java
index e9dce94..48e5d1b 100644
--- a/core/src/test/java/org/apache/carbondata/core/metadata/schema/table/TableSchemaBuilderSuite.java
+++ b/core/src/test/java/org/apache/carbondata/core/metadata/schema/table/TableSchemaBuilderSuite.java
@@ -19,6 +19,7 @@ package org.apache.carbondata.core.metadata.schema.table;
 
 import java.util.Arrays;
 import java.util.List;
+import java.util.concurrent.atomic.AtomicInteger;
 
 import org.apache.carbondata.core.metadata.datatype.DataTypes;
 import org.apache.carbondata.core.metadata.datatype.StructField;
@@ -32,15 +33,16 @@ public class TableSchemaBuilderSuite {
   @Test(expected = NullPointerException.class)
   public void testNullField() {
     TableSchemaBuilder builder = TableSchema.builder();
-    builder.addColumn(null, true);
+    builder.addColumn(null, new AtomicInteger(0), true);
   }
 
   @Test
   public void testBuilder() {
     TableSchemaBuilder builder = TableSchema.builder();
-    ColumnSchema columnSchema = builder.addColumn(new StructField("a", DataTypes.INT), true);
+    ColumnSchema columnSchema =
+        builder.addColumn(new StructField("a", DataTypes.INT), new AtomicInteger(0), true);
     builder.setSortColumns(Arrays.asList(columnSchema));
-    builder.addColumn(new StructField("b", DataTypes.DOUBLE), false);
+    builder.addColumn(new StructField("b", DataTypes.DOUBLE), new AtomicInteger(0), false);
     TableSchema schema = builder.build();
     Assert.assertEquals(2, schema.getListOfColumns().size());
     List<ColumnSchema> columns = schema.getListOfColumns();
@@ -51,9 +53,10 @@ public class TableSchemaBuilderSuite {
   @Test(expected = IllegalArgumentException.class)
   public void testRepeatedColumn() {
     TableSchemaBuilder builder = TableSchema.builder();
-    ColumnSchema columnSchema = builder.addColumn(new StructField("a", DataTypes.INT), true);
+    ColumnSchema columnSchema =
+        builder.addColumn(new StructField("a", DataTypes.INT), new AtomicInteger(0), true);
     builder.setSortColumns(Arrays.asList(columnSchema));
-    builder.addColumn(new StructField("a", DataTypes.DOUBLE), false);
+    builder.addColumn(new StructField("a", DataTypes.DOUBLE), new AtomicInteger(0), false);
     TableSchema schema = builder.build();
   }
 }

http://git-wip-us.apache.org/repos/asf/carbondata/blob/35a7b5e9/integration/spark-common/src/main/scala/org/apache/spark/sql/catalyst/CarbonDDLSqlParser.scala
----------------------------------------------------------------------
diff --git a/integration/spark-common/src/main/scala/org/apache/spark/sql/catalyst/CarbonDDLSqlParser.scala b/integration/spark-common/src/main/scala/org/apache/spark/sql/catalyst/CarbonDDLSqlParser.scala
index 890f8fc..9bc5597 100644
--- a/integration/spark-common/src/main/scala/org/apache/spark/sql/catalyst/CarbonDDLSqlParser.scala
+++ b/integration/spark-common/src/main/scala/org/apache/spark/sql/catalyst/CarbonDDLSqlParser.scala
@@ -1147,6 +1147,9 @@ abstract class CarbonDDLSqlParser extends AbstractCarbonSparkSQLParser {
       case "decimal" => Field(field.column, Some("Decimal"), field.name, Some(null), field.parent,
         field.storeType, field.schemaOrdinal, field.precision, field.scale, field.rawSchema,
         field.columnComment)
+      case "boolean" => Field(field.column, Some("Boolean"), field.name, Some(null), field.parent,
+        field.storeType, field.schemaOrdinal, field.precision, field.scale, field.rawSchema,
+        field.columnComment)
       // checking if the nested data type contains the child type as decimal(10,0),
       // if it is present then extracting the precision and scale. resetting the data type
       // with Decimal.
@@ -1214,6 +1217,8 @@ abstract class CarbonDDLSqlParser extends AbstractCarbonSparkSQLParser {
       case "Decimal" => Field(parentName + "." + field.column, Some("Decimal"),
         Some(parentName + "." + field.name.getOrElse(None)), Some(null), parentName,
         field.storeType, field.schemaOrdinal, field.precision, field.scale)
+      case "Boolean" => Field(parentName + "." + field.column, Some("Boolean"),
+        Some(parentName + "." + field.name.getOrElse(None)), Some(null), parentName)
       case _ => field
     }
   }
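
To illustrate what the parser change above is meant to accept, here is a minimal sketch of a
CREATE TABLE statement with a boolean column both at the top level and as a child of a struct.
The session bootstrap, store path, table name and column names are assumptions for illustration,
not part of the patch.

import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.CarbonSession._

object BooleanDdlSketch {
  def main(args: Array[String]): Unit = {
    // assumed local bootstrap; adjust master URL and store path for your environment
    val carbon = SparkSession.builder()
      .master("local")
      .appName("BooleanDdlSketch")
      .getOrCreateCarbonSession("/tmp/carbon-store")

    // boolean as a top-level column and as a child of a complex (struct) column
    carbon.sql(
      "CREATE TABLE IF NOT EXISTS bool_example (" +
        "id INT, active BOOLEAN, info STRUCT<verified: BOOLEAN>) " +
        "STORED BY 'carbondata'")

    carbon.stop()
  }
}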

http://git-wip-us.apache.org/repos/asf/carbondata/blob/35a7b5e9/store/sdk/src/main/java/org/apache/carbondata/sdk/file/CarbonWriterBuilder.java
----------------------------------------------------------------------
diff --git a/store/sdk/src/main/java/org/apache/carbondata/sdk/file/CarbonWriterBuilder.java b/store/sdk/src/main/java/org/apache/carbondata/sdk/file/CarbonWriterBuilder.java
index 76a46d0..f541dbb 100644
--- a/store/sdk/src/main/java/org/apache/carbondata/sdk/file/CarbonWriterBuilder.java
+++ b/store/sdk/src/main/java/org/apache/carbondata/sdk/file/CarbonWriterBuilder.java
@@ -25,6 +25,7 @@ import java.util.List;
 import java.util.Map;
 import java.util.Objects;
 import java.util.TreeMap;
+import java.util.concurrent.atomic.AtomicInteger;
 
 import org.apache.carbondata.common.annotations.InterfaceAudience;
 import org.apache.carbondata.common.annotations.InterfaceStability;
@@ -426,6 +427,10 @@ public class CarbonWriterBuilder {
 
   private void buildTableSchema(Field[] fields, TableSchemaBuilder tableSchemaBuilder,
       List<String> sortColumnsList, ColumnSchema[] sortColumnsSchemaList) {
+    // a counter used for complex array types. This valIndex is appended to the children of
+    // complex array columns in the order val1, val2, ... so that array-type children are
+    // differentiated at any nesting level
+    AtomicInteger valIndex = new AtomicInteger(0);
     for (Field field : fields) {
       if (null != field) {
         int isSortColumn = sortColumnsList.indexOf(field.getFieldName());
@@ -443,7 +448,8 @@ public class CarbonWriterBuilder {
             // Loop through the inner columns and for a StructData
             DataType complexType =
                 DataTypes.createArrayType(field.getChildren().get(0).getDataType());
-            tableSchemaBuilder.addColumn(new StructField(field.getFieldName(), complexType), false);
+            tableSchemaBuilder
+                .addColumn(new StructField(field.getFieldName(), complexType), valIndex, false);
           } else if (field.getDataType().getName().equalsIgnoreCase("STRUCT")) {
             // Loop through the inner columns and for a StructData
             List<StructField> structFieldsArray =
@@ -453,12 +459,13 @@ public class CarbonWriterBuilder {
                   .add(new StructField(childFld.getFieldName(), childFld.getDataType()));
             }
             DataType complexType = DataTypes.createStructType(structFieldsArray);
-            tableSchemaBuilder.addColumn(new StructField(field.getFieldName(), complexType), false);
+            tableSchemaBuilder
+                .addColumn(new StructField(field.getFieldName(), complexType), valIndex, false);
           }
         } else {
           ColumnSchema columnSchema = tableSchemaBuilder
               .addColumn(new StructField(field.getFieldName(), field.getDataType()),
-                  isSortColumn > -1);
+                  valIndex, isSortColumn > -1);
           columnSchema.setSortColumn(true);
           if (isSortColumn > -1) {
             sortColumnsSchemaList[isSortColumn] = columnSchema;
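
For reference, a minimal sketch of the new addColumn signature from the caller's side: one
AtomicInteger is created per schema build and threaded through every addColumn call, so the
builder can number the children of array columns (val1, val2, ...) without clashes across
nesting levels. The column names and data types below are illustrative, not taken from the patch.

import java.util.{Arrays => JArrays}
import java.util.concurrent.atomic.AtomicInteger

import org.apache.carbondata.core.metadata.datatype.{DataTypes, StructField}
import org.apache.carbondata.core.metadata.schema.table.TableSchema

object SchemaBuilderSketch {
  def main(args: Array[String]): Unit = {
    val builder = TableSchema.builder()
    // one shared counter per table schema; addColumn uses it to number array children
    val valIndex = new AtomicInteger(0)

    val name = builder.addColumn(new StructField("name", DataTypes.STRING), valIndex, true)
    builder.setSortColumns(JArrays.asList(name))

    // an array column: the builder uses valIndex when generating names for its child columns
    builder.addColumn(
      new StructField("phones", DataTypes.createArrayType(DataTypes.STRING)), valIndex, false)

    val schema = builder.build()
    println(s"columns: ${schema.getListOfColumns.size()}")
  }
}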


[08/50] [abbrv] carbondata git commit: [CARBONDATA-2408] Fix search mode master SaslException issue in the first time

Posted by ma...@apache.org.
[CARBONDATA-2408] Fix search mode master SaslException issue in the first time

This closes #2239


Project: http://git-wip-us.apache.org/repos/asf/carbondata/repo
Commit: http://git-wip-us.apache.org/repos/asf/carbondata/commit/fb128974
Tree: http://git-wip-us.apache.org/repos/asf/carbondata/tree/fb128974
Diff: http://git-wip-us.apache.org/repos/asf/carbondata/diff/fb128974

Branch: refs/heads/spark-2.3
Commit: fb12897474f8eb9b4ca04764f9ee15890573b057
Parents: 452c42b
Author: xubo245 <60...@qq.com>
Authored: Fri Apr 27 18:57:37 2018 +0800
Committer: Jacky Li <ja...@qq.com>
Committed: Wed May 9 11:02:48 2018 +0800

----------------------------------------------------------------------
 .../carbondata/store/SparkCarbonStore.scala     |  1 +
 .../scala/org/apache/spark/rpc/Master.scala     | 24 ++++++++++++++++++++
 2 files changed, 25 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/carbondata/blob/fb128974/integration/spark2/src/main/scala/org/apache/carbondata/store/SparkCarbonStore.scala
----------------------------------------------------------------------
diff --git a/integration/spark2/src/main/scala/org/apache/carbondata/store/SparkCarbonStore.scala b/integration/spark2/src/main/scala/org/apache/carbondata/store/SparkCarbonStore.scala
index c0d0d09..3a6adea 100644
--- a/integration/spark2/src/main/scala/org/apache/carbondata/store/SparkCarbonStore.scala
+++ b/integration/spark2/src/main/scala/org/apache/carbondata/store/SparkCarbonStore.scala
@@ -110,6 +110,7 @@ class SparkCarbonStore extends MetaCachedCarbonStore {
   }
 
   def startSearchMode(): Unit = {
+    LOG.info("Starting search mode master")
     master = new Master(session.sparkContext.getConf)
     master.startService()
     startAllWorkers()

http://git-wip-us.apache.org/repos/asf/carbondata/blob/fb128974/store/search/src/main/scala/org/apache/spark/rpc/Master.scala
----------------------------------------------------------------------
diff --git a/store/search/src/main/scala/org/apache/spark/rpc/Master.scala b/store/search/src/main/scala/org/apache/spark/rpc/Master.scala
index df793b4..5b31a49 100644
--- a/store/search/src/main/scala/org/apache/spark/rpc/Master.scala
+++ b/store/search/src/main/scala/org/apache/spark/rpc/Master.scala
@@ -20,6 +20,7 @@ package org.apache.spark.rpc
 import java.io.IOException
 import java.net.{BindException, InetAddress}
 import java.util.{List => JList, Map => JMap, Objects, Random, UUID}
+import java.util.concurrent.atomic.AtomicBoolean
 
 import scala.collection.JavaConverters._
 import scala.collection.mutable.ArrayBuffer
@@ -67,6 +68,8 @@ class Master(sparkConf: SparkConf) {
   /** start service and listen on port passed in constructor */
   def startService(): Unit = {
     if (rpcEnv == null) {
+      LOG.info("Start search mode master thread")
+      val isStarted: AtomicBoolean = new AtomicBoolean(false)
       new Thread(new Runnable {
         override def run(): Unit = {
           val hostAddress = InetAddress.getLocalHost.getHostAddress
@@ -96,10 +99,31 @@ class Master(sparkConf: SparkConf) {
           }
           val registryEndpoint: RpcEndpoint = new Registry(rpcEnv, Master.this)
           rpcEnv.setupEndpoint("registry-service", registryEndpoint)
+          if (isStarted.compareAndSet(false, false)) {
+            synchronized {
+              isStarted.compareAndSet(false, true)
+            }
+          }
           LOG.info("registry-service started")
           rpcEnv.awaitTermination()
         }
       }).start()
+      var count = 0
+      val countThreshold = 5000
+      while (isStarted.compareAndSet(false, false) && count < countThreshold) {
+        LOG.info(s"Waiting for search mode master to start, retried $count times")
+        Thread.sleep(10)
+        count = count + 1
+      }
+      if (count >= countThreshold) {
+        LOG.error(s"Search mode master failed to start after $countThreshold retries")
+        throw new RuntimeException(
+          s"Search mode master failed to start after $countThreshold retries")
+      } else {
+        LOG.info("Search mode master started")
+      }
+    } else {
+      LOG.info("Search mode master has already started")
     }
   }
 
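
The shape of the fix above, reduced to a standalone sketch (object name, timings and messages
are illustrative): the background thread flips a shared flag once the endpoint is up, and the
caller polls that flag with a bounded number of retries before giving up. Note that
isStarted.compareAndSet(false, false) in the patch succeeds only while the flag is still false,
so it reads as a plain !isStarted.get() check.

import java.util.concurrent.atomic.AtomicBoolean

object StartupWaitSketch {
  def main(args: Array[String]): Unit = {
    val started = new AtomicBoolean(false)

    new Thread(new Runnable {
      override def run(): Unit = {
        // stand-in for creating the RpcEnv and registering the endpoint
        Thread.sleep(100)
        started.set(true)
      }
    }).start()

    var count = 0
    val countThreshold = 5000
    // poll until the service thread reports it is up, or give up after the threshold
    while (!started.get() && count < countThreshold) {
      Thread.sleep(10)
      count += 1
    }
    if (count >= countThreshold) {
      throw new RuntimeException(s"service did not start after $countThreshold retries")
    }
    println(s"service started after $count retries")
  }
}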


[41/50] [abbrv] carbondata git commit: [CARBONDATA-2475] Support Modular Core for Materialized View DataMap for query matching and rewriting

Posted by ma...@apache.org.
http://git-wip-us.apache.org/repos/asf/carbondata/blob/2881c6bb/integration/spark-common-test/src/test/resources/tpch/customers.csv
----------------------------------------------------------------------
diff --git a/integration/spark-common-test/src/test/resources/tpch/customers.csv b/integration/spark-common-test/src/test/resources/tpch/customers.csv
new file mode 100644
index 0000000..7e46e5f
--- /dev/null
+++ b/integration/spark-common-test/src/test/resources/tpch/customers.csv
@@ -0,0 +1,500 @@
+1|Customer#000000001|IVhzIApeRb ot,c,E|15|25-989-741-2988|711.56|BUILDING|to the even, regular platelets. regular, ironic epitaphs nag e|
+2|Customer#000000002|XSTf4,NCwDVaWNe6tEgvwfmRchLXak|13|23-768-687-3665|121.65|AUTOMOBILE|l accounts. blithely ironic theodolites integrate boldly: caref|
+3|Customer#000000003|MG9kdTD2WBHm|1|11-719-748-3364|7498.12|AUTOMOBILE| deposits eat slyly ironic, even instructions. express foxes detect slyly. blithely even accounts abov|
+4|Customer#000000004|XxVSJsLAGtn|4|14-128-190-5944|2866.83|MACHINERY| requests. final, regular ideas sleep final accou|
+5|Customer#000000005|KvpyuHCplrB84WgAiGV6sYpZq7Tj|3|13-750-942-6364|794.47|HOUSEHOLD|n accounts will have to unwind. foxes cajole accor|
+6|Customer#000000006|sKZz0CsnMD7mp4Xd0YrBvx,LREYKUWAh yVn|20|30-114-968-4951|7638.57|AUTOMOBILE|tions. even deposits boost according to the slyly bold packages. final accounts cajole requests. furious|
+7|Customer#000000007|TcGe5gaZNgVePxU5kRrvXBfkasDTea|18|28-190-982-9759|9561.95|AUTOMOBILE|ainst the ironic, express theodolites. express, even pinto beans among the exp|
+8|Customer#000000008|I0B10bB0AymmC, 0PrRYBCP1yGJ8xcBPmWhl5|17|27-147-574-9335|6819.74|BUILDING|among the slyly regular theodolites kindle blithely courts. carefully even theodolites haggle slyly along the ide|
+9|Customer#000000009|xKiAFTjUsCuxfeleNqefumTrjS|8|18-338-906-3675|8324.07|FURNITURE|r theodolites according to the requests wake thinly excuses: pending requests haggle furiousl|
+10|Customer#000000010|6LrEaV6KR6PLVcgl2ArL Q3rqzLzcT1 v2|5|15-741-346-9870|2753.54|HOUSEHOLD|es regular deposits haggle. fur|
+11|Customer#000000011|PkWS 3HlXqwTuzrKg633BEi|23|33-464-151-3439|-272.60|BUILDING|ckages. requests sleep slyly. quickly even pinto beans promise above the slyly regular pinto beans. |
+12|Customer#000000012|9PWKuhzT4Zr1Q|13|23-791-276-1263|3396.49|HOUSEHOLD| to the carefully final braids. blithely regular requests nag. ironic theodolites boost quickly along|
+13|Customer#000000013|nsXQu0oVjD7PM659uC3SRSp|3|13-761-547-5974|3857.34|BUILDING|ounts sleep carefully after the close frays. carefully bold notornis use ironic requests. blithely|
+14|Customer#000000014|KXkletMlL2JQEA |1|11-845-129-3851|5266.30|FURNITURE|, ironic packages across the unus|
+15|Customer#000000015|YtWggXoOLdwdo7b0y,BZaGUQMLJMX1Y,EC,6Dn|23|33-687-542-7601|2788.52|HOUSEHOLD| platelets. regular deposits detect asymptotes. blithely unusual packages nag slyly at the fluf|
+16|Customer#000000016|cYiaeMLZSMAOQ2 d0W,|10|20-781-609-3107|4681.03|FURNITURE|kly silent courts. thinly regular theodolites sleep fluffily after |
+17|Customer#000000017|izrh 6jdqtp2eqdtbkswDD8SG4SzXruMfIXyR7|2|12-970-682-3487|6.34|AUTOMOBILE|packages wake! blithely even pint|
+18|Customer#000000018|3txGO AiuFux3zT0Z9NYaFRnZt|6|16-155-215-1315|5494.43|BUILDING|s sleep. carefully even instructions nag furiously alongside of t|
+19|Customer#000000019|uc,3bHIx84H,wdrmLOjVsiqXCq2tr|18|28-396-526-5053|8914.71|HOUSEHOLD| nag. furiously careful packages are slyly at the accounts. furiously regular in|
+20|Customer#000000020|JrPk8Pqplj4Ne|22|32-957-234-8742|7603.40|FURNITURE|g alongside of the special excuses-- fluffily enticing packages wake |
+21|Customer#000000021|XYmVpr9yAHDEn|8|18-902-614-8344|1428.25|MACHINERY| quickly final accounts integrate blithely furiously u|
+22|Customer#000000022|QI6p41,FNs5k7RZoCCVPUTkUdYpB|3|13-806-545-9701|591.98|MACHINERY|s nod furiously above the furiously ironic ideas. |
+23|Customer#000000023|OdY W13N7Be3OC5MpgfmcYss0Wn6TKT|3|13-312-472-8245|3332.02|HOUSEHOLD|deposits. special deposits cajole slyly. fluffily special deposits about the furiously |
+24|Customer#000000024|HXAFgIAyjxtdqwimt13Y3OZO 4xeLe7U8PqG|13|23-127-851-8031|9255.67|MACHINERY|into beans. fluffily final ideas haggle fluffily|
+25|Customer#000000025|Hp8GyFQgGHFYSilH5tBfe|12|22-603-468-3533|7133.70|FURNITURE|y. accounts sleep ruthlessly according to the regular theodolites. unusual instructions sleep. ironic, final|
+26|Customer#000000026|8ljrc5ZeMl7UciP|22|32-363-455-4837|5182.05|AUTOMOBILE|c requests use furiously ironic requests. slyly ironic dependencies us|
+27|Customer#000000027|IS8GIyxpBrLpMT0u7|3|13-137-193-2709|5679.84|BUILDING| about the carefully ironic pinto beans. accoun|
+28|Customer#000000028|iVyg0daQ,Tha8x2WPWA9m2529m|8|18-774-241-1462|1007.18|FURNITURE| along the regular deposits. furiously final pac|
+29|Customer#000000029|sJ5adtfyAkCK63df2,vF25zyQMVYE34uh|0|10-773-203-7342|7618.27|FURNITURE|its after the carefully final platelets x-ray against |
+30|Customer#000000030|nJDsELGAavU63Jl0c5NKsKfL8rIJQQkQnYL2QJY|1|11-764-165-5076|9321.01|BUILDING|lithely final requests. furiously unusual account|
+31|Customer#000000031|LUACbO0viaAv6eXOAebryDB xjVst|23|33-197-837-7094|5236.89|HOUSEHOLD|s use among the blithely pending depo|
+32|Customer#000000032|jD2xZzi UmId,DCtNBLXKj9q0Tlp2iQ6ZcO3J|15|25-430-914-2194|3471.53|BUILDING|cial ideas. final, furious requests across the e|
+33|Customer#000000033|qFSlMuLucBmx9xnn5ib2csWUweg D|17|27-375-391-1280|-78.56|AUTOMOBILE|s. slyly regular accounts are furiously. carefully pending requests|
+34|Customer#000000034|Q6G9wZ6dnczmtOx509xgE,M2KV|15|25-344-968-5422|8589.70|HOUSEHOLD|nder against the even, pending accounts. even|
+35|Customer#000000035|TEjWGE4nBzJL2|17|27-566-888-7431|1228.24|HOUSEHOLD|requests. special, express requests nag slyly furiousl|
+36|Customer#000000036|3TvCzjuPzpJ0,DdJ8kW5U|21|31-704-669-5769|4987.27|BUILDING|haggle. enticing, quiet platelets grow quickly bold sheaves. carefully regular acc|
+37|Customer#000000037|7EV4Pwh,3SboctTWt|8|18-385-235-7162|-917.75|FURNITURE|ilent packages are carefully among the deposits. furiousl|
+38|Customer#000000038|a5Ee5e9568R8RLP 2ap7|12|22-306-880-7212|6345.11|HOUSEHOLD|lar excuses. closely even asymptotes cajole blithely excuses. carefully silent pinto beans sleep carefully fin|
+39|Customer#000000039|nnbRg,Pvy33dfkorYE FdeZ60|2|12-387-467-6509|6264.31|AUTOMOBILE|tions. slyly silent excuses slee|
+40|Customer#000000040|gOnGWAyhSV1ofv|3|13-652-915-8939|1335.30|BUILDING|rges impress after the slyly ironic courts. foxes are. blithely |
+41|Customer#000000041|IM9mzmyoxeBmvNw8lA7G3Ydska2nkZF|10|20-917-711-4011|270.95|HOUSEHOLD|ly regular accounts hang bold, silent packages. unusual foxes haggle slyly above the special, final depo|
+42|Customer#000000042|ziSrvyyBke|5|15-416-330-4175|8727.01|BUILDING|ssly according to the pinto beans: carefully special requests across the even, pending accounts wake special|
+43|Customer#000000043|ouSbjHk8lh5fKX3zGso3ZSIj9Aa3PoaFd|19|29-316-665-2897|9904.28|MACHINERY|ial requests: carefully pending foxes detect quickly. carefully final courts cajole quickly. carefully|
+44|Customer#000000044|Oi,dOSPwDu4jo4x,,P85E0dmhZGvNtBwi|16|26-190-260-5375|7315.94|AUTOMOBILE|r requests around the unusual, bold a|
+45|Customer#000000045|4v3OcpFgoOmMG,CbnF,4mdC|9|19-715-298-9917|9983.38|AUTOMOBILE|nto beans haggle slyly alongside of t|
+46|Customer#000000046|eaTXWWm10L9|6|16-357-681-2007|5744.59|AUTOMOBILE|ctions. accounts sleep furiously even requests. regular, regular accounts cajole blithely around the final pa|
+47|Customer#000000047|b0UgocSqEW5 gdVbhNT|2|12-427-271-9466|274.58|BUILDING|ions. express, ironic instructions sleep furiously ironic ideas. furi|
+48|Customer#000000048|0UU iPhBupFvemNB|0|10-508-348-5882|3792.50|BUILDING|re fluffily pending foxes. pending, bold platelets sleep slyly. even platelets cajo|
+49|Customer#000000049|cNgAeX7Fqrdf7HQN9EwjUa4nxT,68L FKAxzl|10|20-908-631-4424|4573.94|FURNITURE|nusual foxes! fluffily pending packages maintain to the regular |
+50|Customer#000000050|9SzDYlkzxByyJ1QeTI o|6|16-658-112-3221|4266.13|MACHINERY|ts. furiously ironic accounts cajole furiously slyly ironic dinos.|
+51|Customer#000000051|uR,wEaiTvo4|12|22-344-885-4251|855.87|FURNITURE|eposits. furiously regular requests integrate carefully packages. furious|
+52|Customer#000000052|7 QOqGqqSy9jfV51BC71jcHJSD0|11|21-186-284-5998|5630.28|HOUSEHOLD|ic platelets use evenly even accounts. stealthy theodolites cajole furiou|
+53|Customer#000000053|HnaxHzTfFTZs8MuCpJyTbZ47Cm4wFOOgib|15|25-168-852-5363|4113.64|HOUSEHOLD|ar accounts are. even foxes are blithely. fluffily pending deposits boost|
+54|Customer#000000054|,k4vf 5vECGWFy,hosTE,|4|14-776-370-4745|868.90|AUTOMOBILE|sual, silent accounts. furiously express accounts cajole special deposits. final, final accounts use furi|
+55|Customer#000000055|zIRBR4KNEl HzaiV3a i9n6elrxzDEh8r8pDom|10|20-180-440-8525|4572.11|MACHINERY|ully unusual packages wake bravely bold packages. unusual requests boost deposits! blithely ironic packages ab|
+56|Customer#000000056|BJYZYJQk4yD5B|10|20-895-685-6920|6530.86|FURNITURE|. notornis wake carefully. carefully fluffy requests are furiously even accounts. slyly expre|
+57|Customer#000000057|97XYbsuOPRXPWU|21|31-835-306-1650|4151.93|AUTOMOBILE|ove the carefully special packages. even, unusual deposits sleep slyly pend|
+58|Customer#000000058|g9ap7Dk1Sv9fcXEWjpMYpBZIRUohi T|13|23-244-493-2508|6478.46|HOUSEHOLD|ideas. ironic ideas affix furiously express, final instructions. regular excuses use quickly e|
+59|Customer#000000059|zLOCP0wh92OtBihgspOGl4|1|11-355-584-3112|3458.60|MACHINERY|ously final packages haggle blithely after the express deposits. furiou|
+60|Customer#000000060|FyodhjwMChsZmUz7Jz0H|12|22-480-575-5866|2741.87|MACHINERY|latelets. blithely unusual courts boost furiously about the packages. blithely final instruct|
+61|Customer#000000061|9kndve4EAJxhg3veF BfXr7AqOsT39o gtqjaYE|17|27-626-559-8599|1536.24|FURNITURE|egular packages shall have to impress along the |
+62|Customer#000000062|upJK2Dnw13,|7|17-361-978-7059|595.61|MACHINERY|kly special dolphins. pinto beans are slyly. quickly regular accounts are furiously a|
+63|Customer#000000063|IXRSpVWWZraKII|21|31-952-552-9584|9331.13|AUTOMOBILE|ithely even accounts detect slyly above the fluffily ir|
+64|Customer#000000064|MbCeGY20kaKK3oalJD,OT|3|13-558-731-7204|-646.64|BUILDING|structions after the quietly ironic theodolites cajole be|
+65|Customer#000000065|RGT yzQ0y4l0H90P783LG4U95bXQFDRXbWa1sl,X|23|33-733-623-5267|8795.16|AUTOMOBILE|y final foxes serve carefully. theodolites are carefully. pending i|
+66|Customer#000000066|XbsEqXH1ETbJYYtA1A|22|32-213-373-5094|242.77|HOUSEHOLD|le slyly accounts. carefully silent packages benea|
+67|Customer#000000067|rfG0cOgtr5W8 xILkwp9fpCS8|9|19-403-114-4356|8166.59|MACHINERY|indle furiously final, even theodo|
+68|Customer#000000068|o8AibcCRkXvQFh8hF,7o|12|22-918-832-2411|6853.37|HOUSEHOLD| pending pinto beans impress realms. final dependencies |
+69|Customer#000000069|Ltx17nO9Wwhtdbe9QZVxNgP98V7xW97uvSH1prEw|9|19-225-978-5670|1709.28|HOUSEHOLD|thely final ideas around the quickly final dependencies affix carefully quickly final theodolites. final accounts c|
+70|Customer#000000070|mFowIuhnHjp2GjCiYYavkW kUwOjIaTCQ|22|32-828-107-2832|4867.52|FURNITURE|fter the special asymptotes. ideas after the unusual frets cajole quickly regular pinto be|
+71|Customer#000000071|TlGalgdXWBmMV,6agLyWYDyIz9MKzcY8gl,w6t1B|7|17-710-812-5403|-611.19|HOUSEHOLD|g courts across the regular, final pinto beans are blithely pending ac|
+72|Customer#000000072|putjlmskxE,zs,HqeIA9Wqu7dhgH5BVCwDwHHcf|2|12-759-144-9689|-362.86|FURNITURE|ithely final foxes sleep always quickly bold accounts. final wat|
+73|Customer#000000073|8IhIxreu4Ug6tt5mog4|0|10-473-439-3214|4288.50|BUILDING|usual, unusual packages sleep busily along the furiou|
+74|Customer#000000074|IkJHCA3ZThF7qL7VKcrU nRLl,kylf |4|14-199-862-7209|2764.43|MACHINERY|onic accounts. blithely slow packages would haggle carefully. qui|
+75|Customer#000000075|Dh 6jZ,cwxWLKQfRKkiGrzv6pm|18|28-247-803-9025|6684.10|AUTOMOBILE| instructions cajole even, even deposits. finally bold deposits use above the even pains. slyl|
+76|Customer#000000076|m3sbCvjMOHyaOofH,e UkGPtqc4|0|10-349-718-3044|5745.33|FURNITURE|pecial deposits. ironic ideas boost blithely according to the closely ironic theodolites! furiously final deposits n|
+77|Customer#000000077|4tAE5KdMFGD4byHtXF92vx|17|27-269-357-4674|1738.87|BUILDING|uffily silent requests. carefully ironic asymptotes among the ironic hockey players are carefully bli|
+78|Customer#000000078|HBOta,ZNqpg3U2cSL0kbrftkPwzX|9|19-960-700-9191|7136.97|FURNITURE|ests. blithely bold pinto beans h|
+79|Customer#000000079|n5hH2ftkVRwW8idtD,BmM2|15|25-147-850-4166|5121.28|MACHINERY|es. packages haggle furiously. regular, special requests poach after the quickly express ideas. blithely pending re|
+80|Customer#000000080|K,vtXp8qYB |0|10-267-172-7101|7383.53|FURNITURE|tect among the dependencies. bold accounts engage closely even pinto beans. ca|
+81|Customer#000000081|SH6lPA7JiiNC6dNTrR|20|30-165-277-3269|2023.71|BUILDING|r packages. fluffily ironic requests cajole fluffily. ironically regular theodolit|
+82|Customer#000000082|zhG3EZbap4c992Gj3bK,3Ne,Xn|18|28-159-442-5305|9468.34|AUTOMOBILE|s wake. bravely regular accounts are furiously. regula|
+83|Customer#000000083|HnhTNB5xpnSF20JBH4Ycs6psVnkC3RDf|22|32-817-154-4122|6463.51|BUILDING|ccording to the quickly bold warhorses. final, regular foxes integrate carefully. bold packages nag blithely ev|
+84|Customer#000000084|lpXz6Fwr9945rnbtMc8PlueilS1WmASr CB|11|21-546-818-3802|5174.71|FURNITURE|ly blithe foxes. special asymptotes haggle blithely against the furiously regular depo|
+85|Customer#000000085|siRerlDwiolhYR 8FgksoezycLj|5|15-745-585-8219|3386.64|FURNITURE|ronic ideas use above the slowly pendin|
+86|Customer#000000086|US6EGGHXbTTXPL9SBsxQJsuvy|0|10-677-951-2353|3306.32|HOUSEHOLD|quests. pending dugouts are carefully aroun|
+87|Customer#000000087|hgGhHVSWQl 6jZ6Ev|23|33-869-884-7053|6327.54|FURNITURE|hely ironic requests integrate according to the ironic accounts. slyly regular pla|
+88|Customer#000000088|wtkjBN9eyrFuENSMmMFlJ3e7jE5KXcg|16|26-516-273-2566|8031.44|AUTOMOBILE|s are quickly above the quickly ironic instructions; even requests about the carefully final deposi|
+89|Customer#000000089|dtR, y9JQWUO6FoJExyp8whOU|14|24-394-451-5404|1530.76|FURNITURE|counts are slyly beyond the slyly final accounts. quickly final ideas wake. r|
+90|Customer#000000090|QxCzH7VxxYUWwfL7|16|26-603-491-1238|7354.23|BUILDING|sly across the furiously even |
+91|Customer#000000091|S8OMYFrpHwoNHaGBeuS6E 6zhHGZiprw1b7 q|8|18-239-400-3677|4643.14|AUTOMOBILE|onic accounts. fluffily silent pinto beans boost blithely according to the fluffily exp|
+92|Customer#000000092|obP PULk2LH LqNF,K9hcbNqnLAkJVsl5xqSrY,|2|12-446-416-8471|1182.91|MACHINERY|. pinto beans hang slyly final deposits. ac|
+93|Customer#000000093|EHXBr2QGdh|7|17-359-388-5266|2182.52|MACHINERY|press deposits. carefully regular platelets r|
+94|Customer#000000094|IfVNIN9KtkScJ9dUjK3Pg5gY1aFeaXewwf|9|19-953-499-8833|5500.11|HOUSEHOLD|latelets across the bold, final requests sleep according to the fluffily bold accounts. unusual deposits amon|
+95|Customer#000000095|EU0xvmWvOmUUn5J,2z85DQyG7QCJ9Xq7|15|25-923-255-2929|5327.38|MACHINERY|ithely. ruthlessly final requests wake slyly alongside of the furiously silent pinto beans. even the|
+96|Customer#000000096|vWLOrmXhRR|8|18-422-845-1202|6323.92|AUTOMOBILE|press requests believe furiously. carefully final instructions snooze carefully. |
+97|Customer#000000097|OApyejbhJG,0Iw3j rd1M|17|27-588-919-5638|2164.48|AUTOMOBILE|haggle slyly. bold, special ideas are blithely above the thinly bold theo|
+98|Customer#000000098|7yiheXNSpuEAwbswDW|12|22-885-845-6889|-551.37|BUILDING|ages. furiously pending accounts are quickly carefully final foxes: busily pe|
+99|Customer#000000099|szsrOiPtCHVS97Lt|15|25-515-237-9232|4088.65|HOUSEHOLD|cajole slyly about the regular theodolites! furiously bold requests nag along the pending, regular packages. somas|
+100|Customer#000000100|fptUABXcmkC5Wx|20|30-749-445-4907|9889.89|FURNITURE|was furiously fluffily quiet deposits. silent, pending requests boost against |
+101|Customer#000000101|sMmL2rNeHDltovSm Y|2|12-514-298-3699|7470.96|MACHINERY| sleep. pending packages detect slyly ironic pack|
+102|Customer#000000102|UAtflJ06 fn9zBfKjInkQZlWtqaA|19|29-324-978-8538|8462.17|BUILDING|ously regular dependencies nag among the furiously express dinos. blithely final|
+103|Customer#000000103|8KIsQX4LJ7QMsj6DrtFtXu0nUEdV,8a|9|19-216-107-2107|2757.45|BUILDING|furiously pending notornis boost slyly around the blithely ironic ideas? final, even instructions cajole fl|
+104|Customer#000000104|9mcCK L7rt0SwiYtrbO88DiZS7U d7M|10|20-966-284-8065|-588.38|FURNITURE|rate carefully slyly special pla|
+105|Customer#000000105|4iSJe4L SPjg7kJj98Yz3z0B|10|20-793-553-6417|9091.82|MACHINERY|l pains cajole even accounts. quietly final instructi|
+106|Customer#000000106|xGCOEAUjUNG|1|11-751-989-4627|3288.42|MACHINERY|lose slyly. ironic accounts along the evenly regular theodolites wake about the special, final gifts. |
+107|Customer#000000107|Zwg64UZ,q7GRqo3zm7P1tZIRshBDz|15|25-336-529-9919|2514.15|AUTOMOBILE|counts cajole slyly. regular requests wake. furiously regular deposits about the blithely final fo|
+108|Customer#000000108|GPoeEvpKo1|5|15-908-619-7526|2259.38|BUILDING|refully ironic deposits sleep. regular, unusual requests wake slyly|
+109|Customer#000000109|OOOkYBgCMzgMQXUmkocoLb56rfrdWp2NE2c|16|26-992-422-8153|-716.10|BUILDING|es. fluffily final dependencies sleep along the blithely even pinto beans. final deposits haggle furiously furiou|
+110|Customer#000000110|mymPfgphaYXNYtk|10|20-893-536-2069|7462.99|AUTOMOBILE|nto beans cajole around the even, final deposits. quickly bold packages according to the furiously regular dept|
+111|Customer#000000111|CBSbPyOWRorloj2TBvrK9qp9tHBs|22|32-582-283-7528|6505.26|MACHINERY|ly unusual instructions detect fluffily special deposits-- theodolites nag carefully during the ironic dependencies|
+112|Customer#000000112|RcfgG3bO7QeCnfjqJT1|19|29-233-262-8382|2953.35|FURNITURE|rmanently unusual multipliers. blithely ruthless deposits are furiously along the|
+113|Customer#000000113|eaOl5UBXIvdY57rglaIzqvfPD,MYfK|12|22-302-930-4756|2912.00|BUILDING|usly regular theodolites boost furiously doggedly pending instructio|
+114|Customer#000000114|xAt 5f5AlFIU|14|24-805-212-7646|1027.46|FURNITURE|der the carefully express theodolites are after the packages. packages are. bli|
+115|Customer#000000115|0WFt1IXENmUT2BgbsB0ShVKJZt0HCBCbFl0aHc|8|18-971-699-1843|7508.92|HOUSEHOLD|sits haggle above the carefully ironic theodolite|
+116|Customer#000000116|yCuVxIgsZ3,qyK2rloThy3u|16|26-632-309-5792|8403.99|BUILDING|as. quickly final sauternes haggle slyly carefully even packages. brave, ironic pinto beans are above the furious|
+117|Customer#000000117|uNhM,PzsRA3S,5Y Ge5Npuhi|24|34-403-631-3505|3950.83|FURNITURE|affix. instructions are furiously sl|
+118|Customer#000000118|OVnFuHygK9wx3xpg8|18|28-639-943-7051|3582.37|AUTOMOBILE|uick packages alongside of the furiously final deposits haggle above the fluffily even foxes. blithely dogged dep|
+119|Customer#000000119|M1ETOIecuvH8DtM0Y0nryXfW|7|17-697-919-8406|3930.35|FURNITURE|express ideas. blithely ironic foxes thrash. special acco|
+120|Customer#000000120|zBNna00AEInqyO1|12|22-291-534-1571|363.75|MACHINERY| quickly. slyly ironic requests cajole blithely furiously final dependen|
+121|Customer#000000121|tv nCR2YKupGN73mQudO|17|27-411-990-2959|6428.32|BUILDING|uriously stealthy ideas. carefully final courts use carefully|
+122|Customer#000000122|yp5slqoNd26lAENZW3a67wSfXA6hTF|3|13-702-694-4520|7865.46|HOUSEHOLD| the special packages hinder blithely around the permanent requests. bold depos|
+123|Customer#000000123|YsOnaaER8MkvK5cpf4VSlq|5|15-817-151-1168|5897.83|BUILDING|ependencies. regular, ironic requests are fluffily regu|
+124|Customer#000000124|aTbyVAW5tCd,v09O|18|28-183-750-7809|1842.49|AUTOMOBILE|le fluffily even dependencies. quietly s|
+125|Customer#000000125|,wSZXdVR xxIIfm9s8ITyLl3kgjT6UC07GY0Y|19|29-261-996-3120|-234.12|FURNITURE|x-ray finally after the packages? regular requests c|
+126|Customer#000000126|ha4EHmbx3kg DYCsP6DFeUOmavtQlHhcfaqr|22|32-755-914-7592|1001.39|HOUSEHOLD|s about the even instructions boost carefully furiously ironic pearls. ruthless, |
+127|Customer#000000127|Xyge4DX2rXKxXyye1Z47LeLVEYMLf4Bfcj|21|31-101-672-2951|9280.71|MACHINERY|ic, unusual theodolites nod silently after the final, ironic instructions: pending r|
+128|Customer#000000128|AmKUMlJf2NRHcKGmKjLS|4|14-280-874-8044|-986.96|HOUSEHOLD|ing packages integrate across the slyly unusual dugouts. blithely silent ideas sublate carefully. blithely expr|
+129|Customer#000000129|q7m7rbMM0BpaCdmxloCgBDRCleXsXkdD8kf|7|17-415-148-7416|9127.27|HOUSEHOLD| unusual deposits boost carefully furiously silent ideas. pending accounts cajole slyly across|
+130|Customer#000000130|RKPx2OfZy0Vn 8wGWZ7F2EAvmMORl1k8iH|9|19-190-993-9281|5073.58|HOUSEHOLD|ix slowly. express packages along the furiously ironic requests integrate daringly deposits. fur|
+131|Customer#000000131|jyN6lAjb1FtH10rMC,XzlWyCBrg75|11|21-840-210-3572|8595.53|HOUSEHOLD|jole special packages. furiously final dependencies about the furiously speci|
+132|Customer#000000132|QM5YabAsTLp9|4|14-692-150-9717|162.57|HOUSEHOLD|uickly carefully special theodolites. carefully regular requests against the blithely unusual instructions |
+133|Customer#000000133|IMCuXdpIvdkYO92kgDGuyHgojcUs88p|17|27-408-997-8430|2314.67|AUTOMOBILE|t packages. express pinto beans are blithely along the unusual, even theodolites. silent packages use fu|
+134|Customer#000000134|sUiZ78QCkTQPICKpA9OBzkUp2FM|11|21-200-159-5932|4608.90|BUILDING|yly fluffy foxes boost final ideas. b|
+135|Customer#000000135|oZK,oC0 fdEpqUML|19|29-399-293-6241|8732.91|FURNITURE| the slyly final accounts. deposits cajole carefully. carefully sly packag|
+136|Customer#000000136|QoLsJ0v5C1IQbh,DS1|7|17-501-210-4726|-842.39|FURNITURE|ackages sleep ironic, final courts. even requests above the blithely bold requests g|
+137|Customer#000000137|cdW91p92rlAEHgJafqYyxf1Q|16|26-777-409-5654|7838.30|HOUSEHOLD|carefully regular theodolites use. silent dolphins cajo|
+138|Customer#000000138|5uyLAeY7HIGZqtu66Yn08f|5|15-394-860-4589|430.59|MACHINERY|ts doze on the busy ideas. regular|
+139|Customer#000000139|3ElvBwudHKL02732YexGVFVt |9|19-140-352-1403|7897.78|MACHINERY|nstructions. quickly ironic ideas are carefully. bold, |
+140|Customer#000000140|XRqEPiKgcETII,iOLDZp5jA|4|14-273-885-6505|9963.15|MACHINERY|ies detect slyly ironic accounts. slyly ironic theodolites hag|
+141|Customer#000000141|5IW,WROVnikc3l7DwiUDGQNGsLBGOL6Dc0|1|11-936-295-6204|6706.14|FURNITURE|packages nag furiously. carefully unusual accounts snooze according to the fluffily regular pinto beans. slyly spec|
+142|Customer#000000142|AnJ5lxtLjioClr2khl9pb8NLxG2,|9|19-407-425-2584|2209.81|AUTOMOBILE|. even, express theodolites upo|
+143|Customer#000000143|681r22uL452zqk 8By7I9o9enQfx0|16|26-314-406-7725|2186.50|MACHINERY|across the blithely unusual requests haggle theodo|
+144|Customer#000000144|VxYZ3ebhgbltnetaGjNC8qCccjYU05 fePLOno8y|1|11-717-379-4478|6417.31|MACHINERY|ges. slyly regular accounts are slyly. bold, idle reque|
+145|Customer#000000145|kQjHmt2kcec cy3hfMh969u|13|23-562-444-8454|9748.93|HOUSEHOLD|ests? express, express instructions use. blithely fina|
+146|Customer#000000146|GdxkdXG9u7iyI1,,y5tq4ZyrcEy|3|13-835-723-3223|3328.68|FURNITURE|ffily regular dinos are slyly unusual requests. slyly specia|
+147|Customer#000000147|6VvIwbVdmcsMzuu,C84GtBWPaipGfi7DV|18|28-803-187-4335|8071.40|AUTOMOBILE|ress packages above the blithely regular packages sleep fluffily blithely ironic accounts. |
+148|Customer#000000148|BhSPlEWGvIJyT9swk vCWE|11|21-562-498-6636|2135.60|HOUSEHOLD|ing to the carefully ironic requests. carefully regular dependencies about the theodolites wake furious|
+149|Customer#000000149|3byTHCp2mNLPigUrrq|19|29-797-439-6760|8959.65|AUTOMOBILE|al instructions haggle against the slyly bold w|
+150|Customer#000000150|zeoGShTjCwGPplOWFkLURrh41O0AZ8dwNEEN4 |18|28-328-564-7630|3849.48|MACHINERY|ole blithely among the furiously pending packages. furiously bold ideas wake fluffily ironic idea|
+151|Customer#000000151|LlyEtNEXT6kkZ,kGP46H|19|29-433-197-6339|5187.02|HOUSEHOLD|regular dugouts: blithely even dolphins cajole furiously carefull|
+152|Customer#000000152|PDrllSkScKLh4lr19gmUZnK|8|18-585-850-3926|1215.18|BUILDING|ously ironic accounts. furiously even accounts accord|
+153|Customer#000000153|kDzx11sIjjWJm1|6|16-342-316-2815|5454.26|HOUSEHOLD|promise carefully. unusual deposits x-ray. carefully regular tithes u|
+154|Customer#000000154|2LAlU fDHkOqbXjHHDqw1mJQNC|19|29-522-835-6914|4695.12|FURNITURE|nic packages haggle blithely across the|
+155|Customer#000000155|l,sSphiStMgdrxpxi|0|10-566-282-8705|5902.85|AUTOMOBILE| sleep ironic, bold requests. regular packages on the quiet dependencies|
+156|Customer#000000156|5OS0edX2Y6B1cf9wJNuOQWgrrZccXk9|9|19-723-913-3943|9302.95|AUTOMOBILE| regular foxes above the theodolites haggle |
+157|Customer#000000157|HGEouzCcFrNd nBAdsCRjsMxKOvYZdbwA7he5w9v|15|25-207-442-1556|9768.73|BUILDING| pinto beans against the carefully bold requests wake quickly alongside of the final accounts. accounts|
+158|Customer#000000158|2HaYxi0J1620aoI1CdFyrW,rWOy|10|20-383-680-1329|6160.95|AUTOMOBILE|ecoys. fluffily quick requests use flu|
+159|Customer#000000159|KotsdDO6EHnysVu922s6pjZpG,vlT|10|20-888-668-2668|2060.06|HOUSEHOLD|cingly express somas haggle above the theodolites. pinto beans use special theodolites. theodolites sleep |
+160|Customer#000000160|5soVQ3dOCRBWBS|13|23-428-666-4806|4363.17|MACHINERY|olites. silently ironic accounts cajole furious|
+161|Customer#000000161|2oRkx,NtjFUh|7|17-805-718-2449|3714.06|MACHINERY|ptotes nag carefully instructions. silent accounts are. furiously even accounts alongside|
+162|Customer#000000162|JE398sXZt2QuKXfJd7poNpyQFLFtth|8|18-131-101-2267|6268.99|MACHINERY|accounts along the doggedly special asymptotes boost blithely during the quickly regular theodolites. slyly |
+163|Customer#000000163|OgrGcOnm4whd0f|21|31-863-349-4121|2948.61|FURNITURE| nag furiously furiously final requests. slyly s|
+164|Customer#000000164|YDW51PBWLXLnbQlKC|4|14-565-638-9768|208.45|HOUSEHOLD|ironic, special pinto beans. ironic|
+165|Customer#000000165|8pc6kwBmwBdEnfVP53aqL9DM4LymC4|0|10-927-209-5601|3349.92|HOUSEHOLD| requests. final ideas cajole quickly at the special, ironic acco|
+166|Customer#000000166|15HWGtwoP77EJfd95HxtMSTZUelV8NOKne2|10|20-320-530-5920|2042.21|FURNITURE|the packages. blithely final packages are furiously unusual asymptotes. regular frets promise carefully u|
+167|Customer#000000167|QNc2eOlRIzL6jpthwgDuB866uCIUPiOX|5|15-288-395-5501|1468.09|AUTOMOBILE|espite the ironic excuses. furiously final deposits wake slyly. slyly ex|
+168|Customer#000000168|GDcL5qU86P8,oaTwVBCLE6asM8rlxpE,211uziU|12|22-354-984-5361|-808.56|FURNITURE|blithely final accounts sleep quickly along the regular ideas. furiously sly foxes nag across the|
+169|Customer#000000169|NjhmHa7xrcjE|18|28-362-499-3728|4483.83|FURNITURE|fully unusual pinto beans. blithely express asymptotes lose carefully regular instructions? accounts b|
+170|Customer#000000170|5QmxmYubNhn6HAgLwTvphevM3OmpZTGsM|15|25-879-984-9818|7687.89|BUILDING| regular requests. carefully regu|
+171|Customer#000000171|RIhjJCrth89EU7xRSvN|7|17-513-603-7451|2379.91|MACHINERY|ly furiously final requests. slyly final requests wake silently pending, silent accounts. exp|
+172|Customer#000000172|KwgdKUL1G2WacsMNF50yX|22|32-178-964-1847|1134.40|MACHINERY|losely regular, unusual instructions. |
+173|Customer#000000173|Aue7KVz,FinSHpov Vk5ed,wSQ2BRSioJ0|9|19-443-196-8008|845.84|BUILDING|s pinto beans use thinly slyly regular packages. instructions print along the s|
+174|Customer#000000174|R5 fCPMSeDXtUpp5Ax|23|33-845-455-8799|1944.73|FURNITURE|oldly even requests haggle quickly blithely ironic accounts. idly final foxes doze slyly pending dep|
+175|Customer#000000175|8YK1ZyTqoY3wMWnExl4itPMLL793GpEZb6T|10|20-427-617-9922|1975.35|FURNITURE|ly final platelets are final pinto b|
+176|Customer#000000176|9hBepY2uz88HlCqToOLgeU770u81FeL|13|23-432-942-8830|-375.76|FURNITURE|uriously. final requests sleep ironic packages. quickly|
+177|Customer#000000177|6wzEKPyZE9dmBCJZ8e7x7fiiK,k|1|11-917-786-9955|7457.50|BUILDING|nal dolphins: blithely bold gifts wake slyly afte|
+178|Customer#000000178|p HUSDg8Cgan4Fj8Drvcdz4gi4dSqV0a7n 0ag|21|31-436-268-6327|2272.50|FURNITURE|unts. blithely regular dependencies kindle pending deposits. quietly express deposits wake above the Tiresias-- ex|
+179|Customer#000000179|djez3CWg0nnCiu60jsF|4|14-703-953-2987|-43.08|MACHINERY|st furiously. idly regular instructions wake fluffily slyl|
+180|Customer#000000180|DSGW3RFoYJE opVw,Y3wGCGcNULZi|13|23-678-802-2105|-92.58|FURNITURE|lar accounts sublate above the slyly final|
+181|Customer#000000181|YNviWd WrRkZvSw1OxIewBq|9|19-653-305-8440|3929.96|FURNITURE|final requests cajole furiously acro|
+182|Customer#000000182|tdwvgepG316CCTHtMaF8Q|3|13-199-211-9023|4810.22|AUTOMOBILE|quickly against the blithely even deposits; epitaphs unwind quickly along the carefully regular excuses. furio|
+183|Customer#000000183|aMAB2QSb8 86MAx|22|32-771-279-8154|4419.89|HOUSEHOLD|sual accounts across the slyl|
+184|Customer#000000184|uoOpBuRr42f1WIqnVYAhxbAA9bkK6HUGpOt|21|31-739-340-5476|170.46|AUTOMOBILE|hely according to the furiously unusual accounts. furiously bold platele|
+185|Customer#000000185|iHXzQgienOQ|5|15-760-572-8760|2788.76|BUILDING|t the ironic accounts. fluffily regular requests wake slyly ironic pinto beans. slyly unusu|
+186|Customer#000000186|BeVr6MzaobBENXRBC8pmOmkByMJI|3|13-518-743-2576|8737.50|HOUSEHOLD|e slyly final dependencies. unusual instructions against the carefully pending instructions boost quickly|
+187|Customer#000000187|OIlgR6oIRXV5g63q5YGudCjRD8kpod2p|4|14-716-294-6674|-774.22|FURNITURE|r deposits. carefully silent packages after the fluffily even instructio|
+188|Customer#000000188|58Srs6gEEoD3ZfwgXDM1OayRiaSY6K9YsveWwV|5|15-613-528-7811|9533.37|BUILDING|st slyly special platelets. bold,|
+189|Customer#000000189|r51HSq Rg8wQgF1CBfG1Vbye3GK|22|32-980-348-1114|-594.05|MACHINERY|sly express patterns. ideas on the regular d|
+190|Customer#000000190|F2X,GhSqLz8k u0gWsirsraFaEDEo6vIGtOTaO1T|11|21-730-373-8193|1657.46|AUTOMOBILE|uickly-- fluffily pending instructions boo|
+191|Customer#000000191|P1eCXsPWkv2y6ENQv|16|26-811-707-6869|2945.16|BUILDING|o beans hinder slyly bold accounts.|
+192|Customer#000000192|rDmB2c9d1BJQ y6R9jTx86YI77D|10|20-750-712-2481|8239.96|MACHINERY|ely unusual packages are fluffily |
+193|Customer#000000193|dUT4dtsPTZ6ZpkWLc,KGJCHY6JDJgPFH4|23|33-182-978-6287|8024.55|MACHINERY|y even theodolites. final foxes print along the final pinto beans. theodoli|
+194|Customer#000000194|mksKhdWuQ1pjbc4yffHp8rRmLOMcJ|16|26-597-636-3003|6696.49|HOUSEHOLD|quickly across the fluffily dogged requests. regular platelets around the ironic, even requests cajole quickl|
+195|Customer#000000195|WiqQD8hscyKekjMcSBA7AX 0AbxvBV|22|32-757-684-6845|4873.91|AUTOMOBILE| should detect blithely. quickly even packages above the deposits wak|
+196|Customer#000000196|68RstNo6a2B|18|28-135-177-2472|7760.33|FURNITURE|accounts wake. express instructions according to the s|
+197|Customer#000000197|UeVqssepNuXmtZ38D|1|11-107-312-6585|9860.22|AUTOMOBILE|ickly final accounts cajole. furiously re|
+198|Customer#000000198|,7fcZHIUn,fUaQtK8U,Q8|1|11-237-758-6141|3824.76|AUTOMOBILE|tions. slyly ironic waters wa|
+199|Customer#000000199|lBU3xll,a7e9TYm3 UyjDPCVMvnHKpq,9HW1X|4|14-136-924-5232|7654.31|FURNITURE|fully busy pinto beans. packages cajole around the express, bold packages! quickly ironic tithes|
+200|Customer#000000200|x1 H5c66DUgH2pgNTJhw6eZKgrAz|16|26-472-302-4189|9967.60|BUILDING|e after the ironic, even realms. fluffily regular packages doze-- courts haggle carefully! blithely|
+201|Customer#000000201|yWLtmd5usyjsCvyL1QJsBorC|2|12-759-183-9859|4614.40|MACHINERY| blithely even packages sleep carefully bold, unus|
+202|Customer#000000202|Q0uJ1frCbi9yvu|7|17-905-805-4635|2237.64|AUTOMOBILE|fully along the carefully pending Tiresias; special packages along the carefully special deposits try to|
+203|Customer#000000203|2fRlubh lWRinCs1nimADdn|1|11-886-563-6149|7960.63|MACHINERY| packages are. requests integrate regularly across th|
+204|Customer#000000204|7U7u2KryFP|6|16-761-837-4820|-627.76|BUILDING|ages. accounts wake slyly. dolphins nag blithely. final, regular requests haggle blithely furiously even |
+205|Customer#000000205|jOTQBGb nhfBMu3,LIN62WogLDBO0w|12|22-356-437-1311|7161.52|BUILDING| furiously pending accounts. ideas along the slyly final deposits cajole blithel|
+206|Customer#000000206|xsg,ehRHS5OKqyBR5YtoPm8myz|9|19-976-832-3312|-274.79|AUTOMOBILE| the carefully regular foxes. regular accounts wake furiously braids. bold ideas are carefu|
+207|Customer#000000207|ewz5JNnxJPmPGY|21|31-562-675-6475|-439.98|AUTOMOBILE|n theodolites against the evenly even requests boost carefully pinto beans! fi|
+208|Customer#000000208|Abye1MwcNfY0KO6yqv,Wwe|19|29-859-139-6234|6239.89|MACHINERY|le carefully according to the quickly silent packages. quickly ironic packages affix according to the ruthles|
+209|Customer#000000209|iBvmxOZV3qXMYQW3W4Oo7YFhdV|16|26-207-121-7721|8873.46|FURNITURE|deposits. furiously regular ideas across the quietly regular accounts cajole about the express packages. quickly reg|
+210|Customer#000000210|13cFL9sG1nrGERURN9WZI0|20|30-876-248-9750|7250.14|HOUSEHOLD|nusual instructions sleep regular acc|
+211|Customer#000000211|URhlVPzz4FqXem|13|23-965-335-9471|4198.72|BUILDING|furiously regular foxes boost fluffily special ideas. carefully regular dependencies are. slyly ironic |
+212|Customer#000000212|19U0iZ3GtDdrsn|7|17-382-405-4333|957.58|BUILDING|symptotes are blithely special pinto beans. blithely ironic |
+213|Customer#000000213|NpqMYBhBcWk8mnEta|24|34-768-700-9764|9987.71|HOUSEHOLD|al deposits. final instructions boost carefully. even deposits sleep quickly. furiously regul|
+214|Customer#000000214|MpCwhcLrbcIM7AeKS9tRM09by|8|18-180-678-6165|1526.59|MACHINERY|grow. fluffily regular pinto beans according to the regular accounts affix quickly pe|
+215|Customer#000000215|8H76xbBhde HY70BrYqGEFmVPXqlm8pgjjxh|9|19-564-446-4758|3379.20|FURNITURE|al pinto beans. ironic foxes serve. i|
+216|Customer#000000216|LXH7wSv4I6GG6TAkLOyLcMh559a8Y|21|31-296-111-5448|-776.08|FURNITURE|hely at the pending warhorses; blithe|
+217|Customer#000000217|YIy05RMdthrXqdfnNKud|23|33-159-298-3849|378.33|AUTOMOBILE|ven frays wake according to the carefully |
+218|Customer#000000218| V1FCIeSseuyNGYfHS Rx0,sc4IsBfReV|4|14-480-931-8567|9541.19|MACHINERY|lar courts. furiously pending dependencies cajole blithely? fluffily regular deposits cajol|
+219|Customer#000000219|eTjiL01eyoKiAe2WQoz3EpPg2lvSLeOu2X2wyxK|11|21-159-138-6090|9858.57|AUTOMOBILE|ckly multipliers. carefully eve|
+220|Customer#000000220|TbUHVhkttz|16|26-201-301-7371|9131.64|BUILDING| even, even accounts are. ironic |
+221|Customer#000000221|ripNyyPOewg8AahnZlsM|16|26-351-738-1001|1609.39|BUILDING| instructions above the regular requests cajole packages. pending, even |
+222|Customer#000000222|gAPkFjwxX1Zq 2Yq6 FIfLdJ4yUOt4Al7DL18Ou|1|11-722-672-5418|8893.76|BUILDING|regular accounts haggle furiously around the c|
+223|Customer#000000223|ftau6Pk,brboMyEl,,kFm|20|30-193-643-1517|7476.20|BUILDING|al, regular requests run furiously blithely silent packages. blithely ironic accounts across the furious|
+224|Customer#000000224|4tCJvf30WagGfacqcAqmfCptu2cbMVcj2M7Y0W|15|25-224-867-3668|8465.15|BUILDING|counts. bold packages doubt according to the furiously pending packages. bold, regular pinto beans |
+225|Customer#000000225|2HFk1E0fmqs|13|23-979-183-7021|8893.20|AUTOMOBILE|ages boost among the special foxes. quiet, final foxes lose carefully about the furiously unusual th|
+226|Customer#000000226|ToEmqB90fM TkLqyEgX8MJ8T8NkK|3|13-452-318-7709|9008.61|AUTOMOBILE|ic packages. ideas cajole furiously slyly special theodolites: carefully express pinto beans acco|
+227|Customer#000000227|7wlpEBswtXBPNODASgCUt8OZQ|13|23-951-816-2439|1808.23|MACHINERY|lar, ironic pinto beans use! quickly regular theodolites maintain slyly pending pac|
+228|Customer#000000228|A1Zvuxjdpt8TZP6i41H3fn9csGqOJUm5x0NIS1LA|20|30-435-915-1603|6868.12|FURNITURE| blithely ironic theodolites |
+229|Customer#000000229|Sbvjxgmwy4u6Ks1FH7lxo7toMmeU5dG|1|11-243-298-4029|7568.07|BUILDING|bold accounts haggle furiously even deposits. regular instruct|
+230|Customer#000000230|CnR8xt3MYqID0tiHwYh|21|31-744-950-8047|1682.83|MACHINERY|c decoys impress even deposits. thinly final asymptotes |
+231|Customer#000000231|WFOhG9Z9ohRdsyuYnPvBSv|10|20-825-880-1065|283.55|BUILDING|ly final deposits. fluffily ironic requests wake carefully carefully regular accounts. quickly sp|
+232|Customer#000000232|oA9o,3YOXu2rzKONdd,cxpqCFXUv5kuxBYKp|22|32-283-563-2674|554.71|HOUSEHOLD|ges sleep. final, bold theodolites are quickly final packages. furiously ironic packages are slyly fi|
+233|Customer#000000233|mFm45wZ7rV4VIbEE1F4|3|13-574-104-3221|3998.24|FURNITURE|st the special instructions. theodolites detect blithely according |
+234|Customer#000000234|ILyuJbixVmrNEVxsfQOMFxByySs|18|28-243-424-1393|8383.51|AUTOMOBILE| fluffily regular ideas play s|
+235|Customer#000000235|bp0rIBMh4fMdQnHBmMnB|3|13-350-790-6416|754.41|AUTOMOBILE|hely ruthless instructions again|
+236|Customer#000000236|kcW,mM0rhIstAcVaol1,6DVkS FPKlhY|14|24-808-967-4503|5384.59|AUTOMOBILE|te slyly along the requests. carefully final requests sleep slyly blithe frets. furiously ruthless dep|
+237|Customer#000000237|R dtznB5ocPPo|19|29-414-970-5238|-160.02|HOUSEHOLD|regular pinto beans sleep furiously ironically silent theodolites. quickly ironic courts after the deposits sleep f|
+238|Customer#000000238|tE0lVKK3tz5AG2 Hal2XHwE485g5MX7|16|26-307-925-1236|3482.32|HOUSEHOLD|uffily ironic theodolites are. regular, regular ideas cajole according to the blithely pending epitaphs. slyly |
+239|Customer#000000239|w8eRmMOmUTjVOkucbfcGDh2AqCixTOC|9|19-699-117-6988|5398.77|FURNITURE|uctions. furiously even dolphins haggle fluffily according to the furiously regular dep|
+240|Customer#000000240|SXfeEOwRZsXArtY3C5UWqXgLcJBAMmaynaTJs8|9|19-756-548-7835|7139.68|MACHINERY|al accounts about the slyly pending p|
+241|Customer#000000241|FBuwHkPR450PvnZnAezcaeMaS,hX3Ifdk|9|19-344-614-2207|6569.34|AUTOMOBILE| across the enticingly even requests. blithely iro|
+242|Customer#000000242|apgzK3HWAjKHFteJ16Tg3OERViesqBbx|3|13-324-350-3564|1975.41|MACHINERY|riously ironic pinto beans cajole silently. regular foxes wake slyly. bravely |
+243|Customer#000000243|te2FOn8xJzJinZc|7|17-297-684-7972|620.73|AUTOMOBILE|nic deposits. evenly pending deposits boost fluffily careful|
+244|Customer#000000244|FBVbCpEVaFaP8KogqQO2VuXeVx|15|25-621-225-8173|2506.38|HOUSEHOLD|encies. requests nag carefully. regularly final accounts h|
+245|Customer#000000245|IseFIO7jTGPTzAdZPoO2X4VX48Hy|12|22-952-232-2729|3720.15|MACHINERY|s. regular foxes against the s|
+246|Customer#000000246|WrRUR0ds6iypmopww9y9t0NJBRbke78qJm|15|25-608-618-2590|9584.96|AUTOMOBILE| requests shall have to integrate furiously pending courts. sil|
+247|Customer#000000247|N8q4W4QQG2mHY47Dg6|20|30-151-905-3513|8495.92|HOUSEHOLD|es affix furiously regular deposits. blithely ironic asymptotes after the blithely e|
+248|Customer#000000248|mgT15r8asLyaED|10|20-447-727-8914|8908.35|FURNITURE|s detect blithely. blithely pending dolphins along the fluffily final accounts haggle fu|
+249|Customer#000000249|0XE2fhX7j2uivaHBrFuRl1NoJnaTSIQT|3|13-226-455-7727|-234.01|MACHINERY|its are after the special deposits. ironic, final deposits against the slyl|
+250|Customer#000000250|9hif3yif6z8w8pW88F755PU7uz|16|26-464-852-1461|2869.97|FURNITURE|s. slyly unusual instructions cajole quickly carefully bold dep|
+251|Customer#000000251|Z9fdQmv07C3k hxwt9nchhuQiqC4wox85se8EW7L|13|23-975-623-5949|9585.32|HOUSEHOLD|fully blithely regular requests. fluffily even dugouts detect furiously final ideas. sometimes ironic depos|
+252|Customer#000000252|db1bPFF xUkJYzvE3cBtqYeDn2 u|16|26-330-347-9201|3561.74|FURNITURE|ngside of the pending foxes. furiously ironic requests wake. blithely ironic acco|
+253|Customer#000000253|naGyIRPFPH E|15|25-461-140-9884|9139.52|AUTOMOBILE| regular deposits sleep against the accounts. foxes cajole carefully special |
+254|Customer#000000254|vQ,pEzMQaFgJzK4TJ2eA|1|11-451-622-6325|1915.35|MACHINERY|equests. carefully ironic deposits detect carefully abo|
+255|Customer#000000255|I8Wz9sJBZTnEFG08lhcbfTZq3S|3|13-924-679-8287|3196.07|BUILDING|ges mold. bold, regular courts boost furiously at the |
+256|Customer#000000256|eJ6AggYh80JMEzZNwYK4CIC2flT|10|20-229-271-4429|1299.92|HOUSEHOLD|ld boost about the carefully ironic foxes. slyly special packages cajole alongside of the slyly final accounts. q|
+257|Customer#000000257|LyIa26EXYaSU|7|17-816-687-2155|-339.85|AUTOMOBILE|s cajole quickly along the ironic pinto beans: even, regular foxes are |
+258|Customer#000000258|7VbADek8qYezQYotxNUmnNI|12|22-278-425-9944|6022.27|MACHINERY|about the regular, bold accounts; pending packages use furiously stealthy warhorses. bold accounts sleep fur|
+259|Customer#000000259|kwh9i86Wj1Zq14nwTdhxapIkLEI|5|15-907-674-2046|3335.29|HOUSEHOLD|furiously unusual instructions. s|
+260|Customer#000000260|CrHY2zx4vner4|1|11-708-529-9446|9196.11|MACHINERY|carefully. furiously bold accounts nag furiously carefully regular accounts-- final decoys prin|
+261|Customer#000000261|dXkVi8qahjP|12|22-494-898-7855|7094.22|AUTOMOBILE|he special instructions integrate carefully final request|
+262|Customer#000000262|DcUOAFBxMu8oGKvIqbDx7xgeZ|4|14-698-169-5201|1561.80|AUTOMOBILE|ress packages above the ironic accounts are against the ironic pinto beans. carefully final accoun|
+263|Customer#000000263|Y2pxeGWkTyaq,0RCzIbZ3|1|11-276-906-3193|1162.03|FURNITURE|usly ironic theodolites cajole furiously. final ep|
+264|Customer#000000264|24Akixb4hqpRD|11|21-881-683-3829|3195.83|MACHINERY|ular packages cajole blithely a|
+265|Customer#000000265|sthiqpj6CPAKbD7BBSz9ulRuF9d,ebfaiTc|17|27-716-734-2046|8275.80|MACHINERY|lar, ironic platelets. furiously unu|
+266|Customer#000000266|VSIEruiMdDvjDaTQxkuK60Yw3AGxO|0|10-474-243-3974|5481.00|HOUSEHOLD|ccounts. quickly ironic excuses after the regular foxes wake along the ironic, fina|
+267|Customer#000000267|el7 bYzj1USp6T5i3KpfZ43jKegbdO,Jd69|15|25-402-954-8909|3166.94|AUTOMOBILE| detect slyly alongside of the foxes. closely regular pinto beans nag quickly of the blithely bold r|
+268|Customer#000000268|tkSLQoOpfOa601itad05EcN0UmhjZXdyKRc0r|3|13-720-469-5207|6821.01|MACHINERY|press ideas print quickly. fluffily unusual deposits use blithely eve|
+269|Customer#000000269|J7kLF9iPOQA 7CVwAmQRpwfZPDJ2q5Seu2Vj1gh|14|24-570-874-6232|7667.35|MACHINERY| close packages-- quickly regular instructions sleep. carefully |
+270|Customer#000000270|,rdHVwNKXKAgREU|7|17-241-806-3530|9192.50|AUTOMOBILE|ldly final instructions mold carefully along the ironic accounts.|
+271|Customer#000000271|O,HceV3 XE77Yx|6|16-621-282-5689|1490.35|MACHINERY|ly pending deposits cajole slyly sl|
+272|Customer#000000272| YDjKpjXEe0A6rDE|2|12-324-877-9650|-746.03|MACHINERY|he regular requests. slyly special |
+273|Customer#000000273|sOA,alhAw1juArjRLOd|2|12-197-772-5736|-675.05|FURNITURE|ng frets sleep. slyly express dolphins doubt ironically ironic accounts. final de|
+274|Customer#000000274|adesXwNumnPqsKgsE1groEAwdKNgZ|19|29-330-389-1442|4425.42|FURNITURE|gular dependencies. ironic foxes haggle du|
+275|Customer#000000275|M1UCTKrZLOgSyr|22|32-194-864-6861|5067.31|AUTOMOBILE|y regular deposits. fluffily ironic packages cajole along the |
+276|Customer#000000276|iSWxETEMKe5cF|16|26-716-357-3851|2292.67|AUTOMOBILE|eans. even, ironic accounts affix sl|
+277|Customer#000000277|BWGsQevHk0BfRJV3RRB ElFc|23|33-696-831-5394|8876.10|BUILDING|phins; bold, final accounts print. carefully silent |
+278|Customer#000000278|4jqLjG 2aeYMFEJi|20|30-445-570-5841|7621.56|BUILDING| pending, express requests cajole carefully special packages. blithely pending accounts affix furiously. fluffily |
+279|Customer#000000279|9t2Wo1jK1TYnDGg6ODSMGf1W9hRT3F3VK5zxJOC|9|19-220-605-9025|9663.23|AUTOMOBILE|l platelets sleep fluffily against the fluffily enticing excuses. blithely special requests wake somet|
+280|Customer#000000280|3fDiGmN64En0ei|11|21-537-461-3965|3952.84|BUILDING|accounts. quiet deposits sleep. slyly even instructions detect about the blithely bold instru|
+281|Customer#000000281|x5gJ8IGm2Fo9Jigt|6|16-809-382-6446|4361.70|BUILDING|fully quiet ideas detect quickly even packages. regular instructions accor|
+282|Customer#000000282|wcCc, y1996DnOwnXu1i|18|28-251-599-2415|1125.45|HOUSEHOLD|ole daringly against the carefully ir|
+283|Customer#000000283|jysHDuTKUKYEjdBhtI LqTttTp 7i2kvx1 O3A|7|17-111-303-1282|4450.03|AUTOMOBILE|y alongside of the accounts. slyly express dependencies wake quickly above the carefully ironic package|
+284|Customer#000000284|2ZgAkaBgb6aigORfIfUd3kHbPi42|6|16-161-235-2690|593.52|AUTOMOBILE|lar gifts. carefully even deposits boost! furiously even braids use afte|
+285|Customer#000000285|ApUL7bgFMUXGXewpoQyQOSnLeL9Vc1rrkW |20|30-235-130-1313|7276.72|FURNITURE|dolphins after the slyly ironic packages boost furiously among the furiously pending theodolites. bl|
+286|Customer#000000286|7 7uVDrpkWuozyEd|22|32-274-308-4633|-109.73|HOUSEHOLD|ly special accounts haggle slyly slyly fluffy req|
+287|Customer#000000287|KTsaTAJRC0eMYkyFm7EK3eeamHs7s|4|14-330-840-6321|1734.18|MACHINERY|requests. bold, silent depths lose f|
+288|Customer#000000288|eEs5rwc9AOJaKhvV|2|12-674-136-5397|5339.43|HOUSEHOLD| furiously about the carefully ironic packages. express reques|
+289|Customer#000000289|NUilehg0nVOkK3K1SW0,BAHCeST2JqKzuTMoGS|10|20-456-773-7693|-215.75|AUTOMOBILE|ending foxes across the carefully|
+290|Customer#000000290|8OlPT9G 8UqVXmVZNbmxVTPO8|4|14-458-625-5633|1811.35|MACHINERY|sts. blithely pending requests sleep fluffily on the regular excuses. carefully expre|
+291|Customer#000000291|ZlLNbGxnQYMubQ9K|8|18-657-656-2318|4261.68|HOUSEHOLD|e slyly silent deposits. bold deposits haggle slyly special packages. furiously bold requests cajole carefully abo|
+292|Customer#000000292|hCXh3vxC4uje9|11|21-457-910-2923|2975.43|HOUSEHOLD|usly regular, ironic accounts. blithely regular platelets are carefully. blithely unusual ideas affi|
+293|Customer#000000293|7ynwX7lZ3o2cmAWSkKAc3edKa 8yT|2|12-887-984-5485|-43.79|MACHINERY|ironic foxes are final packages. requests about the furiousl|
+294|Customer#000000294|hSaNqI1P2IyEFHY0r0PsPkMqt|18|28-187-946-4260|-994.79|BUILDING|bold packages. regular, final asymptotes use quickly fluffily even waters. blithely express requests wake into th|
+295|Customer#000000295|mk649IH6njR14woTVZ1cxtlNs URxBHD5o5z2|0|10-340-773-4322|9497.89|HOUSEHOLD|play according to the quickly ironic instructions-- unusual, bol|
+296|Customer#000000296|4eyqk2zpg4m V JGEtgwNmCq3c|15|25-875-178-1959|8081.52|BUILDING|es need to affix furiously. ironic, final foxes are against the regular instructions: pinto beans haggle q|
+297|Customer#000000297|hzg,409pj0|15|25-915-518-8800|7096.32|HOUSEHOLD|de of the regular asymptotes detect slyly ironic theod|
+298|Customer#000000298|jFKF3w 8aegECg7mP,qtuR9IsTSYQlEXq|21|31-542-157-4074|3812.84|BUILDING|sleep slyly. stealthy, bold pinto beans sleep blit|
+299|Customer#000000299|3F3Q0fTkjIv1UfJbcN7|4|14-948-474-7353|5380.50|HOUSEHOLD|tes sleep fluffily. furiously regular requests boost fluffily evenly even asympt|
+300|Customer#000000300|I0fJfo60DRqQ|7|17-165-193-5964|8084.92|AUTOMOBILE|p fluffily among the slyly express grouches. furiously express instruct|
+301|Customer#000000301|FtFq9Cgg5UAzUL|7|17-265-345-9265|9305.05|HOUSEHOLD|ular, regular notornis sleep along the furiously pending foxes|
+302|Customer#000000302|cJ3cHoAjAiaxTU2t87EJM|4|14-152-594-2967|1107.42|MACHINERY|dolphins haggle fluffily across the final requests. regularly unusual sentiments detect fluffily requests. regular|
+303|Customer#000000303|5pSw0OIoNRcpyTEEI1gZ6zRMyJ0UGhJdD|3|13-184-254-6407|9339.57|AUTOMOBILE|mise ironically against the unusual foxes. deposits cajole asymptotes. ironic ideas shall have to sleep|
+304|Customer#000000304|Cilvb3k8ghDX4|0|10-321-698-7663|9217.55|MACHINERY|s integrate at the carefully ironic instructions. fin|
+305|Customer#000000305|x8kcl,R4Wk|11|21-250-654-3339|4356.59|FURNITURE|nts. even, regular courts nag. dugouts use blithely a|
+306|Customer#000000306|ADoOEIr5aQcLIoGJM6nCvPEP 91|10|20-109-305-9629|3268.01|AUTOMOBILE|ill have to are. final, express deposits hag|
+307|Customer#000000307|xvkJ13gs7GH|13|23-836-934-5394|346.59|FURNITURE| ironic platelets nag against the bold pinto |
+308|Customer#000000308|c9WuNBiEYmGxeBmZaELg WWb|9|19-992-128-2013|4150.76|HOUSEHOLD|ilent accounts haggle carefully unusual dolphins. carefully regular requests wake along the |
+309|Customer#000000309|6Jg4ECVS2u7i,E|21|31-231-377-9535|8824.78|FURNITURE|lyly. furiously enticing instructions haggle. carefull|
+310|Customer#000000310|QZnc5mkLIPh6JGrzcHmRzCiL0AmdE92vyM|1|11-838-647-9285|3186.57|FURNITURE|mise fluffily blithely ironic courts|
+311|Customer#000000311|dvpNARle3mR19GD4s2gpEbkL2mZV3uvV6P|23|33-919-292-8822|6589.50|AUTOMOBILE|essly even escapades. blithely regular Tiresias cajole blithely furiously close packages. furiously ironic pi|
+312|Customer#000000312|cH6XucXV0V|6|16-316-482-2555|-178.84|AUTOMOBILE|e slyly. furiously regular pinto beans wake slyly according to the fluffily even excuses. ca|
+313|Customer#000000313|Ay52vCrTXsSmp7TmQ1kujvuItfLGx|0|10-401-786-6040|6115.81|HOUSEHOLD|g to the even dependencies. accoun|
+314|Customer#000000314|8,tdTVYGYoYRaAKwG 6aDJna4Cfjt,F9DDCC2|13|23-366-243-4713|2394.92|MACHINERY|ets alongside of the slyly pending pinto bean|
+315|Customer#000000315|pXaKKTCTyc UI3tglBaWRimosymG6ZyOCyb6Vb3M|7|17-442-286-3594|348.58|FURNITURE|s. slyly regular sentiments are carefully. slyly ironic asymptot|
+316|Customer#000000316|zE dN3aqjaG|8|18-171-394-5011|4571.78|MACHINERY|egular ideas cajole around the ironic, pending deposits. furiously pending dolphins serve blithely regular |
+317|Customer#000000317|uOeuL8DG1j|19|29-615-537-8871|956.88|HOUSEHOLD|ages. hockey players are. dependencie|
+318|Customer#000000318|PtJQn0IjYtShb1f2uYTYBnnmUeGNiwcALU|0|10-229-548-7118|9149.98|HOUSEHOLD|nding requests. special, bold instruction|
+319|Customer#000000319| UQ5mF3sdoZT2|6|16-734-928-1642|1834.36|FURNITURE| packages use slyly always ironic deposits. unusual, even notornis above|
+320|Customer#000000320|pO8rWSwK j|12|22-358-857-3698|6082.74|MACHINERY|ing requests. furiously regular accounts hinder slyly. final, regular theodolites against the slyly quiet requests|
+321|Customer#000000321|g3,8g XHACSvjZtJuiNk5BYiyPFnIxg|20|30-114-675-9153|7718.77|FURNITURE|special requests! express dugouts can affix furiously blithely regular platelets. fu|
+322|Customer#000000322|bWRyCyjH5OfGX|20|30-660-202-7517|4489.98|HOUSEHOLD|usual sauternes are among the slyly even instructions! thinly regular |
+323|Customer#000000323|ZLnVZ CXRi2,QDrlo|18|28-347-223-6024|1137.67|AUTOMOBILE|ely special foxes. express, final excuses across the packages are quickly amon|
+324|Customer#000000324|fiW1n6dLSVRkXj7kU1768UI2w1vMxEde5a |2|12-722-560-7023|806.59|FURNITURE|, regular requests kindle slyly furio|
+325|Customer#000000325|Z I43vl3ta3iYmjXNaSM d6Pe24ibjhdvPSi|15|25-823-702-9630|2377.34|HOUSEHOLD|nal foxes alongside of the always bold|
+326|Customer#000000326|nWDOTh6X019pfBtV3ikACYZiMjGykLrFnuyAo2|2|12-447-614-7494|1906.52|HOUSEHOLD|ckey players. carefully ironic a|
+327|Customer#000000327|UyKulwfNnX4l4ba1vQtwCWw8WNP50U8DCU|8|18-606-718-3062|8762.16|MACHINERY| unusual braids. daringly final ideas are quickly c|
+328|Customer#000000328|9pu j2HoEf1uhiY3jxE9l9fCRfjoVU|5|15-817-180-1487|6709.90|BUILDING|y about the daring accounts. furiously thin escapades integrate furiously against the furiously ironi|
+329|Customer#000000329|67r6XnIxUVgAc3pRX8tmGOw|11|21-106-357-8302|-651.91|BUILDING|ans. fluffily unusual instructions haggle about the slyly ironic platelets. never regular pinto beans sleep fl|
+330|Customer#000000330|UfNb7T9CTCnsfN3b|20|30-476-852-2371|8244.73|MACHINERY|en pinto beans. quickly final excuses haggle furiously. slyly pendin|
+331|Customer#000000331|Ug e2IBbl,LJuqjNz5XeQV|5|15-411-430-7917|170.27|AUTOMOBILE|r the silent ideas. carefully ironic deposits was carefully above the furiously even excuses. evenly regu|
+332|Customer#000000332|Jfosq,G6ziag7M04IvCx7SMRafyYvSI,Do|22|32-767-972-2596|-267.09|HOUSEHOLD| around the pinto beans. final theodolites haggle|
+333|Customer#000000333|heiloGYs Yey7NKhEFoiNhUBb,QFbjtn5wt|11|21-908-534-7709|8018.89|AUTOMOBILE|uriously close theodolites! slyly express foxes cajole-- final pinto beans boost blithely along the ironic|
+334|Customer#000000334|OPN1N7t4aQ23TnCpc|4|14-947-291-5002|-405.91|BUILDING|fully busily special ideas. carefully final excuses lose slyly carefully express accounts. even, ironic platelets ar|
+335|Customer#000000335|d2JCYLr2F9tC1AZMIvbIYPDQA|21|31-772-165-3138|6837.46|HOUSEHOLD| requests haggle carefully about the quickly special escapades. regular a|
+336|Customer#000000336|yC zy1i6AGrnykrV McJyjg|2|12-345-190-9898|9241.49|AUTOMOBILE|es. dependencies lose carefully blithely regular deposits. t|
+337|Customer#000000337|EluRTlO4pE7u0XSKKyvKvVyt4sADWFRLZuiyn|0|10-337-165-1106|-270.59|MACHINERY|ld requests sleep quickly. carefully express tithes wake carefully ac|
+338|Customer#000000338| aiYAeWgI0okGSJv7OgvKqMvPLhxF3blT8josX|23|33-302-620-7535|4092.49|FURNITURE|ckages nag blithely regular requests: carefully final packages between the slyly regular instructions sleep |
+339|Customer#000000339|jUs1Im28boIduGhp5vbKK50gM5ov7xH9G|24|34-992-529-2023|8438.07|HOUSEHOLD|ix. ironic, special tithes detect dog|
+340|Customer#000000340|WRnPrKQmAmoMQgHQERoVOhyTklcHMajJlc|2|12-730-681-4571|4667.12|BUILDING|es sleep according to the even, unusual Tiresias. carefully bold packages haggle. furiously pending s|
+341|Customer#000000341|4,zQfld2YV9TSeNgCSOvqlxhJvVW8WD|9|19-870-813-8585|8247.11|FURNITURE|low, special platelets alongside of the even, bold theodolites are carefully |
+342|Customer#000000342|SpDDdUfraEAfCULAuGLE|18|28-690-119-9571|7186.74|AUTOMOBILE|luffily final ideas. finally unusual requests boost slyly above the furio|
+343|Customer#000000343|ejvvSNHIkJVm8I1zpQINNn5yyJbA|3|13-877-910-5134|5521.36|HOUSEHOLD| unusual requests cajole blithely about the carefully express ideas. blithely even excuses above the pint|
+344|Customer#000000344|Zasc8,E0VVY|2|12-810-788-6699|-544.95|FURNITURE|le according to the regular instruction|
+345|Customer#000000345|dGFK ICPKxnsAzlX4UYOUf,n200yyEWhIeG|9|19-209-576-4513|1936.77|AUTOMOBILE|en pinto beans nag along the slyly regular deposits. slyly ir|
+346|Customer#000000346|K61SvIue3Emcwfel,7f9tO5WyJ58MbT7k3iS|2|12-100-890-4659|238.14|FURNITURE|ickly even pinto beans affix across the bravel|
+347|Customer#000000347|qRT7WRrnykLDfTc5Ei|1|11-519-832-9913|7348.92|BUILDING|ts use blithely blithely regular theodolites. even requests after the|
+348|Customer#000000348|ciP7BWkhOe1IbbVGlqJePBI6ZwqENkS|13|23-986-141-5327|3310.49|HOUSEHOLD|al foxes are on the carefully final excuses. careful dependen|
+349|Customer#000000349|vjJBjxjW9uoRZP02nS p6XY5wU6Ic,6xHpxUKA|23|33-818-229-3473|-565.35|BUILDING|y. bold, ironic instructions after the theodolites sleep blithely ironic packages. ideas c|
+350|Customer#000000350|G vBMGVmIOHl7tc4HeNMiMkKY|15|25-960-809-3690|19.31|BUILDING|tions. quietly unusual accounts sleep blithely afte|
+351|Customer#000000351|De35Hx1QiyS0uy|7|17-873-420-4342|3419.54|AUTOMOBILE|telets haggle blithely against the ironic|
+352|Customer#000000352|HqhIE5GRTK0dFtWpJUQENU4aa1bwdsUBEWtzUw|9|19-906-158-8420|6257.88|HOUSEHOLD|ts are. blithely special requests wake. furiously bold packages among the blithely eve|
+353|Customer#000000353|eftGCmL4b5rAKdvUe9biJXzAH|10|20-733-644-2244|3199.03|BUILDING|nal theodolites nag carefully. requests wake. slyly ironic ideas according to the blithely pe|
+354|Customer#000000354|sV3WgvJA06WngO4|2|12-545-101-2447|7095.95|BUILDING|. regular, final requests cajole fluffily. express attainments wake slyly until the even acco|
+355|Customer#000000355|205r3Xg9ZWjPZNX1z|14|24-656-787-6091|8727.90|FURNITURE|ly bold requests detect furiously. unusual instructions sleep aft|
+356|Customer#000000356|9RfNXUJivKTonL2bp1eG5IT|10|20-415-457-4421|2934.06|FURNITURE|al packages haggle always. daringly bold inst|
+357|Customer#000000357|l2C0Xkdib4t4 qKFUcRDOhRQMK7U0|18|28-452-965-8560|8747.36|AUTOMOBILE|ress platelets cajole fluffily final accounts: slyly ironic foxes s|
+358|Customer#000000358|F  z jplpUKWz1Hn7p3ez2qTsiIh|5|15-457-255-3822|-44.66|MACHINERY|e furiously pending requests. slyly bold requests wake deposits. furiously express|
+359|Customer#000000359|z4lUH9ssc3K2w0UjRIuNRrdqw|14|24-608-547-4751|6375.23|FURNITURE|ifts wake fluffily ironic ideas. slyly ironic deposits above the |
+360|Customer#000000360|S,6ajyDFO3WUQ0Qr|17|27-604-646-1645|6542.83|FURNITURE|engage. quickly final platelets about the fluffily unusual accounts wake |
+361|Customer#000000361|l0F8jMJVe63cb|20|30-164-267-4590|7451.84|BUILDING|fully busy ideas. regular foxes cajole |
+362|Customer#000000362|UscV00TNrNTDddxF7BTk|17|27-651-653-4122|6149.01|AUTOMOBILE|ut the fluffily ironic platelets. ironi|
+363|Customer#000000363|2Koh mYARhsVcFn0U2Abt35qIyedAr1TxP|17|27-460-529-3937|-573.86|HOUSEHOLD|s. carefully unusual deposits are foxes. furiously even foxes nag carefully according to the furiously express |
+364|Customer#000000364|SQ3b5Q5OtrmmZjJ87tq,o1TiXKVJQ0M7ZOuud|23|33-492-647-4972|32.24|HOUSEHOLD| dependencies? pending requests use carefull|
+365|Customer#000000365|QiZRz y1xU|24|34-708-696-5226|737.03|HOUSEHOLD|counts. unusual packages are blithely foxes. unusual dinos|
+366|Customer#000000366|pPQektSfn55AC7s9SRFkj07I2yXqakvCa|3|13-915-531-6826|-729.74|MACHINERY|nos wake quickly. regular, regula|
+367|Customer#000000367|yZaDoEZCqt2VMTVKoZUkf6gJ4yj|13|23-939-319-4691|9108.65|HOUSEHOLD|eodolites under the ironic, stealthy requests affix furiously among the unusual tit|
+368|Customer#000000368|9p ReFA4fseKWYUaUHi|22|32-552-596-4994|84.72|MACHINERY|ic asymptotes. quickly special packages along the bravely bold depos|
+369|Customer#000000369|ge1XhgI3ADIkvLr5GPMqpup,hzlTVv|8|18-333-644-9832|2881.06|FURNITURE| theodolites? quickly quick foxes are fluffily slyly regular instructions. fluffily|
+370|Customer#000000370|oyAPndV IN|12|22-524-280-8721|8982.79|FURNITURE|ges. final packages haggle quickly. slyly bold |
+371|Customer#000000371|dnxjCYwhuSHx 9KX38nV0R16fG|22|32-119-346-2028|7789.14|AUTOMOBILE|equests shall boost furiously special pinto beans. express, ironic ideas sleep across the ironi|
+372|Customer#000000372|aKPMNZfbgV0neVIBo|19|29-226-339-6392|-921.91|MACHINERY|. furiously even foxes sleep at the forges. bold accounts sleep after the ironic theodolites. ironi|
+373|Customer#000000373|2hrQ wHkbaNlJCzY,mVkugMIE 8ryNlaA3JHDTjJ|20|30-883-170-4010|2354.06|MACHINERY|requests wake blithely even packages. slyly ironic deposits haggle blithely |
+374|Customer#000000374|fg4eklU1,UaFOan|22|32-282-723-3627|6718.78|AUTOMOBILE|ges are carefully. slyly ironic deposits about the fin|
+375|Customer#000000375|e53JADEeGvM1ikhN7aa|15|25-575-273-9756|5562.22|HOUSEHOLD|st the pending accounts. final courts above the pending pinto beans use furiously ironic requests. dolphins |
+376|Customer#000000376|4NwsvFQU T4mSgzvU1Rx2ZtHOGyaNyhe|16|26-437-952-8986|4231.45|AUTOMOBILE|gs cajole quickly. bold asymptotes wake regularly along the quickly |
+377|Customer#000000377|PA4levhyD,Rvr0JHQ4QNOqJ9gW YXE|23|33-260-610-4079|1043.72|MACHINERY|. slyly regular ideas cajole blithely. slyly ironic foxes are carefully after the thinly special accou|
+378|Customer#000000378|133stqM9 LT,a2BSlbm49 nXreFggaZgW6P6J|22|32-147-793-4825|5718.05|BUILDING|ackages haggle fluffily ironic packages.|
+379|Customer#000000379|t3QzCf,q1NbshmjOIUY|7|17-228-550-9246|5348.11|AUTOMOBILE|l deposits cajole blithely blithely final deposits. express, even foxes grow carefully about the sile|
+380|Customer#000000380|n2w3Jd1bipwICbOVgrELzcNRexmWSklo|21|31-538-493-4229|2755.46|BUILDING|riously special accounts. slyly final accounts sleep; blithely special requests integrate carefully slyly en|
+381|Customer#000000381|w3zVseYDbjBbzLld|5|15-860-208-7093|9931.71|BUILDING|t regular, bold accounts. carefully quick packages haggle. care|
+382|Customer#000000382|UdgAMamK5JnSykA,ZPfR5W5zRFatDUye|8|18-942-650-6657|6269.42|AUTOMOBILE|. blithely express notornis according to the blithely even requests are never fina|
+383|Customer#000000383|iBIHYgXvVDpu6qq7FlqXVcAIDAzv4qs|2|12-868-920-9034|-849.44|MACHINERY|slyly express ideas haggle blithely unusual dugouts. ironic pinto beans are ironic ideas.|
+384|Customer#000000384|kDDMb3ML nUSu2Sn7CUHyZVedAFUx9|9|19-271-453-8361|-614.30|HOUSEHOLD|olites. express, unusual dolphins cajole carefully about the |
+385|Customer#000000385|zJvPI24TSPpiFzYfu3RvTKQ9|3|13-741-675-6890|2457.09|AUTOMOBILE|rs. blithely ironic deposits nag furiously across the furiously ironic accounts. bold deposits sleep express|
+386|Customer#000000386|DeQxsCxixT8RQ7JV6mddRYGDGQ2WM94|24|34-193-143-1425|232.01|BUILDING|counts. blithely permanent deposits wake slyly! unusual, even theodolites u|
+387|Customer#000000387|Yj 9g1mNu00rKRkc1ovOmptsPI|18|28-694-363-3673|3404.23|HOUSEHOLD|oach. blithely regular instructions sublate across the quickly regular ideas. qui|
+388|Customer#000000388|dV4lqEufXkF8R|7|17-856-814-6352|1938.05|HOUSEHOLD| carefully bold deposits: final pinto beans sleep slyly idl|
+389|Customer#000000389|ij8KNM0,HRvIvnvY w8jQK4zvr1EOO9YM|9|19-264-943-1253|-307.61|AUTOMOBILE|o beans affix fluffily. slyly ironic notornis wake |
+390|Customer#000000390|Nsc3VZZnVsw0mLAnqqzVz,|4|14-812-253-6693|8862.18|HOUSEHOLD| final packages promise quickly. pending theodolites haggle quickly above the doggedly ironic|
+391|Customer#000000391|q10SV05KB1038lzUR8P|11|21-604-451-4462|4801.30|HOUSEHOLD|le blithely final forges. furiously even deposits cajole fluffily even patterns. furious|
+392|Customer#000000392|H7M6JObndO|17|27-601-793-2507|8492.33|BUILDING|efully bold ideas. bold requests sleep carefully blithe instructions. carefully final accounts are blithely quickly |
+393|Customer#000000393|RSELskV44I3LFA9VLGY2Qe|20|30-749-949-5915|3593.57|FURNITURE|ake furiously express notornis. pending accounts hang slyly slyly blithe theod|
+394|Customer#000000394|nxW1jt,MQvImdr z72gAt1bslnfEipCh,bKZN|23|33-422-600-6936|5200.96|MACHINERY| instructions. carefully special ideas after the fluffily unusual r|
+395|Customer#000000395|b06rg6Cl5W|15|25-804-388-6600|4582.28|HOUSEHOLD|s mold blithely regular platelets. slyly silent instructions use slowly slyly specia|
+396|Customer#000000396|miE7JrCdGpQkF4zYJ27tBdSu IYhQ HXx0 |22|32-902-936-4845|1433.50|BUILDING|xcuses. regular pains wake slyly across the ruthlessly ironic dependencies. e|
+397|Customer#000000397|EzR2BKJ85SmBDS|7|17-103-357-8777|709.46|FURNITURE|al theodolites. regular accounts are regular, silent foxes. unusual asymptotes above t|
+398|Customer#000000398|cq9NmtIT4b6JB8L79iLzljlHs4 3|15|25-110-215-3747|8865.61|HOUSEHOLD|l deposits breach slyly ironic asymptotes. carefully pend|
+399|Customer#000000399|ZBvzMa6N1wdCGaPmG13xVusIxdjSiA94jTXN|8|18-882-664-5454|7358.53|BUILDING|yly even excuses. ironic theodolites wake furiously. blithely regular pinto beans cajole. fin|
+400|Customer#000000400|U23zy17EPxqmJn7neVc|14|24-522-746-1247|-98.46|BUILDING|fully bold accounts cajole bravel|
+401|Customer#000000401|aKALIG526OK4veQfUh2KmKcE,oRyg|19|29-667-766-5291|4146.43|BUILDING|l instructions wake. slyly express deposits us|
+402|Customer#000000402|8Cw4p1m1gKYVUgomkAq,es1ZtrnmHaO|6|16-950-729-1638|2106.67|AUTOMOBILE|dolites. furiously regular theodolites integrate furiously. bravely bold requests are. furiously|
+403|Customer#000000403|9,BVYegfkFLsEMDkeVW|14|24-753-433-1769|6693.36|HOUSEHOLD|al hockey players; ironic dependencies after t|
+404|Customer#000000404|2orgvLJ05jOvM292mhkS7iJmHG0jk|22|32-840-785-1776|7408.73|BUILDING|uickly brave requests haggle furiously carefully special idea|
+405|Customer#000000405|mCQNH1rJtqjjQ9Piauc2bZr4pRFydscZtbD9d|10|20-509-301-7901|7519.14|MACHINERY|nts. pending, express foxes sleep? ironic, pending instructions haggle. ironic, pending theodolites detect slyly. bl|
+406|Customer#000000406|j1fOG9WsIr2JI6Yi9jgJ M|9|19-426-693-4043|4286.94|FURNITURE|nal foxes. unusual pinto beans wake. special excuses cajole ironic |
+407|Customer#000000407|cfCP9bE3HnI|1|11-975-454-8499|9537.08|MACHINERY|ect among the carefully regular theodolites. regular dep|
+408|Customer#000000408|TBjb3m,3aea4JtP833HD4VDk7STz2Y9FB|10|20-177-807-5661|6825.37|BUILDING|unts. furiously ironic depths among the instructions wake carefully along the blithely ironi|
+409|Customer#000000409|mtrMiDvQxNsy1Cj0cU4ITEW5wGKLPQ2IPHNE9r4|11|21-466-412-4731|3969.86|FURNITURE|fily pending courts. express, regular packages are furiously along the quickly regular packages.|
+410|Customer#000000410|nYak2u Q9,gYUiLfh1N|7|17-576-345-5940|4349.27|BUILDING| sublate across the pending, express asymptotes. quickly |
+411|Customer#000000411|V3e,FX5x50scsQDzt5,ESxfOQBt4OzjHRoTZxF|18|28-483-924-1955|1209.32|HOUSEHOLD|refully. slyly even packages above the evenly regular asymptotes are blithely ironic dependencies. deposi|
+412|Customer#000000412|5IN2Y,QrhDJ2YBVGKiDbMpzi2hk1fmozIy2zQ|22|32-940-318-3191|6044.02|BUILDING|ithely silent notornis haggle. regular requests haggle according to the ironic deposits. blithely final dep|
+413|Customer#000000413|,4Jm5N0ruhJCB7cBR6Kw|6|16-158-285-7336|5817.90|FURNITURE|ular packages integrate furiously fluffily final accounts. carefully regular |
+414|Customer#000000414|i49DWI61AFb 45vb1RMH|19|29-552-380-2475|527.78|AUTOMOBILE|sily silent, even accounts. careful, final ideas boost fluffily. slyly final pinto be|
+415|Customer#000000415|334jCRiUb,gx3|23|33-346-876-2972|2317.93|FURNITURE|egular deposits. blithely ironic inst|
+416|Customer#000000416|fm7H7k6sYhKfXttOT|12|22-651-146-4780|4365.28|MACHINERY|p the pending pinto beans. furiously express reques|
+417|Customer#000000417|X3LMSpIn4FgjgJxldHVUlUvKzyX|11|21-794-364-5100|6187.73|BUILDING|lent multipliers. quickly express theodolites kindle blithely. ironic re|
+418|Customer#000000418|,e0q82drO rgVHXHrJRQ0GDrRoUOl|5|15-826-508-1218|1211.39|FURNITURE|d foxes against the furiously special packages snooze blithely quickly |
+419|Customer#000000419|gvbZNJ4UVBAo5yOZ2UOWcvV9TeTj|16|26-338-447-2399|7786.69|BUILDING|ideas affix alongside of the final accounts. quickly ironic deposits abo|
+420|Customer#000000420|HV0YB82MWw93 9K|20|30-776-366-5869|1999.35|BUILDING|ideas wake. fluffily ironic packages hang furiously above the regular, even platelets; packages haggle slyly |
+421|Customer#000000421|it3mUlkZAe9J8gmy|13|23-918-228-2560|7073.17|FURNITURE|lithely final deposits haggle furiously above the|
+422|Customer#000000422|AyNzZBvmIDo42JtjP9xzaK3pnvkh Qc0o08ssnvq|9|19-299-247-2444|-272.14|HOUSEHOLD|eposits; furiously ironic packages accordi|
+423|Customer#000000423|Y2B EbOg39GpFLS0n|13|23-201-501-7824|95.79|BUILDING|ts cajole after the silent, pending instructions. ironic, even asymptotes use carefully. furi|
+424|Customer#000000424|i4cf3kmRE9IJr,cu,1|19|29-891-311-6778|1866.42|HOUSEHOLD|bove the express, final deposits wake furiously furiou|
+425|Customer#000000425|lp3aCRBK11qFY|16|26-756-407-4828|5824.88|HOUSEHOLD|ajole even, pending accounts. carefully brave accounts|
+426|Customer#000000426|GjFjM4zjbyhNrV6XlE|19|29-768-330-6311|7818.25|HOUSEHOLD|ar instructions are against the ironic platelets. slyly final acc|
+427|Customer#000000427|LHzXa71U2AGqfbqj1yYYqw2MEXq99dWmY|2|12-124-309-3821|4376.80|MACHINERY|y even multipliers according to the regu|
+428|Customer#000000428|TCVjlzbX7x,kWcHN33LRdEjO38mAGmPR|21|31-587-557-8211|1952.36|BUILDING|furiously quick accounts. slyly bold dependencies cajole carefully. quickly even requests int|
+429|Customer#000000429|kZBtY,LQAFu4iaSagjfIk8Q8dzgmT|15|25-989-936-1954|9247.21|FURNITURE|ly regular requests haggle enticing excuses. carefully ironic requests on th|
+430|Customer#000000430|s2yfPEGGOqHfgkVSs5Rs6 qh,SuVmR|3|13-406-611-4228|7905.17|BUILDING|ly slyly ironic attainments. slyly special instructions until the deposits nag quickly whithout the bo|
+431|Customer#000000431|RNfSXbUJkgUlBBPn|6|16-326-904-6643|2273.50|HOUSEHOLD|e quickly. final, even excuses against the even accounts sleep agai|
+432|Customer#000000432|FDConiq g20GI9dH QTM ZNX4OB9KU|23|33-307-912-9016|5715.64|BUILDING|wake carefully close, special deposits. regu|
+433|Customer#000000433|7XFuE4 euQR0w|20|30-659-445-3595|8746.23|FURNITURE|sual ideas affix carefully always regular accou|
+434|Customer#000000434|6LGAf2hv4MB5MJhfvNsg|3|13-325-443-1474|2940.46|MACHINERY|lly final Tiresias. blithely regular ideas nag stealthily about the furiously |
+435|Customer#000000435|diwjNQSb3wLYLy WfCDATo5rc1I3 s|2|12-741-309-6377|6217.46|MACHINERY|quickly excuses. blithely express theodolites poach slyly along the theodolites. slyly reg|
+436|Customer#000000436|4DCNzAT842cVYTcaUS94kR0QXHSRM5oco0D6Z|19|29-927-687-6390|5896.87|FURNITURE|olites engage carefully. slyly ironic asymptotes about the ironi|
+437|Customer#000000437|0PM1xuHd0q2ElcJp 77F2MykOVBSQnZR8u3jkn|4|14-364-492-8498|7760.52|AUTOMOBILE| foxes sleep across the slyly unusual pack|
+438|Customer#000000438|eqo9A9oaE2CA7 7,L|23|33-394-388-4375|2131.13|MACHINERY|al deposits mold alongside of the fluffily brave requests. |
+439|Customer#000000439|3deBblz2syRv8yMf0yAVKkE4mDH20uDRj4tJVHUm|14|24-873-368-6801|-61.29|BUILDING|ions may impress thinly for the deposits? even packages towa|
+440|Customer#000000440|w4fKMgiBuGmV,nLn7NgJl1DoUWwNQMV8z  5,R|3|13-244-480-5751|1809.04|MACHINERY| even theodolites: fluffily final requests cajole about the quickly regular|
+441|Customer#000000441|gjYpcBx6MP8GvDa6|23|33-438-355-3491|9451.84|HOUSEHOLD|r requests wake theodolites. quickly final ideas haggle fluffily. blithely f|
+442|Customer#000000442|rvgayfJFLO2cjzMA|1|11-240-523-8711|4157.00|FURNITURE|lets would affix fluffily. regular, regular ideas ought to haggle carefully blit|
+443|Customer#000000443|UdyNGZ6GSz5aNpMO5N2|3|13-241-131-1632|3726.22|FURNITURE|t the special, final platelets. bold req|
+444|Customer#000000444|D8l4G8i9aZ7KRbqp6ajvR8h1wjr|1|11-402-300-1949|1505.27|HOUSEHOLD| express accounts along the pending deposits lose carefully above the furiously regular requests. pen|
+445|Customer#000000445|MX1UA0KUJzIGyWM p2hbLg5dCpVLws8KNcwEsP|20|30-849-846-6070|8018.81|FURNITURE|e ironic, special accounts. quickly regular packages integrate fluffily slyly|
+446|Customer#000000446|mJOJwYfch izLCuw70,qhlJSmH|24|34-321-168-5681|9225.60|FURNITURE|ending instructions. boldly ironic foxes across the regularly ironic pains sleep along the carefully final deposits.|
+447|Customer#000000447|hVZBzP8Pii|3|13-438-344-7007|7665.98|HOUSEHOLD|telets around the furiously unusual foxes detect carefully against the |
+448|Customer#000000448|BH4vtnDpabk0NgoGNJWu4OUXnidfJ|24|34-985-422-6009|8117.27|BUILDING|unts. final pinto beans boost carefully. furiously even foxes according to the express, regular pa|
+449|Customer#000000449|DiUXazp8EYcJFsX2a7nciEpo9W5BRB4iqdb9HWL|4|14-893-381-6454|3001.94|MACHINERY|posits boost slyly carefully regular requests. final, bold fo|
+450|Customer#000000450|KVpuYa4dDW8lZZVBttyK614C2qdS|9|19-782-397-9006|5544.42|HOUSEHOLD|gular decoys nod slyly express requests. slyly bold theodolites are along the regular|
+451|Customer#000000451|ZJKTC1Ck,B01fYZ xdN2|20|30-939-275-3248|2110.59|HOUSEHOLD|quests grow furiously final deposits. ironic, even pi|
+452|Customer#000000452|,TI7FdTc gCXUMi09qD|6|16-335-974-9174|6633.70|BUILDING|aggle quickly. unusual instructions i|
+453|Customer#000000453|PZ4mmWL7R,El0MtLWMfLXp120lo0,itmO|8|18-209-381-8571|5678.18|HOUSEHOLD|sts. slyly even dolphins across the bold, regular foxes haggle blithely|
+454|Customer#000000454|d9oQCm3onNsFlIoteVjFcQDv|7|17-818-915-9400|6134.40|AUTOMOBILE|ions print slyly platelets. carefully regular packages according to the fluffy, even foxes wake carefu|
+455|Customer#000000455|sssuscPJ,ZYQ8viO|6|16-863-225-9454|6860.34|BUILDING|l wake. blithely final instructions integrate furiously above the final, regular req|
+456|Customer#000000456|IgUSuulguDJ5|0|10-784-971-7777|8815.78|FURNITURE|ly even warhorses. quickly even requests wake slyly. |
+457|Customer#000000457|eaAWe Vqr0x17Uwj1uzQRb wQpXxZVDWS3Wg|20|30-543-684-2857|5867.61|FURNITURE|the foxes. carefully pending instructions integrate fluffily blithely pending packages. careful|
+458|Customer#000000458|iIKwI3HrgNlD9|4|14-651-706-4016|-38.42|BUILDING|ng. final, express requests are furious|
+459|Customer#000000459|CkGH34iK 9vAHXeY7 wAQIzJa1cmA8DAEA7m|6|16-927-662-8584|1207.97|MACHINERY|ronic, regular dependencies use above the ironic deposits. carefully express packages use car|
+460|Customer#000000460|Gbx5Hnw,ctlI7|11|21-643-955-6555|5222.83|FURNITURE|old dependencies mold slyly above the foxes. dogged, express ins|
+461|Customer#000000461|5vxNLzSASzkbrUr8CRf5|21|31-533-226-4307|9177.63|AUTOMOBILE|sits breach blithely. slyly regular ideas haggle fluffily; special ideas cajole q|
+462|Customer#000000462|MSqsCvNEkowp7FnscRXP6OUWm|21|31-157-561-4106|4522.60|HOUSEHOLD|ly special accounts? ideas engage regular dependencies. fluffily even pinto beans x-ray blith|
+463|Customer#000000463|LV7MN7Tkm2NSo4Q3lwvjxGQyRJjRZRf,M|8|18-167-214-5805|-654.50|HOUSEHOLD| quickly along the final ideas. slyly regular accounts are iro|
+464|Customer#000000464|kAALP9gEt3,G9XtxCXjv38HjKBEP|9|19-269-971-9738|8730.85|AUTOMOBILE|efully express accounts play. special requests use carefully. regular courts sle|
+465|Customer#000000465|gngnTNn7azjgQlQJnakTZto|2|12-137-838-1346|8432.74|FURNITURE|es. quick asymptotes integrate carefully alongside of the ideas. even requests believe slyly even ac|
+466|Customer#000000466|ZI1c8,ZanegEu5CEQxNf5,bkuYPwn7H7JIK7|12|22-280-738-3240|3168.41|MACHINERY|foxes. express, ironic accounts boost? carefully silent deposits engage. accou|
+467|Customer#000000467|amwRkh0nDQ6r6MU|11|21-449-581-5158|9398.51|MACHINERY|manently special warthogs. final ideas a|
+468|Customer#000000468|IcbihAtOVWcnswfyE|10|20-489-960-5023|9834.19|FURNITURE| accounts cajole quickly above the blithely final packages. even, express package|
+469|Customer#000000469|JWOULMa5Qtt|12|22-406-988-6460|6343.64|BUILDING|cajole carefully slyly regular packages.|
+470|Customer#000000470|v9 gWSuP4WrOjNJRgyJtjbNCChQME|20|30-507-458-4433|3597.53|HOUSEHOLD|ilent excuses. never ironic requests sleep furiously. daringly f|
+471|Customer#000000471|tGr0DtrK 91IgzfeZrSPpPIia3|4|14-574-118-1005|5716.90|FURNITURE|es. unusual accounts try to solve ca|
+472|Customer#000000472|hWgfnsmTAEOx9Mqp87YwztGrgLLqNkjMPh4|12|22-940-478-1933|7929.90|MACHINERY|deas sleep slyly blithely final foxes. slyly final e|
+473|Customer#000000473|zO3W9pYj PvlsQGe|9|19-209-647-5704|-202.22|HOUSEHOLD|ter the quickly pending requests sleep above the carefully iron|
+474|Customer#000000474|mvEKw,6zT0V8Yb2yTG hu990UX|21|31-247-536-6143|9165.47|MACHINERY|ns integrate against the quickly special courts. slyly|
+475|Customer#000000475|JJMbj6myLUzMlbUmg63hNtFv4pWL8nq|14|24-485-422-9361|9043.55|BUILDING|egular requests. ironic requests detect furiously; deposits ha|
+476|Customer#000000476|68r87HCBbQkVYaVfes8mgKs|2|12-996-628-9902|5973.10|BUILDING|sly. carefully quick instructions sleep carefully deposits. final, pending pinto beans use closely fluffily final in|
+477|Customer#000000477|5aW5WHphNgFdIS1Qdp2cIJXG8ER8|23|33-845-877-6997|1836.61|AUTOMOBILE|totes are blithely among the furiously final foxes. slyly |
+478|Customer#000000478|clyq458DIkXXt4qLyHlbe,n JueoniF|1|11-655-291-2694|-210.40|BUILDING|o the foxes. ironic requests sleep. c|
+479|Customer#000000479|RdIiG8NbwYtamReRwhR|18|28-336-406-1631|3653.64|AUTOMOBILE|ages. bravely even foxes detect careful|
+480|Customer#000000480|XyQSPswCeO WPD37K3 mYZ4hnCMJO5p|7|17-231-147-5851|2750.71|FURNITURE|posits. slyly ironic theodolites nag carefully about the quickly final accounts. s|
+481|Customer#000000481|o4xa7J20NqHM8E0ykH,NKe1gPz04OqIn|21|31-363-392-6461|7157.21|FURNITURE|s can nag slyly instructions. regular, regular asymptotes haggle sly|
+482|Customer#000000482|389RgNCsmVUKiRskmrQQm90xx JiIxOM0|13|23-732-448-1610|4333.37|HOUSEHOLD|carefully bold instructions. carefully final instructions wake carefully accounts. accounts cajole slyly ironic acc|
+483|Customer#000000483|Yv1QV 1JsV 9sVbNufRvdnprt0grx52|11|21-799-189-1135|8877.20|MACHINERY|pecial ideas. furiously final i|
+484|Customer#000000484|ismzlUzrqRMRGWmCEUUjkBsi|20|30-777-953-8902|4245.00|BUILDING|y against the express, even packages. blithely pending pearls haggle furiously above the fur|
+485|Customer#000000485|XeFbvXCQ,J|19|29-434-961-1623|8695.45|MACHINERY|ecial pinto beans. instructions ought to cajole even|
+486|Customer#000000486|2cXXa6MSx9CGU|21|31-787-534-8723|7487.40|AUTOMOBILE|nstructions. unusual, special pinto beans sleep about the slyly pending requests. fu|
+487|Customer#000000487|oTc,l9dAf8O0qOOMP4P0WFTuGS|2|12-111-401-4259|9749.37|AUTOMOBILE|as. excuses use carefully carefully pending i|
+488|Customer#000000488|bBcMjFPTysSTaTdHcoO|3|13-513-778-1881|-275.58|AUTOMOBILE|thely above the carefully ironic accounts. excuse|
+489|Customer#000000489|GIdW4IVgeqWMBXnNFZGHS8kmhw|4|14-916-241-6195|8255.83|AUTOMOBILE|lar accounts. finally pending dependencies solve fluffily |
+490|Customer#000000490| 66fG3Fyb946cVQsH9Z3VMNzR,yfHMKIEB|22|32-268-147-7824|-213.85|FURNITURE|ash carefully never bold instructions. regular, bold asymptotes cajole regularly. quickly bold foxes wak|
+491|Customer#000000491|r3zPOuenxHl0oqInxWlEyLP1ZH|0|10-856-259-7548|785.37|AUTOMOBILE| slyly special requests hang dogged, express epitaphs.|
+492|Customer#000000492|JexAgMLuUHoElYFaKx,hJcAP1b1GknYoYHQLyx|8|18-686-244-1077|8635.18|AUTOMOBILE|gle furiously furiously final packages. carefully bold pinto beans promise quickly alongside of the close|
+493|Customer#000000493|G dRBjxmBBug1 xRSa6VwRchFDtU5b|16|26-514-558-7246|6582.04|MACHINERY|er the furiously express excuses use above the regular accounts. regular instructions after the |
+494|Customer#000000494|GKgTjHFlQrDZWcketSqhZCopBhmChknI|10|20-330-453-6579|6295.47|FURNITURE|al courts. regular, ironic requests serve furiously. pending|
+495|Customer#000000495|QhFbEv6KbQIwfZs 1krt1eACKI31v3iyM|7|17-400-405-6060|7997.81|BUILDING| dependencies. silent accounts cajole quickly furiously pendin|
+496|Customer#000000496|Y8oYLlHme6Z4fEzkTu|12|22-173-644-7922|8174.82|MACHINERY| quickly bold packages. decoys among the blithely pending accounts lose according to the deposits.|
+497|Customer#000000497|0 qRRXAxUbo1J KDwDMjFde5fXDwn |23|33-937-724-3506|2191.59|BUILDING|fluffy ideas detect carefully |
+498|Customer#000000498|1Wnja9i7KAC3HxS5yATK,In8Q6AHcEUr0f5Tp|19|29-210-810-1479|3945.64|BUILDING|yly pending requests according to the slyly special asymptotes sleep carefully against the slyly even pack|
+499|Customer#000000499|m1hO3VXQVbwTbJ99Hw|14|24-387-817-9149|4293.76|HOUSEHOLD|old sentiments cajole carefully among the blithely unusual requests. final packages nag careful|
+500|Customer#000000500|fy7qx5fHLhcbFL93duj9|4|14-194-736-4233|3300.82|AUTOMOBILE|s boost furiously. slyly special deposits sleep quickly above the furiously i|


[04/50] [abbrv] carbondata git commit: [CARBONDATA-2434] Add ExternalTableExample and LuceneDataMapExample

Posted by ma...@apache.org.
[CARBONDATA-2434] Add ExternalTableExample and LuceneDataMapExample

For preparing 1.4.0 release.

This closes #2268


Project: http://git-wip-us.apache.org/repos/asf/carbondata/repo
Commit: http://git-wip-us.apache.org/repos/asf/carbondata/commit/d5da9a19
Tree: http://git-wip-us.apache.org/repos/asf/carbondata/tree/d5da9a19
Diff: http://git-wip-us.apache.org/repos/asf/carbondata/diff/d5da9a19

Branch: refs/heads/spark-2.3
Commit: d5da9a19434b9a28912110dac422cc89630ac93e
Parents: 09feb9c
Author: chenliang613 <ch...@huawei.com>
Authored: Fri May 4 10:35:52 2018 +0800
Committer: ravipesala <ra...@gmail.com>
Committed: Wed May 9 00:01:46 2018 +0530

----------------------------------------------------------------------
 .../examples/ExternalTableExample.scala         | 104 +++++++++++++++++
 .../examples/LuceneDataMapExample.scala         | 116 +++++++++++++++++++
 .../examples/PreAggregateDataMapExample.scala   |  12 +-
 .../carbondata/examplesCI/RunExamples.scala     |   8 ++
 4 files changed, 234 insertions(+), 6 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/carbondata/blob/d5da9a19/examples/spark2/src/main/scala/org/apache/carbondata/examples/ExternalTableExample.scala
----------------------------------------------------------------------
diff --git a/examples/spark2/src/main/scala/org/apache/carbondata/examples/ExternalTableExample.scala b/examples/spark2/src/main/scala/org/apache/carbondata/examples/ExternalTableExample.scala
new file mode 100644
index 0000000..9d5ee8e
--- /dev/null
+++ b/examples/spark2/src/main/scala/org/apache/carbondata/examples/ExternalTableExample.scala
@@ -0,0 +1,104 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.carbondata.examples
+
+import java.io.File
+
+import org.apache.spark.sql.{CarbonEnv, SaveMode, SparkSession}
+
+import org.apache.carbondata.core.constants.CarbonCommonConstants
+import org.apache.carbondata.core.metadata.CarbonTableIdentifier
+import org.apache.carbondata.core.util.CarbonProperties
+import org.apache.carbondata.examples.util.ExampleUtils
+
+/**
+ * This example shows how to create an external table with a specified location.
+ */
+
+object ExternalTableExample {
+
+  def main(args: Array[String]) {
+    val spark = ExampleUtils.createCarbonSession("ExternalTableExample")
+    exampleBody(spark)
+    spark.close()
+  }
+
+  def exampleBody(spark : SparkSession): Unit = {
+
+    CarbonProperties.getInstance()
+      .addProperty(CarbonCommonConstants.CARBON_DATE_FORMAT, "yyyy/MM/dd")
+
+    // Create origin_table
+    spark.sql("DROP TABLE IF EXISTS origin_table")
+    spark.sql(
+      s"""
+         | CREATE TABLE origin_table(
+         | shortField SHORT,
+         | intField INT,
+         | bigintField LONG,
+         | doubleField DOUBLE,
+         | stringField STRING,
+         | timestampField TIMESTAMP,
+         | decimalField DECIMAL(18,2),
+         | dateField DATE,
+         | charField CHAR(5),
+         | floatField FLOAT
+         | )
+         | STORED BY 'carbondata'
+       """.stripMargin)
+
+    val rootPath = new File(this.getClass.getResource("/").getPath
+                            + "../../../..").getCanonicalPath
+    val path = s"$rootPath/examples/spark2/src/main/resources/data.csv"
+
+    // load 4 times, each load has 10 rows of data
+    // scalastyle:off
+    (1 to 4).foreach(_ => spark.sql(
+      s"""
+         | LOAD DATA LOCAL INPATH '$path'
+         | INTO TABLE origin_table
+         | OPTIONS('HEADER'='true', 'COMPLEX_DELIMITER_LEVEL_1'='#')
+       """.stripMargin))
+    // scalastyle:on
+
+    // 40 rows
+    spark.sql("SELECT count(*) FROM origin_table").show()
+
+    val origin_table_path = CarbonEnv.getTablePath(Some("default"), "origin_table")(spark)
+
+    // Create external_table
+    spark.sql("DROP TABLE IF EXISTS external_table")
+    spark.sql("CREATE EXTERNAL TABLE external_table STORED BY 'carbondata'" +
+              s" LOCATION '$origin_table_path'")
+    spark.sql("SELECT count(*) FROM external_table").show()
+
+    // Load 2 times again
+    (1 to 2).foreach(_ => spark.sql(
+      s"""
+         | LOAD DATA LOCAL INPATH '$path'
+         | INTO TABLE origin_table
+         | OPTIONS('HEADER'='true', 'COMPLEX_DELIMITER_LEVEL_1'='#')
+       """.stripMargin))
+
+    spark.sql("SELECT count(*) FROM external_table").show()
+
+    // Drop tables
+    spark.sql("DROP TABLE IF EXISTS origin_table")
+    spark.sql("DROP TABLE IF EXISTS external_table")
+  }
+}
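
The core of the example above is pairing CarbonEnv.getTablePath with CREATE EXTERNAL TABLE ... LOCATION, so the external table reads the origin table's files in place. A minimal sketch of that pattern, assuming a CarbonSession named spark and the already-loaded origin_table from the example in the default database, and using only the calls shown in the diff:

    import org.apache.spark.sql.CarbonEnv

    // Resolve the storage path of the origin table (same call as in the example above).
    val originTablePath = CarbonEnv.getTablePath(Some("default"), "origin_table")(spark)

    // Point an external table at that location; new loads into origin_table
    // become visible through external_table because both read the same files.
    spark.sql("DROP TABLE IF EXISTS external_table")
    spark.sql(s"CREATE EXTERNAL TABLE external_table STORED BY 'carbondata' LOCATION '$originTablePath'")
    spark.sql("SELECT count(*) FROM external_table").show()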

http://git-wip-us.apache.org/repos/asf/carbondata/blob/d5da9a19/examples/spark2/src/main/scala/org/apache/carbondata/examples/LuceneDataMapExample.scala
----------------------------------------------------------------------
diff --git a/examples/spark2/src/main/scala/org/apache/carbondata/examples/LuceneDataMapExample.scala b/examples/spark2/src/main/scala/org/apache/carbondata/examples/LuceneDataMapExample.scala
new file mode 100644
index 0000000..efe2a63
--- /dev/null
+++ b/examples/spark2/src/main/scala/org/apache/carbondata/examples/LuceneDataMapExample.scala
@@ -0,0 +1,116 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.carbondata.examples
+
+import org.apache.spark.sql.{SaveMode, SparkSession}
+
+import org.apache.carbondata.examples.util.ExampleUtils
+
+
+/**
+ * This example demonstrates the Lucene datamap.
+ */
+
+object LuceneDataMapExample {
+
+  def main(args: Array[String]) {
+    val spark = ExampleUtils.createCarbonSession("LuceneDataMapExample")
+    exampleBody(spark)
+    spark.close()
+  }
+
+  def exampleBody(spark : SparkSession): Unit = {
+
+    // build the test data; increase the volume for a more obvious comparison.
+    // if the data set is larger than 100M, it will take 10+ mins.
+    import scala.util.Random
+
+    import spark.implicits._
+    val r = new Random()
+    val df = spark.sparkContext.parallelize(1 to 10 * 10 * 1000)
+      .map(x => ("which test" + r.nextInt(10000) + " good" + r.nextInt(10),
+      "who and name" + x % 8, "city" + x % 50, x % 60))
+      .toDF("id", "name", "city", "age")
+
+    spark.sql("DROP TABLE IF EXISTS personTable")
+    df.write.format("carbondata")
+      .option("tableName", "personTable")
+      .option("compress", "true")
+      .mode(SaveMode.Overwrite).save()
+
+    // create lucene datamap on personTable
+    spark.sql(
+      s"""
+         | CREATE DATAMAP IF NOT EXISTS dm ON TABLE personTable
+         | USING 'lucene'
+         | DMProperties('INDEX_COLUMNS'='id , name')
+      """.stripMargin)
+
+    spark.sql("refresh datamap dm ON TABLE personTable")
+
+    // 1. Compare the performance:
+
+    def time(code: => Unit): Double = {
+      val start = System.currentTimeMillis()
+      code
+      // return elapsed time in seconds
+      (System.currentTimeMillis() - start).toDouble / 1000
+    }
+
+    val time_without_lucenedatamap = time {
+
+      spark.sql(
+        s"""
+           | SELECT count(*)
+           | FROM personTable where id like '% test1 %'
+      """.stripMargin).show()
+
+    }
+
+    val time_with_lucenedatamap = time {
+
+      spark.sql(
+        s"""
+           | SELECT count(*)
+           | FROM personTable where TEXT_MATCH('id:test1')
+      """.stripMargin).show()
+
+    }
+
+    // scalastyle:off
+    println("time for query on table with lucene datamap: " + time_with_lucenedatamap.toString)
+    println("time for query on table without lucene datamap: " + time_without_lucenedatamap.toString)
+    // scalastyle:on
+
+    // 2. Search the id field for rows that match "test1" but not "good1"
+    spark.sql(
+      s"""
+         | SELECT id,name
+         | FROM personTable where TEXT_MATCH('id:test1 -id:good1')
+      """.stripMargin).show(100)
+
+    // 3. TEXT_MATCH_WITH_LIMIT usage:
+    spark.sql(
+      s"""
+         | SELECT id,name
+         | FROM personTable where TEXT_MATCH_WITH_LIMIT('id:test1',10)
+      """.stripMargin).show()
+
+    spark.sql("DROP TABLE IF EXISTS personTable")
+  }
+}
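
The three query shapes exercised above differ only in the filter expression. A condensed sketch, assuming the personTable and the dm Lucene datamap created by the example, and no syntax beyond what the example itself uses:

    // Plain LIKE filter: evaluated without the Lucene datamap.
    spark.sql("SELECT count(*) FROM personTable WHERE id LIKE '% test1 %'").show()

    // TEXT_MATCH takes Lucene query syntax; '-id:good1' excludes rows whose id matches good1.
    spark.sql("SELECT id, name FROM personTable WHERE TEXT_MATCH('id:test1 -id:good1')").show()

    // TEXT_MATCH_WITH_LIMIT additionally caps the number of rows fetched from the index (10 here).
    spark.sql("SELECT id, name FROM personTable WHERE TEXT_MATCH_WITH_LIMIT('id:test1', 10)").show()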

http://git-wip-us.apache.org/repos/asf/carbondata/blob/d5da9a19/examples/spark2/src/main/scala/org/apache/carbondata/examples/PreAggregateDataMapExample.scala
----------------------------------------------------------------------
diff --git a/examples/spark2/src/main/scala/org/apache/carbondata/examples/PreAggregateDataMapExample.scala b/examples/spark2/src/main/scala/org/apache/carbondata/examples/PreAggregateDataMapExample.scala
index 367c011..b008bd4 100644
--- a/examples/spark2/src/main/scala/org/apache/carbondata/examples/PreAggregateDataMapExample.scala
+++ b/examples/spark2/src/main/scala/org/apache/carbondata/examples/PreAggregateDataMapExample.scala
@@ -65,10 +65,6 @@ object PreAggregateDataMapExample {
        LOAD DATA LOCAL INPATH '$testData' into table mainTable
        """)
 
-    spark.sql("""
-       select * from mainTable
-       """)
-
     spark.sql(s"""
        LOAD DATA LOCAL INPATH '$testData' into table mainTable_other
        """)
@@ -152,16 +148,20 @@ object PreAggregateDataMapExample {
 
     // 2.compare the performance : with pre-aggregate VS main table
 
-    // build test data, if set the data is larger than 100M, it will take 10+ mins.
+    // build the test data; increase the volume for a more obvious comparison.
+    // if the data set is larger than 100M, it will take 10+ mins.
+
     import spark.implicits._
 
     import scala.util.Random
     val r = new Random()
-    val df = spark.sparkContext.parallelize(1 to 10 * 1000 * 1000)
+    val df = spark.sparkContext.parallelize(1 to 10 * 10 * 1000)
       .map(x => ("No." + r.nextInt(100000), "name" + x % 8, "city" + x % 50, x % 60))
       .toDF("ID", "name", "city", "age")
 
     // Create table with pre-aggregate
+    spark.sql("DROP TABLE IF EXISTS personTable")
+    spark.sql("DROP TABLE IF EXISTS personTableWithoutAgg")
     df.write.format("carbondata")
       .option("tableName", "personTable")
       .option("compress", "true")

http://git-wip-us.apache.org/repos/asf/carbondata/blob/d5da9a19/examples/spark2/src/test/scala/org/apache/carbondata/examplesCI/RunExamples.scala
----------------------------------------------------------------------
diff --git a/examples/spark2/src/test/scala/org/apache/carbondata/examplesCI/RunExamples.scala b/examples/spark2/src/test/scala/org/apache/carbondata/examplesCI/RunExamples.scala
index ddf8ee5..2b9b999 100644
--- a/examples/spark2/src/test/scala/org/apache/carbondata/examplesCI/RunExamples.scala
+++ b/examples/spark2/src/test/scala/org/apache/carbondata/examplesCI/RunExamples.scala
@@ -105,4 +105,12 @@ class RunExamples extends QueryTest with BeforeAndAfterAll {
   test("TimeSeriesPreAggregateTableExample") {
     TimeSeriesPreAggregateTableExample.exampleBody(spark)
   }
+
+  test("LuceneDataMapExample") {
+    LuceneDataMapExample.exampleBody(spark)
+  }
+
+  test("ExternalTableExample") {
+    ExternalTableExample.exampleBody(spark)
+  }
 }
\ No newline at end of file


[26/50] [abbrv] carbondata git commit: [CARBONDATA-2474] Support Modular Plan for Materialized View DataMap

Posted by ma...@apache.org.
http://git-wip-us.apache.org/repos/asf/carbondata/blob/ffddba70/datamap/mv/plan/src/main/scala/org/apache/carbondata/mv/testutil/Tpcds_1_4_Tables.scala
----------------------------------------------------------------------
diff --git a/datamap/mv/plan/src/main/scala/org/apache/carbondata/mv/testutil/Tpcds_1_4_Tables.scala b/datamap/mv/plan/src/main/scala/org/apache/carbondata/mv/testutil/Tpcds_1_4_Tables.scala
new file mode 100644
index 0000000..97772c7
--- /dev/null
+++ b/datamap/mv/plan/src/main/scala/org/apache/carbondata/mv/testutil/Tpcds_1_4_Tables.scala
@@ -0,0 +1,819 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.carbondata.mv.testutil
+
+object Tpcds_1_4_Tables {
+  val tpcds1_4Tables = Seq[String](
+    s"""
+       |CREATE TABLE catalog_sales (
+       |  `cs_sold_date_sk` int,
+       |  `cs_sold_time_sk` int,
+       |  `cs_ship_date_sk` int,
+       |  `cs_bill_customer_sk` int,
+       |  `cs_bill_cdemo_sk` int,
+       |  `cs_bill_hdemo_sk` int,
+       |  `cs_bill_addr_sk` int,
+       |  `cs_ship_customer_sk` int,
+       |  `cs_ship_cdemo_sk` int,
+       |  `cs_ship_hdemo_sk` int,
+       |  `cs_ship_addr_sk` int,
+       |  `cs_call_center_sk` int,
+       |  `cs_catalog_page_sk` int,
+       |  `cs_ship_mode_sk` int,
+       |  `cs_warehouse_sk` int,
+       |  `cs_item_sk` int,
+       |  `cs_promo_sk` int,
+       |  `cs_order_number` bigint,
+       |  `cs_quantity` int,
+       |  `cs_wholesale_cost` decimal(7,2),
+       |  `cs_list_price` decimal(7,2),
+       |  `cs_sales_price` decimal(7,2),
+       |  `cs_ext_discount_amt` decimal(7,2),
+       |  `cs_ext_sales_price` decimal(7,2),
+       |  `cs_ext_wholesale_cost` decimal(7,2),
+       |  `cs_ext_list_price` decimal(7,2),
+       |  `cs_ext_tax` decimal(7,2),
+       |  `cs_coupon_amt` decimal(7,2),
+       |  `cs_ext_ship_cost` decimal(7,2),
+       |  `cs_net_paid` decimal(7,2),
+       |  `cs_net_paid_inc_tax` decimal(7,2),
+       |  `cs_net_paid_inc_ship` decimal(7,2),
+       |  `cs_net_paid_inc_ship_tax` decimal(7,2),
+       |  `cs_net_profit` decimal(7,2)
+       |)
+       |STORED BY 'org.apache.carbondata.format'
+      """.stripMargin.trim,
+    s"""
+       |CREATE TABLE catalog_returns (
+       |  `cr_returned_date_sk` int,
+       |  `cr_returned_time_sk` int,
+       |  `cr_item_sk` int,
+       |  `cr_refunded_customer_sk` int,
+       |  `cr_refunded_cdemo_sk` int,
+       |  `cr_refunded_hdemo_sk` int,
+       |  `cr_refunded_addr_sk` int,
+       |  `cr_returning_customer_sk` int,
+       |  `cr_returning_cdemo_sk` int,
+       |  `cr_returning_hdemo_sk` int,
+       |  `cr_returning_addr_sk` int,
+       |  `cr_call_center_sk` int,
+       |  `cr_catalog_page_sk` int,
+       |  `cr_ship_mode_sk` int,
+       |  `cr_warehouse_sk` int,
+       |  `cr_reason_sk` int,
+       |  `cr_order_number` bigint,
+       |  `cr_return_quantity` int,
+       |  `cr_return_amount` decimal(7,2),
+       |  `cr_return_tax` decimal(7,2),
+       |  `cr_return_amt_inc_tax` decimal(7,2),
+       |  `cr_fee` decimal(7,2),
+       |  `cr_return_ship_cost` decimal(7,2),
+       |  `cr_refunded_cash` decimal(7,2),
+       |  `cr_reversed_charge` decimal(7,2),
+       |  `cr_store_credit` decimal(7,2),
+       |  `cr_net_loss` decimal(7,2)
+       |)
+       |STORED BY 'org.apache.carbondata.format'
+      """.stripMargin.trim,
+    s"""
+       |CREATE TABLE inventory (
+       |  `inv_date_sk` int,
+       |  `inv_item_sk` int,
+       |  `inv_warehouse_sk` int,
+       |  `inv_quantity_on_hand` int
+       |)
+       |STORED BY 'org.apache.carbondata.format'
+      """.stripMargin.trim,
+    s"""
+       |CREATE TABLE store_sales (
+       |  `ss_sold_date_sk` int,
+       |  `ss_sold_time_sk` int,
+       |  `ss_item_sk` int,
+       |  `ss_customer_sk` int,
+       |  `ss_cdemo_sk` int,
+       |  `ss_hdemo_sk` int,
+       |  `ss_addr_sk` int,
+       |  `ss_store_sk` int,
+       |  `ss_promo_sk` int,
+       |  `ss_ticket_number` bigint,
+       |  `ss_quantity` int,
+       |  `ss_wholesale_cost` decimal(7,2),
+       |  `ss_list_price` decimal(7,2),
+       |  `ss_sales_price` decimal(7,2),
+       |  `ss_ext_discount_amt` decimal(7,2),
+       |  `ss_ext_sales_price` decimal(7,2),
+       |  `ss_ext_wholesale_cost` decimal(7,2),
+       |  `ss_ext_list_price` decimal(7,2),
+       |  `ss_ext_tax` decimal(7,2),
+       |  `ss_coupon_amt` decimal(7,2),
+       |  `ss_net_paid` decimal(7,2),
+       |  `ss_net_paid_inc_tax` decimal(7,2),
+       |  `ss_net_profit` decimal(7,2)
+       |)
+       |STORED BY 'org.apache.carbondata.format'
+      """.stripMargin.trim,
+    s"""
+       |CREATE TABLE store_returns (
+       |  `sr_returned_date_sk` int,
+       |  `sr_return_time_sk` int,
+       |  `sr_item_sk` int,
+       |  `sr_customer_sk` int,
+       |  `sr_cdemo_sk` int,
+       |  `sr_hdemo_sk` int,
+       |  `sr_addr_sk` int,
+       |  `sr_store_sk` int,
+       |  `sr_reason_sk` int,
+       |  `sr_ticket_number` bigint,
+       |  `sr_return_quantity` int,
+       |  `sr_return_amt` decimal(7,2),
+       |  `sr_return_tax` decimal(7,2),
+       |  `sr_return_amt_inc_tax` decimal(7,2),
+       |  `sr_fee` decimal(7,2),
+       |  `sr_return_ship_cost` decimal(7,2),
+       |  `sr_refunded_cash` decimal(7,2),
+       |  `sr_reversed_charge` decimal(7,2),
+       |  `sr_store_credit` decimal(7,2),
+       |  `sr_net_loss` decimal(7,2)
+       |)
+       |STORED BY 'org.apache.carbondata.format'
+      """.stripMargin.trim,
+    s"""
+       |CREATE TABLE web_sales (
+       |  `ws_sold_date_sk` int,
+       |  `ws_sold_time_sk` int,
+       |  `ws_ship_date_sk` int,
+       |  `ws_item_sk` int,
+       |  `ws_bill_customer_sk` int,
+       |  `ws_bill_cdemo_sk` int,
+       |  `ws_bill_hdemo_sk` int,
+       |  `ws_bill_addr_sk` int,
+       |  `ws_ship_customer_sk` int,
+       |  `ws_ship_cdemo_sk` int,
+       |  `ws_ship_hdemo_sk` int,
+       |  `ws_ship_addr_sk` int,
+       |  `ws_web_page_sk` int,
+       |  `ws_web_site_sk` int,
+       |  `ws_ship_mode_sk` int,
+       |  `ws_warehouse_sk` int,
+       |  `ws_promo_sk` int,
+       |  `ws_order_number` bigint,
+       |  `ws_quantity` int,
+       |  `ws_wholesale_cost` decimal(7,2),
+       |  `ws_list_price` decimal(7,2),
+       |  `ws_sales_price` decimal(7,2),
+       |  `ws_ext_discount_amt` decimal(7,2),
+       |  `ws_ext_sales_price` decimal(7,2),
+       |  `ws_ext_wholesale_cost` decimal(7,2),
+       |  `ws_ext_list_price` decimal(7,2),
+       |  `ws_ext_tax` decimal(7,2),
+       |  `ws_coupon_amt` decimal(7,2),
+       |  `ws_ext_ship_cost` decimal(7,2),
+       |  `ws_net_paid` decimal(7,2),
+       |  `ws_net_paid_inc_tax` decimal(7,2),
+       |  `ws_net_paid_inc_ship` decimal(7,2),
+       |  `ws_net_paid_inc_ship_tax` decimal(7,2),
+       |  `ws_net_profit` decimal(7,2)
+       |)
+       |STORED BY 'org.apache.carbondata.format'
+      """.stripMargin.trim,
+    s"""
+       |CREATE TABLE web_returns (
+       |  `wr_returned_date_sk` int,
+       |  `wr_returned_time_sk` int,
+       |  `wr_item_sk` int,
+       |  `wr_refunded_customer_sk` int,
+       |  `wr_refunded_cdemo_sk` int,
+       |  `wr_refunded_hdemo_sk` int,
+       |  `wr_refunded_addr_sk` int,
+       |  `wr_returning_customer_sk` int,
+       |  `wr_returning_cdemo_sk` int,
+       |  `wr_returning_hdemo_sk` int,
+       |  `wr_returning_addr_sk` int,
+       |  `wr_web_page_sk` int,
+       |  `wr_reason_sk` int,
+       |  `wr_order_number` bigint,
+       |  `wr_return_quantity` int,
+       |  `wr_return_amt` decimal(7,2),
+       |  `wr_return_tax` decimal(7,2),
+       |  `wr_return_amt_inc_tax` decimal(7,2),
+       |  `wr_fee` decimal(7,2),
+       |  `wr_return_ship_cost` decimal(7,2),
+       |  `wr_refunded_cash` decimal(7,2),
+       |  `wr_reversed_charge` decimal(7,2),
+       |  `wr_account_credit` decimal(7,2),
+       |  `wr_net_loss` decimal(7,2)
+       |)
+       |STORED BY 'org.apache.carbondata.format'
+      """.stripMargin.trim,
+    s"""
+       |CREATE TABLE call_center (
+       |  `cc_call_center_sk` int,
+       |  `cc_call_center_id` string,
+       |  `cc_rec_start_date` date,
+       |  `cc_rec_end_date` date,
+       |  `cc_closed_date_sk` int,
+       |  `cc_open_date_sk` int,
+       |  `cc_name` string,
+       |  `cc_class` string,
+       |  `cc_employees` int,
+       |  `cc_sq_ft` int,
+       |  `cc_hours` string,
+       |  `cc_manager` string,
+       |  `cc_mkt_id` int,
+       |  `cc_mkt_class` string,
+       |  `cc_mkt_desc` string,
+       |  `cc_market_manager` string,
+       |  `cc_division` int,
+       |  `cc_division_name` string,
+       |  `cc_company` int,
+       |  `cc_company_name` string,
+       |  `cc_street_number` string,
+       |  `cc_street_name` string,
+       |  `cc_street_type` string,
+       |  `cc_suite_number` string,
+       |  `cc_city` string,
+       |  `cc_county` string,
+       |  `cc_state` string,
+       |  `cc_zip` string,
+       |  `cc_country` string,
+       |  `cc_gmt_offset` decimal(5,2),
+       |  `cc_tax_percentage` decimal(5,2)
+       |)
+       |STORED BY 'org.apache.carbondata.format'
+      """.stripMargin.trim,
+    s"""
+       |CREATE TABLE catalog_page (
+       |  `cp_catalog_page_sk` int,
+       |  `cp_catalog_page_id` string,
+       |  `cp_start_date_sk` int,
+       |  `cp_end_date_sk` int,
+       |  `cp_department` string,
+       |  `cp_catalog_number` int,
+       |  `cp_catalog_page_number` int,
+       |  `cp_description` string,
+       |  `cp_type` string
+       |)
+       |STORED BY 'org.apache.carbondata.format'
+      """.stripMargin.trim,
+    s"""
+       |CREATE TABLE customer (
+       |  `c_customer_sk` int,
+       |  `c_customer_id` string,
+       |  `c_current_cdemo_sk` int,
+       |  `c_current_hdemo_sk` int,
+       |  `c_current_addr_sk` int,
+       |  `c_first_shipto_date_sk` int,
+       |  `c_first_sales_date_sk` int,
+       |  `c_salutation` string,
+       |  `c_first_name` string,
+       |  `c_last_name` string,
+       |  `c_preferred_cust_flag` string,
+       |  `c_birth_day` int,
+       |  `c_birth_month` int,
+       |  `c_birth_year` int,
+       |  `c_birth_country` string,
+       |  `c_login` string,
+       |  `c_email_address` string,
+       |  `c_last_review_date` string
+       |)
+       |STORED BY 'org.apache.carbondata.format'
+      """.stripMargin.trim,
+    s"""
+       |CREATE TABLE customer_address (
+       |  `ca_address_sk` int,
+       |  `ca_address_id` string,
+       |  `ca_street_number` string,
+       |  `ca_street_name` string,
+       |  `ca_street_type` string,
+       |  `ca_suite_number` string,
+       |  `ca_city` string,
+       |  `ca_county` string,
+       |  `ca_state` string,
+       |  `ca_zip` string,
+       |  `ca_country` string,
+       |  `ca_gmt_offset` decimal(5,2),
+       |  `ca_location_type` string
+       |)
+       |STORED BY 'org.apache.carbondata.format'
+      """.stripMargin.trim,
+    s"""
+       |CREATE TABLE customer_demographics (
+       |  `cd_demo_sk` int,
+       |  `cd_gender` string,
+       |  `cd_marital_status` string,
+       |  `cd_education_status` string,
+       |  `cd_purchase_estimate` int,
+       |  `cd_credit_rating` string,
+       |  `cd_dep_count` int,
+       |  `cd_dep_employed_count` int,
+       |  `cd_dep_college_count` int
+       |)
+       |STORED BY 'org.apache.carbondata.format'
+      """.stripMargin.trim,
+    s"""
+       |CREATE TABLE date_dim (
+       |  `d_date_sk` int,
+       |  `d_date_id` string,
+       |  `d_date` date,
+       |  `d_month_seq` int,
+       |  `d_week_seq` int,
+       |  `d_quarter_seq` int,
+       |  `d_year` int,
+       |  `d_dow` int,
+       |  `d_moy` int,
+       |  `d_dom` int,
+       |  `d_qoy` int,
+       |  `d_fy_year` int,
+       |  `d_fy_quarter_seq` int,
+       |  `d_fy_week_seq` int,
+       |  `d_day_name` string,
+       |  `d_quarter_name` string,
+       |  `d_holiday` string,
+       |  `d_weekend` string,
+       |  `d_following_holiday` string,
+       |  `d_first_dom` int,
+       |  `d_last_dom` int,
+       |  `d_same_day_ly` int,
+       |  `d_same_day_lq` int,
+       |  `d_current_day` string,
+       |  `d_current_week` string,
+       |  `d_current_month` string,
+       |  `d_current_quarter` string,
+       |  `d_current_year` string
+       |)
+       |STORED BY 'org.apache.carbondata.format'
+      """.stripMargin.trim,
+    s"""
+       |CREATE TABLE household_demographics (
+       |  `hd_demo_sk` int,
+       |  `hd_income_band_sk` int,
+       |  `hd_buy_potential` string,
+       |  `hd_dep_count` int,
+       |  `hd_vehicle_count` int
+       |)
+       |STORED BY 'org.apache.carbondata.format'
+      """.stripMargin.trim,
+    s"""
+       |CREATE TABLE income_band (
+       |  `ib_income_band_sk` int,
+       |  `ib_lower_bound` int,
+       |  `ib_upper_bound` int
+       |)
+       |STORED BY 'org.apache.carbondata.format'
+      """.stripMargin.trim,
+    s"""
+       |CREATE TABLE item (
+       |  `i_item_sk` int,
+       |  `i_item_id` string,
+       |  `i_rec_start_date` date,
+       |  `i_rec_end_date` date,
+       |  `i_item_desc` string,
+       |  `i_current_price` decimal(7,2),
+       |  `i_wholesale_cost` decimal(7,2),
+       |  `i_brand_id` int,
+       |  `i_brand` string,
+       |  `i_class_id` int,
+       |  `i_class` string,
+       |  `i_category_id` int,
+       |  `i_category` string,
+       |  `i_manufact_id` int,
+       |  `i_manufact` string,
+       |  `i_size` string,
+       |  `i_formulation` string,
+       |  `i_color` string,
+       |  `i_units` string,
+       |  `i_container` string,
+       |  `i_manager_id` int,
+       |  `i_product_name` string
+       |)
+       |STORED BY 'org.apache.carbondata.format'
+      """.stripMargin.trim,
+    s"""
+       |CREATE TABLE promotion (
+       |  `p_promo_sk` int,
+       |  `p_promo_id` string,
+       |  `p_start_date_sk` int,
+       |  `p_end_date_sk` int,
+       |  `p_item_sk` int,
+       |  `p_cost` decimal(15,2),
+       |  `p_response_target` int,
+       |  `p_promo_name` string,
+       |  `p_channel_dmail` string,
+       |  `p_channel_email` string,
+       |  `p_channel_catalog` string,
+       |  `p_channel_tv` string,
+       |  `p_channel_radio` string,
+       |  `p_channel_press` string,
+       |  `p_channel_event` string,
+       |  `p_channel_demo` string,
+       |  `p_channel_details` string,
+       |  `p_purpose` string,
+       |  `p_discount_active` string
+       |)
+       |STORED BY 'org.apache.carbondata.format'
+      """.stripMargin.trim,
+    s"""
+       |CREATE TABLE reason (
+       |  `r_reason_sk` int,
+       |  `r_reason_id` string,
+       |  `r_reason_desc` string
+       |)
+       |STORED BY 'org.apache.carbondata.format'
+      """.stripMargin.trim,
+    s"""
+       |CREATE TABLE ship_mode (
+       |  `sm_ship_mode_sk` int,
+       |  `sm_ship_mode_id` string,
+       |  `sm_type` string,
+       |  `sm_code` string,
+       |  `sm_carrier` string,
+       |  `sm_contract` string
+       |)
+       |STORED BY 'org.apache.carbondata.format'
+      """.stripMargin.trim,
+    s"""
+       |CREATE TABLE store (
+       |  `s_store_sk` int,
+       |  `s_store_id` string,
+       |  `s_rec_start_date` date,
+       |  `s_rec_end_date` date,
+       |  `s_closed_date_sk` int,
+       |  `s_store_name` string,
+       |  `s_number_employees` int,
+       |  `s_floor_space` int,
+       |  `s_hours` string,
+       |  `s_manager` string,
+       |  `s_market_id` int,
+       |  `s_geography_class` string,
+       |  `s_market_desc` string,
+       |  `s_market_manager` string,
+       |  `s_division_id` int,
+       |  `s_division_name` string,
+       |  `s_company_id` int,
+       |  `s_company_name` string,
+       |  `s_street_number` string,
+       |  `s_street_name` string,
+       |  `s_street_type` string,
+       |  `s_suite_number` string,
+       |  `s_city` string,
+       |  `s_county` string,
+       |  `s_state` string,
+       |  `s_zip` string,
+       |  `s_country` string,
+       |  `s_gmt_offset` decimal(5,2),
+       |  `s_tax_precentage` decimal(5,2)
+       |)
+       |STORED BY 'org.apache.carbondata.format'
+      """.stripMargin.trim,
+    s"""
+       |CREATE TABLE time_dim (
+       |  `t_time_sk` int,
+       |  `t_time_id` string,
+       |  `t_time` int,
+       |  `t_hour` int,
+       |  `t_minute` int,
+       |  `t_second` int,
+       |  `t_am_pm` string,
+       |  `t_shift` string,
+       |  `t_sub_shift` string,
+       |  `t_meal_time` string
+       |)
+       |STORED BY 'org.apache.carbondata.format'
+      """.stripMargin.trim,
+    s"""
+       |CREATE TABLE warehouse (
+       |  `w_warehouse_sk` int,
+       |  `w_warehouse_id` string,
+       |  `w_warehouse_name` string,
+       |  `w_warehouse_sq_ft` int,
+       |  `w_street_number` string,
+       |  `w_street_name` string,
+       |  `w_street_type` string,
+       |  `w_suite_number` string,
+       |  `w_city` string,
+       |  `w_county` string,
+       |  `w_state` string,
+       |  `w_zip` string,
+       |  `w_country` string,
+       |  `w_gmt_offset` decimal(5,2)
+       |)
+       |STORED BY 'org.apache.carbondata.format'
+      """.stripMargin.trim,
+    s"""
+       |CREATE TABLE web_page (
+       |  `wp_web_page_sk` int,
+       |  `wp_web_page_id` string,
+       |  `wp_rec_start_date` date,
+       |  `wp_rec_end_date` date,
+       |  `wp_creation_date_sk` int,
+       |  `wp_access_date_sk` int,
+       |  `wp_autogen_flag` string,
+       |  `wp_customer_sk` int,
+       |  `wp_url` string,
+       |  `wp_type` string,
+       |  `wp_char_count` int,
+       |  `wp_link_count` int,
+       |  `wp_image_count` int,
+       |  `wp_max_ad_count` int
+       |)
+       |STORED BY 'org.apache.carbondata.format'
+      """.stripMargin.trim,
+    s"""
+       |CREATE TABLE web_site (
+       |  `web_site_sk` int,
+       |  `web_site_id` string,
+       |  `web_rec_start_date` date,
+       |  `web_rec_end_date` date,
+       |  `web_name` string,
+       |  `web_open_date_sk` int,
+       |  `web_close_date_sk` int,
+       |  `web_class` string,
+       |  `web_manager` string,
+       |  `web_mkt_id` int,
+       |  `web_mkt_class` string,
+       |  `web_mkt_desc` string,
+       |  `web_market_manager` string,
+       |  `web_company_id` int,
+       |  `web_company_name` string,
+       |  `web_street_number` string,
+       |  `web_street_name` string,
+       |  `web_street_type` string,
+       |  `web_suite_number` string,
+       |  `web_city` string,
+       |  `web_county` string,
+       |  `web_state` string,
+       |  `web_zip` string,
+       |  `web_country` string,
+       |  `web_gmt_offset` decimal(5,2),
+       |  `web_tax_percentage` decimal(5,2)
+       |)
+       |STORED BY 'org.apache.carbondata.format'
+      """.stripMargin.trim,
+    s"""
+       |CREATE TABLE sdr_dyn_seq_custer_iot_all_hour_60min
+       |(
+       |    `dim_1`       String,
+       |    `dim_51`      String,
+       |    `starttime`   String,
+       |    `dim_2`       String,
+       |    `dim_3`       String,
+       |    `dim_4`       String,
+       |    `dim_5`       String,
+       |    `dim_6`       String,
+       |    `dim_7`       String,
+       |    `dim_8`       String,
+       |    `dim_9`       String,
+       |    `dim_10`      String,
+       |    `dim_11`      String,
+       |    `dim_12`      String,
+       |    `dim_13`      String,
+       |    `dim_14`      String,
+       |    `dim_15`      String,
+       |    `dim_16`      String,
+       |    `dim_17`      String,
+       |    `dim_18`      String,
+       |    `dim_19`      String,
+       |    `dim_20`      String,
+       |    `dim_21`      String,
+       |    `dim_22`      String,
+       |    `dim_23`      String,
+       |    `dim_24`      String,
+       |    `dim_25`      String,
+       |    `dim_26`      String,
+       |    `dim_27`      String,
+       |    `dim_28`      String,
+       |    `dim_29`      String,
+       |    `dim_30`      String,
+       |    `dim_31`      String,
+       |    `dim_32`      String,
+       |    `dim_33`      String,
+       |    `dim_34`      String,
+       |    `dim_35`      String,
+       |    `dim_36`      String,
+       |    `dim_37`      String,
+       |    `dim_38`      String,
+       |    `dim_39`      String,
+       |    `dim_40`      String,
+       |    `dim_41`      String,
+       |    `dim_42`      String,
+       |    `dim_43`      String,
+       |    `dim_44`      String,
+       |    `dim_45`      String,
+       |    `dim_46`      String,
+       |    `dim_47`      String,
+       |    `dim_48`      String,
+       |    `dim_49`      String,
+       |    `dim_50`      String,
+       |    `dim_52`      String,
+       |    `dim_53`      String,
+       |    `dim_54`      String,
+       |    `dim_55`      String,
+       |    `dim_56`      String,
+       |    `dim_57`      String,
+       |    `dim_58`      String,
+       |    `dim_59`      String,
+       |    `dim_60`      String,
+       |    `dim_61`      String,
+       |    `dim_62`      String,
+       |    `dim_63`      String,
+       |    `dim_64`      String,
+       |    `dim_65`      String,
+       |    `dim_66`      String,
+       |    `dim_67`      String,
+       |    `dim_68`      String,
+       |    `dim_69`      String,
+       |    `dim_70`      String,
+       |    `dim_71`      String,
+       |    `dim_72`      String,
+       |    `dim_73`      String,
+       |    `dim_74`      String,
+       |    `dim_75`      String,
+       |    `dim_76`      String,
+       |    `dim_77`      String,
+       |    `dim_78`      String,
+       |    `dim_79`      String,
+       |    `dim_80`      String,
+       |    `dim_81`      String,
+       |    `dim_82`      String,
+       |    `dim_83`      String,
+       |    `dim_84`      String,
+       |    `dim_85`      String,
+       |    `dim_86`      String,
+       |    `dim_87`      String,
+       |    `dim_88`      String,
+       |    `dim_89`      String,
+       |    `dim_90`      String,
+       |    `dim_91`      String,
+       |    `dim_92`      String,
+       |    `dim_93`      String,
+       |    `dim_94`      String,
+       |    `dim_95`      String,
+       |    `dim_96`      String,
+       |    `dim_97`      String,
+       |    `dim_98`      String,
+       |    `dim_99`      String,
+       |    `dim_100`     String,
+       |    `counter_1`   double,
+       |    `counter_2`   double,
+       |    `counter_3`   double,
+       |    `counter_4`   double,
+       |    `counter_5`   double,
+       |    `counter_6`   double,
+       |    `counter_7`   double,
+       |    `counter_8`   double,
+       |    `counter_9`   double,
+       |    `counter_10`  double,
+       |    `counter_11`  double,
+       |    `counter_12`  double,
+       |    `counter_13`  double,
+       |    `counter_14`  double,
+       |    `counter_15`  double,
+       |    `counter_16`  double,
+       |    `counter_17`  double,
+       |    `counter_18`  double,
+       |    `counter_19`  double,
+       |    `counter_20`  double,
+       |    `counter_21`  double,
+       |    `counter_22`  double,
+       |    `counter_23`  double,
+       |    `counter_24`  double,
+       |    `counter_25`  double,
+       |    `counter_26`  double,
+       |    `counter_27`  double,
+       |    `counter_28`  double,
+       |    `counter_29`  double,
+       |    `counter_30`  double,
+       |    `counter_31`  double,
+       |    `counter_32`  double,
+       |    `counter_33`  double,
+       |    `counter_34`  double,
+       |    `counter_35`  double,
+       |    `counter_36`  double,
+       |    `counter_37`  double,
+       |    `counter_38`  double,
+       |    `counter_39`  double,
+       |    `counter_40`  double,
+       |    `counter_41`  double,
+       |    `counter_42`  double,
+       |    `counter_43`  double,
+       |    `counter_44`  double,
+       |    `counter_45`  double,
+       |    `counter_46`  double,
+       |    `counter_47`  double,
+       |    `counter_48`  double,
+       |    `counter_49`  double,
+       |    `counter_50`  double,
+       |    `counter_51`  double,
+       |    `counter_52`  double,
+       |    `counter_53`  double,
+       |    `counter_54`  double,
+       |    `counter_55`  double,
+       |    `counter_56`  double,
+       |    `counter_57`  double,
+       |    `counter_58`  double,
+       |    `counter_59`  double,
+       |    `counter_60`  double,
+       |    `counter_61`  double,
+       |    `counter_62`  double,
+       |    `counter_63`  double,
+       |    `counter_64`  double,
+       |    `counter_65`  double,
+       |    `counter_66`  double,
+       |    `counter_67`  double,
+       |    `counter_68`  double,
+       |    `counter_69`  double,
+       |    `counter_70`  double,
+       |    `counter_71`  double,
+       |    `counter_72`  double,
+       |    `counter_73`  double,
+       |    `counter_74`  double,
+       |    `counter_75`  double,
+       |    `counter_76`  double,
+       |    `counter_77`  double,
+       |    `counter_78`  double,
+       |    `counter_79`  double,
+       |    `counter_80`  double,
+       |    `counter_81`  double,
+       |    `counter_82`  double,
+       |    `counter_83`  double,
+       |    `counter_84`  double,
+       |    `counter_85`  double,
+       |    `counter_86`  double,
+       |    `counter_87`  double,
+       |    `counter_88`  double,
+       |    `counter_89`  double,
+       |    `counter_90`  double,
+       |    `counter_91`  double,
+       |    `counter_92`  double,
+       |    `counter_93`  double,
+       |    `counter_94`  double,
+       |    `counter_95`  double,
+       |    `counter_96`  double,
+       |    `counter_97`  double,
+       |    `counter_98`  double,
+       |    `counter_99`  double,
+       |    `counter_100` double,
+       |    `batchno`     double
+       |)
+       |STORED BY 'org.apache.carbondata.format'
+          """.stripMargin.trim,
+    s"""
+       |CREATE TABLE dim_apn_iot
+       |(
+       |    `city_ascription`     String,
+       |    `industry`            String,
+       |    `apn_name`            String,
+       |    `service_level`       String,
+       |    `customer_name`       String,
+       |    `id`                  bigint
+       |)
+       |STORED BY 'org.apache.carbondata.format'
+          """.stripMargin.trim,
+    s"""
+       |CREATE TABLE tradeflow_all (
+       | m_month      smallint,
+       | hs_code      string  ,
+       | country      smallint,
+       | dollar_value double  ,
+       | quantity     double  ,
+       | unit         smallint,
+       | b_country    smallint,
+       | imex         smallint,
+       | y_year       smallint)
+       |STORED BY 'org.apache.carbondata.format'
+          """.stripMargin.trim,
+    s"""
+       |CREATE TABLE country (
+       | countryid   smallint ,
+       | country_en  string   ,
+       | country_cn  string   )
+       |STORED BY 'org.apache.carbondata.format'
+          """.stripMargin.trim,
+    s"""
+       |CREATE TABLE updatetime (
+       | countryid     smallint ,
+       | imex          smallint ,
+       | hs_len        smallint ,
+       | minstartdate  string   ,
+       | startdate     string   ,
+       | newdate       string   ,
+       | minnewdate    string   )
+       |STORED BY 'org.apache.carbondata.format'
+          """.stripMargin.trim
+  )
+}

http://git-wip-us.apache.org/repos/asf/carbondata/blob/ffddba70/datamap/mv/plan/src/test/scala/org/apache/carbondata/mv/TestSQLBatch.scala
----------------------------------------------------------------------
diff --git a/datamap/mv/plan/src/test/scala/org/apache/carbondata/mv/TestSQLBatch.scala b/datamap/mv/plan/src/test/scala/org/apache/carbondata/mv/TestSQLBatch.scala
new file mode 100644
index 0000000..3806dac
--- /dev/null
+++ b/datamap/mv/plan/src/test/scala/org/apache/carbondata/mv/TestSQLBatch.scala
@@ -0,0 +1,584 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.carbondata.mv
+
+object TestSQLBatch {
+
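+  // TPC-DS style join/aggregation queries consumed by the MV modular plan test suites (e.g. ModularToSQLSuite).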
+  val testSQLBatch = Seq[String](
+      s"""
+         |SELECT f1.A,COUNT(*) AS B 
+         |FROM
+         |  fact f1
+         |  JOIN dim d1 ON (f1.K = d1.K)
+         |WHERE f1.E IS NULL AND (f1.C > d1.E OR d1.E = 3)
+         |GROUP BY f1.A
+      """.stripMargin.trim,
+      s"""
+         |SELECT f1.A,COUNT(*) AS B 
+         |FROM
+         |  fact f1
+         |  JOIN dim d1 ON (f1.K = d1.K)
+         |  JOIN dim1 d2 ON (f1.K = d2.K AND d2.G > 0)
+         |WHERE f1.E IS NULL AND f1.C > d1.E
+         |GROUP BY f1.A
+      """.stripMargin.trim,
+      s"""
+         |SELECT substr(item.i_item_desc,1,30) itemdesc, item.i_item_sk item_sk, date_dim.d_date solddate, count(*) cnt
+         |FROM date_dim, store_sales, item
+         |WHERE date_dim.d_date_sk = store_sales.ss_sold_date_sk
+         |      AND store_sales.ss_item_sk = item.i_item_sk
+         |      AND date_dim.d_year in (2000, 2000+1, 2000+2, 2000+3)
+         |GROUP BY substr(item.i_item_desc,1,30), item.i_item_sk,date_dim.d_date 
+      """.stripMargin.trim,
+      s"""
+         |SELECT item.i_item_desc, item.i_category, item.i_class, item.i_current_price, 
+         |       SUM(store_sales.ss_ext_sales_price) as itemrevenue,
+         |       SUM(store_sales.ss_ext_sales_price)*100/sum(sum(store_sales.ss_ext_sales_price)) over (partition by item.i_class) as revenueratio
+         |FROM date_dim, store_sales, item
+         |WHERE date_dim.d_date_sk = store_sales.ss_sold_date_sk
+         |      AND store_sales.ss_item_sk = item.i_item_sk
+         |      AND item.i_category in ('Sport', 'Books', 'Home')
+         |      AND date_dim.d_date between cast('1999-02-22' as date) AND (cast('1999-02-22' as date) + interval 30 days)
+         |GROUP BY item.i_item_id, item.i_item_desc, item.i_category, item.i_class, item.i_current_price 
+      """.stripMargin.trim,
+      s"""
+         |SELECT 'store' channel, store_sales.ss_store_sk col_name, date_dim.d_year, date_dim.d_qoy, 
+         |       item.i_category, SUM(store_sales.ss_ext_sales_price) ext_sales_price 
+         |FROM date_dim, store_sales, item
+         |WHERE store_sales.ss_store_sk IS NULL
+         |      AND store_sales.ss_sold_date_sk = date_dim.d_date_sk
+         |      AND store_sales.ss_item_sk = item.i_item_sk
+         |GROUP BY channel, store_sales.ss_store_sk, date_dim.d_year, date_dim.d_qoy, item.i_category 
+      """.stripMargin.trim,
+      s"""
+         |SELECT 'store' channel, store_sales.ss_store_sk col_name, date_dim.d_year, date_dim.d_qoy, 
+         |       item.i_category, SUM(store_sales.ss_ext_sales_price) ext_sales_price 
+         |FROM date_dim, store_sales, item
+         |WHERE store_sales.ss_store_sk IS NULL
+         |      AND store_sales.ss_sold_date_sk = date_dim.d_date_sk
+         |      AND store_sales.ss_item_sk = item.i_item_sk
+         |GROUP BY store_sales.ss_store_sk, date_dim.d_year, date_dim.d_qoy, item.i_category 
+      """.stripMargin.trim,
+      s"""
+         |SELECT item.i_brand_id brand_id, item.i_brand brand, SUM(ss_ext_sales_price) ext_price 
+         |FROM date_dim, store_sales, item
+         |WHERE store_sales.ss_sold_date_sk = date_dim.d_date_sk
+         |      AND store_sales.ss_item_sk = item.i_item_sk
+         |      AND item.i_manager_id = 28
+         |      AND date_dim.d_year = 1999
+         |      AND date_dim.d_moy = 11
+         |GROUP BY item.i_brand_id, item.i_brand 
+      """.stripMargin.trim,
+      s"""
+         |SELECT item.i_brand_id brand_id, item.i_brand_id brand, SUM(ss_ext_sales_price) ext_price 
+         |FROM date_dim, store_sales, item
+         |WHERE store_sales.ss_sold_date_sk = date_dim.d_date_sk
+         |      AND store_sales.ss_item_sk = item.i_item_sk
+         |      AND item.i_manager_id = 28
+         |      AND date_dim.d_year = 1999
+         |      AND date_dim.d_moy = 11
+         |GROUP BY item.i_brand_id, item.i_class_id,item.i_category_id 
+      """.stripMargin.trim,
+      s"""
+         |SELECT 'store' channel, item.i_brand_id, item.i_class_id, item.i_category_id, 
+         |       SUM(store_sales.ss_quantity*store_sales.ss_list_price) sales, count(*) number_sales
+         |FROM date_dim, store_sales, item
+         |WHERE date_dim.d_date_sk = store_sales.ss_sold_date_sk
+         |      AND store_sales.ss_item_sk = item.i_item_sk
+         |      AND date_dim.d_year = 1999 + 2
+         |      AND date_dim.d_moy = 11
+         |GROUP BY item.i_brand_id, item.i_class_id,item.i_category_id 
+      """.stripMargin.trim,
+      s"""
+         |SELECT substr(item.i_item_desc,1,30) itemdesc, item.i_item_sk item_sk, dt.d_date solddate, count(*) cnt
+         |FROM date_dim dt, store_sales, item
+         |WHERE dt.d_date_sk = store_sales.ss_sold_date_sk
+         |      AND store_sales.ss_item_sk = item.i_item_sk
+         |      AND dt.d_year in (2000, 2000+1, 2000+2, 2000+3)
+         |GROUP BY substr(item.i_item_desc,1,30), item.i_item_sk,dt.d_date
+      """.stripMargin.trim,
+      s"""
+         |SELECT fact.B
+         |FROM
+         |  fact
+         |UNION ALL
+         |SELECT fact.B
+         |FROM
+         |  fact
+         |UNION ALL
+         |SELECT fact.B
+         |FROM
+         |  fact
+      """.stripMargin.trim,
+      s"""
+         |SELECT fact.A
+         |FROM
+         |  fact
+         |  JOIN dim ON (fact.K = dim.K)
+         |  JOIN dim1 ON (fact.K = dim1.K AND dim1.G > 0)
+         |WHERE fact.C > dim.E AND dim.E IS NULL
+         |UNION ALL
+         |SELECT fact.A
+         |FROM
+         |  fact
+         |  JOIN dim ON (fact.K = dim.K)
+         |  JOIN dim1 ON (fact.K = dim1.K AND dim1.G > 0)
+         |WHERE fact.C > dim.E AND dim.E IS NULL
+         |UNION ALL
+         |SELECT fact.B
+         |FROM
+         |  fact
+      """.stripMargin.trim,
+      s"""
+         |SELECT fact.A
+         |FROM
+         |  fact
+         |  JOIN dim ON (fact.K = dim.K)
+         |  JOIN dim1 ON (fact.K = dim1.K AND dim1.G > 0)
+         |WHERE fact.C > dim.E AND dim.E IS NULL
+         |UNION ALL
+         |SELECT fact.A
+         |FROM
+         |  fact
+         |  JOIN dim ON (fact.K = dim.K)
+         |  JOIN dim1 ON (fact.K = dim1.K AND dim1.G > 0)
+         |WHERE fact.C > dim.E AND dim.E IS NULL
+      """.stripMargin.trim,
+      s"""
+         |SELECT fact.A
+         |FROM
+         |  fact
+         |  JOIN dim ON (fact.K = dim.K)
+         |  JOIN dim1 ON (fact.K = dim1.K AND dim1.G > 0)
+         |WHERE fact.C > dim.E AND dim.E IS NULL
+         |UNION ALL
+         |SELECT fact.B
+         |FROM
+         |  fact
+      """.stripMargin.trim,
+      s"""
+         |SELECT fact.A
+         |FROM
+         |  fact
+         |  JOIN dim ON (fact.K = dim.K)
+         |  JOIN dim1 ON (fact.K = dim1.K AND dim1.G > 0)
+         |WHERE fact.C > dim.E AND dim.E IS NULL
+         |UNION ALL
+         |SELECT fact.A
+         |FROM
+         |  fact
+      """.stripMargin.trim,
+      s"""
+         |SELECT f1.A,f1.B,COUNT(*) AS A
+         |FROM
+         |  fact f1
+         |  JOIN dim d1 ON (f1.K = d1.K)
+         |GROUP BY f1.A,f1.B
+      """.stripMargin.trim,
+      s"""
+         |SELECT f1.A,f1.B,COUNT(*) AS A
+         |FROM
+         |  fact f1
+         |  JOIN dim d1 ON (f1.K = d1.K)
+         |WHERE f1.E IS NULL AND f1.C > d1.E AND f1.B = 2
+         |GROUP BY f1.A,f1.B
+      """.stripMargin.trim,
+      s"""
+         |SELECT f1.A,f1.B,COUNT(*) AS A
+         |FROM
+         |  fact f1
+         |  JOIN dim d1 ON (f1.K = d1.K)
+         |WHERE f1.E IS NULL AND f1.C > d1.E AND d1.E = 3
+         |GROUP BY f1.A,f1.B
+      """.stripMargin.trim,
+      s"""
+         |SELECT f1.A,f1.B,COUNT(*) AS A
+         |FROM
+         |  fact f1
+         |  JOIN dim d1 ON (f1.K = d1.K)
+         |WHERE f1.E IS NULL AND f1.C > d1.E
+         |GROUP BY f1.A,f1.B
+      """.stripMargin.trim,
+      s"""
+         |SELECT f1.A,f1.B,COUNT(*) AS A
+         |FROM
+         |  fact f1
+         |  JOIN dim d1 ON (f1.K = d1.K)
+         |  JOIN dim d2 ON (f1.K = d2.K AND d2.E > 0)
+         |WHERE f1.E IS NULL AND f1.C > d1.E
+         |GROUP BY f1.A,f1.B
+      """.stripMargin.trim,
+      s"""
+         |SELECT fact.A,fact.B,COUNT(*) AS A
+         |FROM
+         |  fact
+         |  JOIN dim d1 ON (fact.K = d1.K)
+         |  JOIN dim d2 ON (fact.K = d2.K AND d2.E > 0)
+         |WHERE fact.E IS NULL AND fact.C > d1.E
+         |GROUP BY fact.A,fact.B
+      """.stripMargin.trim,
+      s"""
+         |SELECT fact.A
+         |FROM
+         |  fact
+         |  JOIN dim ON (fact.K = dim.K)
+         |  JOIN dim1 ON (fact.K = dim1.K)
+         |WHERE fact.C > dim.E AND (dim.E IS NULL OR dim1.G IS NULL)
+      """.stripMargin.trim,
+      s"""
+         |SELECT fact.A
+         |FROM
+         |  fact
+         |  JOIN dim ON (fact.K = dim.K)
+         |  JOIN dim1 ON (fact.K = dim1.K)
+         |WHERE fact.C > dim.E OR dim1.G IS NULL
+      """.stripMargin.trim,
+      s"""
+         |SELECT fact.A
+         |FROM
+         |  fact
+         |  JOIN dim ON (fact.K = dim.K)
+         |  JOIN dim1 ON (fact.K = dim1.K AND dim1.G > 0)
+         |WHERE fact.C > dim.E OR dim.E IS NULL
+      """.stripMargin.trim,
+      s"""
+         |SELECT fact.A
+         |FROM
+         |  fact
+         |  JOIN dim ON (fact.K = dim.K)
+         |  JOIN dim1 ON (fact.K = dim1.K AND dim1.G > 0)
+         |WHERE fact.C > dim.E AND dim.E IS NULL
+      """.stripMargin.trim,
+      s"""
+         |SELECT fact.A
+         |FROM
+         |  fact
+         |  JOIN dim ON (fact.K = dim.K)
+         |  JOIN dim1 ON (fact.K = dim1.K AND dim1.G > 0)
+         |WHERE fact.C > dim.E
+      """.stripMargin.trim,
+      s"""
+         |SELECT fact.A
+         |FROM
+         |  fact
+         |  JOIN dim ON (fact.K = dim.K)
+         |  JOIN dim1 ON (fact.K = dim1.K AND dim1.G > 0)
+      """.stripMargin.trim,
+      s"""
+         |SELECT fact.A,fact.B,COUNT(*) AS A
+         |FROM
+         |  fact
+         |  JOIN dim ON (fact.K = dim.K AND fact.K IS NOT NULL)
+         |  JOIN dim1 ON (fact.K = dim1.K AND dim1.G > 0 AND dim1.K IS NOT NULL)
+         |WHERE fact.E IS NULL AND fact.C > dim.E
+         |GROUP BY fact.A,fact.B
+      """.stripMargin.trim,
+      s"""
+         |SELECT fact.A,fact.B,COUNT(*) AS A
+         |FROM
+         |  fact
+         |  JOIN dim ON (fact.K = dim.K AND fact.K IS NOT NULL)
+         |  JOIN dim1 ON (fact.K = dim1.K AND dim1.G > 0)
+         |WHERE fact.E IS NULL AND fact.C > dim.E
+         |GROUP BY fact.A,fact.B
+      """.stripMargin.trim,
+      s"""
+         |SELECT fact.A,fact.B,COUNT(*) AS A
+         |FROM
+         |  fact
+         |  JOIN dim ON (fact.K = dim.K)
+         |  JOIN dim1 ON (fact.K = dim1.K AND dim1.G > 0)
+         |WHERE fact.E IS NULL AND fact.C > dim.E
+         |GROUP BY fact.A,fact.B
+      """.stripMargin.trim,
+      s"""
+         |SELECT fact.A,fact.B,COUNT(*) AS A
+         |FROM
+         |  fact
+         |  JOIN dim ON (fact.K = dim.K)
+         |  JOIN dim1 ON (fact.K = dim1.K AND dim1.G > 0)
+         |WHERE fact.C > fact.E AND fact.C > dim.E
+         |GROUP BY fact.A,fact.B
+      """.stripMargin.trim,
+      s"""
+         |SELECT fact.A,fact.B,COUNT(*) AS A
+         |FROM
+         |  fact
+         |  JOIN dim ON (fact.K = dim.K)
+         |  JOIN dim1 ON (fact.K = dim1.K)
+         |WHERE fact.C > fact.E AND (fact.C > dim.E OR dim1.G > 0)
+         |GROUP BY fact.A,fact.B
+      """.stripMargin.trim,
+      s"""
+         |SELECT fact.A,fact.B,COUNT(*) AS A
+         |FROM
+         |  fact
+         |  JOIN dim ON (fact.K = dim.K)
+         |  JOIN dim1 ON (fact.K = dim1.K)
+         |WHERE fact.C > fact.E AND fact.C > dim.E OR dim1.G > 0
+         |GROUP BY fact.A,fact.B
+      """.stripMargin.trim,
+      s"""
+         |SELECT fact.A,fact.B,COUNT(*) AS A
+         |FROM
+         |  fact
+         |  JOIN dim ON (fact.K = dim.K)
+         |  JOIN dim1 ON (fact.K = dim1.K)
+         |WHERE fact.C > fact.E AND fact.C > dim.E
+         |GROUP BY fact.A,fact.B
+      """.stripMargin.trim,
+      s"""
+         |SELECT fact.A,fact.B,COUNT(*) AS A
+         |FROM
+         |  fact
+         |  JOIN dim ON (fact.K = dim.K)
+         |  JOIN dim1 ON (fact.K = dim1.K)
+         |WHERE fact.C > fact.E OR fact.C > dim.E
+         |GROUP BY fact.A,fact.B
+      """.stripMargin.trim,
+      s"""
+         |SELECT fact.A,fact.B,COUNT(*) AS A
+         |FROM
+         |  fact
+         |  JOIN dim ON (fact.K = dim.K)
+         |  JOIN dim1 ON (fact.K = dim1.K)
+         |WHERE fact.C > fact.E
+         |GROUP BY fact.A,fact.B
+      """.stripMargin.trim,
+      s"""
+         |SELECT fact.A,fact.B,COUNT(*) AS A
+         |FROM
+         |  fact
+         |  JOIN dim ON (fact.K = dim.K)
+         |  JOIN dim1 ON (fact.K = dim1.K)
+         |GROUP BY fact.A,fact.B
+      """.stripMargin.trim,
+      s"""
+         |SELECT fact.A,COUNT(*) AS S1
+         |FROM
+         |  fact
+         |  JOIN dim ON (fact.K = dim.K)
+         |  JOIN dim1 ON (fact.K = dim1.K)
+         |GROUP BY fact.A
+         |--HAVING COUNT(*) > 5
+      """.stripMargin.trim,
+      s"""
+         |SELECT fact.A,COUNT(*)--, my_fun(3) AS S1
+         |FROM
+         |  fact
+         |  JOIN dim ON (fact.K = dim.K)
+         |  JOIN dim1 ON (fact.K = dim1.K)
+         |GROUP BY fact.A
+      """.stripMargin.trim,
+      s"""
+         |SELECT fact.A,COUNT(*) AS S1
+         |FROM
+         |  fact
+         |  JOIN dim ON (fact.K = dim.K)
+         |  JOIN dim1 ON (fact.K = dim1.K)
+         |GROUP BY fact.A
+      """.stripMargin.trim,
+      s"""
+         |SELECT fact.A,sum(cast(dim.D as bigint)) AS S1
+         |FROM
+         |  fact
+         |  JOIN dim ON (fact.K = dim.K)
+         |  JOIN dim1 ON (fact.K = dim1.K)
+         |GROUP BY fact.A
+      """.stripMargin.trim,
+      s"""
+         |SELECT FOO.A, sum(cast(FOO.B as bigint)) AS S
+         |FROM (SELECT fact.A, fact.B
+         |      FROM
+         |        fact
+         |        JOIN dim ON (fact.K = dim.K)) FOO
+         |GROUP BY FOO.A
+      """.stripMargin.trim,
+      s"""
+         |SELECT FOO.A, sum(cast(FOO.B as bigint)) AS S
+         |FROM (SELECT fact.A, fact.B
+         |      FROM
+         |        fact
+         |        JOIN dim ON (fact.K = dim.K)) FOO
+         |GROUP BY FOO.A
+      """.stripMargin.trim,
+      s"""
+         |SELECT f1.A,f1.B,COUNT(*)
+         |FROM
+         |  fact f1
+         |  JOIN fact f2 ON (f1.K = f2.K)
+         |  JOIN fact f3 ON (f1.K = f3.K)
+         |GROUP BY f1.A,f1.B
+      """.stripMargin.trim,
+      s"""
+         |SELECT fact.A,fact.B,sum(cast(dim.D as bigint)) AS S1
+         |FROM
+         |  fact
+         |  LEFT OUTER JOIN dim ON (fact.K = dim.K)
+         |GROUP BY fact.A,fact.B
+      """.stripMargin.trim,
+      s"""
+         |SELECT fact.A,fact.B,fact.C,sum(cast(dim.D as bigint)) AS S1
+         |FROM
+         |  fact
+         |  LEFT OUTER JOIN dim ON (fact.K = dim.K)
+         |GROUP BY fact.A,fact.B,fact.C
+      """.stripMargin.trim,
+//      s"""
+//         |SELECT *
+//         |FROM fact, dim
+//      """.stripMargin.trim,
+      s"""
+         |SELECT store_sales.ss_store_sk,date_dim.d_year,
+         |       COUNT(*) numsales
+         |FROM date_dim, store_sales
+         |WHERE date_dim.d_date_sk = store_sales.ss_sold_date_sk
+         |GROUP BY store_sales.ss_store_sk,date_dim.d_year GROUPING SETS (store_sales.ss_store_sk,date_dim.d_year)
+      """.stripMargin.trim,
+      s"""
+         |SELECT store_sales.ss_store_sk,date_dim.d_year,
+         |       SUM(store_sales.ss_ext_sales_price) as itemrevenue
+         |FROM date_dim, store_sales
+         |WHERE date_dim.d_date_sk = store_sales.ss_sold_date_sk
+         |GROUP BY CUBE(store_sales.ss_store_sk,date_dim.d_year)
+      """.stripMargin.trim,
+      s"""
+         |SELECT date_dim.d_moy,date_dim.d_qoy, date_dim.d_year,
+         |       SUM(store_sales.ss_ext_sales_price) as itemrevenue
+         |FROM date_dim, store_sales
+         |WHERE date_dim.d_date_sk = store_sales.ss_sold_date_sk
+         |GROUP BY ROLLUP(date_dim.d_moy,date_dim.d_qoy, date_dim.d_year)
+      """.stripMargin.trim
+      )
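+    // Second batch: a subset of the queries above plus the GROUPING SETS / CUBE / ROLLUP variants.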
+    val testSQLBatch2 = Seq[String](
+        s"""
+           |SELECT f1.A,COUNT(*) AS B
+           |FROM
+           |  fact f1
+           |  JOIN dim d1 ON (f1.K = d1.K)
+           |WHERE f1.E IS NULL AND (f1.C > d1.E OR d1.E = 3)
+           |GROUP BY f1.A
+      """.stripMargin.trim,
+        s"""
+           |SELECT f1.A,COUNT(*) AS B
+           |FROM
+           |  fact f1
+           |  JOIN dim d1 ON (f1.K = d1.K)
+           |  JOIN dim1 d2 ON (f1.K = d2.K AND d2.G > 0)
+           |WHERE f1.E IS NULL AND f1.C > d1.E
+           |GROUP BY f1.A
+      """.stripMargin.trim,
+        s"""
+           |SELECT substr(item.i_item_desc,1,30) itemdesc, item.i_item_sk item_sk, date_dim.d_date solddate, count(*) cnt
+           |FROM date_dim, store_sales, item
+           |WHERE date_dim.d_date_sk = store_sales.ss_sold_date_sk
+           |      AND store_sales.ss_item_sk = item.i_item_sk
+           |      AND date_dim.d_year in (2000, 2000+1, 2000+2, 2000+3)
+           |GROUP BY substr(item.i_item_desc,1,30), item.i_item_sk,date_dim.d_date
+      """.stripMargin.trim,
+        s"""
+           |SELECT item.i_item_desc, item.i_category, item.i_class, item.i_current_price,
+           |       SUM(store_sales.ss_ext_sales_price) as itemrevenue,
+           |       SUM(store_sales.ss_ext_sales_price)*100/sum(sum(store_sales.ss_ext_sales_price)) over (partition by item.i_class) as revenueratio
+           |FROM date_dim, store_sales, item
+           |WHERE date_dim.d_date_sk = store_sales.ss_sold_date_sk
+           |      AND store_sales.ss_item_sk = item.i_item_sk
+           |      AND item.i_category in ('Sport', 'Books', 'Home')
+           |      AND date_dim.d_date between cast('1999-02-22' as date) AND (cast('1999-02-22' as date) + interval 30 days)
+           |GROUP BY item.i_item_id, item.i_item_desc, item.i_category, item.i_class, item.i_current_price
+      """.stripMargin.trim,
+        s"""
+           |SELECT 'store' channel, store_sales.ss_store_sk col_name, date_dim.d_year, date_dim.d_qoy,
+           |       item.i_category, SUM(store_sales.ss_ext_sales_price) ext_sales_price
+           |FROM date_dim, store_sales, item
+           |WHERE store_sales.ss_store_sk IS NULL
+           |      AND store_sales.ss_sold_date_sk = date_dim.d_date_sk
+           |      AND store_sales.ss_item_sk = item.i_item_sk
+           |GROUP BY channel, store_sales.ss_store_sk, date_dim.d_year, date_dim.d_qoy, item.i_category
+      """.stripMargin.trim,
+        s"""
+           |SELECT 'store' channel, store_sales.ss_store_sk col_name, date_dim.d_year, date_dim.d_qoy,
+           |       item.i_category, SUM(store_sales.ss_ext_sales_price) ext_sales_price
+           |FROM date_dim, store_sales, item
+           |WHERE store_sales.ss_store_sk IS NULL
+           |      AND store_sales.ss_sold_date_sk = date_dim.d_date_sk
+           |      AND store_sales.ss_item_sk = item.i_item_sk
+           |GROUP BY store_sales.ss_store_sk, date_dim.d_year, date_dim.d_qoy, item.i_category
+      """.stripMargin.trim,
+        s"""
+           |SELECT item.i_brand_id brand_id, item.i_brand brand, SUM(ss_ext_sales_price) ext_price
+           |FROM date_dim, store_sales, item
+           |WHERE store_sales.ss_sold_date_sk = date_dim.d_date_sk
+           |      AND store_sales.ss_item_sk = item.i_item_sk
+           |      AND item.i_manager_id = 28
+           |      AND date_dim.d_year = 1999
+           |      AND date_dim.d_moy = 11
+           |GROUP BY item.i_brand_id, item.i_brand
+      """.stripMargin.trim,
+        s"""
+           |SELECT item.i_brand_id brand_id, item.i_brand_id brand, SUM(ss_ext_sales_price) ext_price
+           |FROM date_dim, store_sales, item
+           |WHERE store_sales.ss_sold_date_sk = date_dim.d_date_sk
+           |      AND store_sales.ss_item_sk = item.i_item_sk
+           |      AND item.i_manager_id = 28
+           |      AND date_dim.d_year = 1999
+           |      AND date_dim.d_moy = 11
+           |GROUP BY item.i_brand_id, item.i_class_id,item.i_category_id
+      """.stripMargin.trim,
+        s"""
+           |SELECT 'store' channel, item.i_brand_id, item.i_class_id, item.i_category_id,
+           |       SUM(store_sales.ss_quantity*store_sales.ss_list_price) sales, count(*) number_sales
+           |FROM date_dim, store_sales, item
+           |WHERE date_dim.d_date_sk = store_sales.ss_sold_date_sk
+           |      AND store_sales.ss_item_sk = item.i_item_sk
+           |      AND date_dim.d_year = 1999 + 2
+           |      AND date_dim.d_moy = 11
+           |GROUP BY item.i_brand_id, item.i_class_id,item.i_category_id
+      """.stripMargin.trim,
+        s"""
+           |SELECT substr(item.i_item_desc,1,30) itemdesc, item.i_item_sk item_sk, dt.d_date solddate, count(*) cnt
+           |FROM date_dim dt, store_sales, item
+           |WHERE dt.d_date_sk = store_sales.ss_sold_date_sk
+           |      AND store_sales.ss_item_sk = item.i_item_sk
+           |      AND dt.d_year in (2000, 2000+1, 2000+2, 2000+3)
+           |GROUP BY substr(item.i_item_desc,1,30), item.i_item_sk,dt.d_date
+      """.stripMargin.trim,
+
+        s"""
+           |SELECT store_sales.ss_store_sk,date_dim.d_year,
+           |       COUNT(*) numsales
+           |FROM date_dim, store_sales
+           |WHERE date_dim.d_date_sk = store_sales.ss_sold_date_sk
+           |GROUP BY store_sales.ss_store_sk,date_dim.d_year GROUPING SETS (store_sales.ss_store_sk,date_dim.d_year)
+      """.stripMargin.trim,
+        s"""
+           |SELECT store_sales.ss_store_sk,date_dim.d_year,
+           |       SUM(store_sales.ss_ext_sales_price) as itemrevenue
+           |FROM date_dim, store_sales
+           |WHERE date_dim.d_date_sk = store_sales.ss_sold_date_sk
+           |GROUP BY CUBE(store_sales.ss_store_sk,date_dim.d_year)
+      """.stripMargin.trim,
+        s"""
+           |SELECT date_dim.d_moy,date_dim.d_qoy, date_dim.d_year,
+           |       SUM(store_sales.ss_ext_sales_price) as itemrevenue
+           |FROM date_dim, store_sales
+           |WHERE date_dim.d_date_sk = store_sales.ss_sold_date_sk
+           |GROUP BY ROLLUP(date_dim.d_moy,date_dim.d_qoy, date_dim.d_year)
+      """.stripMargin.trim
+    )
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/carbondata/blob/ffddba70/datamap/mv/plan/src/test/scala/org/apache/carbondata/mv/plans/ExtractJoinConditionsSuite.scala
----------------------------------------------------------------------
diff --git a/datamap/mv/plan/src/test/scala/org/apache/carbondata/mv/plans/ExtractJoinConditionsSuite.scala b/datamap/mv/plan/src/test/scala/org/apache/carbondata/mv/plans/ExtractJoinConditionsSuite.scala
new file mode 100644
index 0000000..2e91e80
--- /dev/null
+++ b/datamap/mv/plan/src/test/scala/org/apache/carbondata/mv/plans/ExtractJoinConditionsSuite.scala
@@ -0,0 +1,67 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.carbondata.mv.plans
+
+import org.apache.spark.sql.catalyst.dsl.expressions._
+import org.apache.spark.sql.catalyst.dsl.plans._
+import org.apache.spark.sql.catalyst.plans.logical.LocalRelation
+import org.apache.spark.sql.catalyst.plans.{Inner, _}
+
+import org.apache.carbondata.mv.dsl._
+import org.apache.carbondata.mv.testutil.ModularPlanTest
+
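+/**
+ * Tests that join conditions are correctly extracted from modularized plans.
+ */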
+class ExtractJoinConditionsSuite extends ModularPlanTest {
+  val testRelation0 = LocalRelation('a.int, 'b.int, 'c.int)
+  val testRelation1 = LocalRelation('d.int)
+  val testRelation2 = LocalRelation('b.int,'c.int,'e.int)
+  
+  test("join only") {
+    val left = testRelation0.where('a === 1)
+    val right = testRelation1
+    val originalQuery =
+      left.join(right, condition = Some("d".attr === "b".attr || "d".attr === "c".attr)).analyze
+    val modularPlan = originalQuery.modularize 
+    val extracted = modularPlan.extractJoinConditions(modularPlan.children(0),modularPlan.children(1))
+    
+    val correctAnswer = originalQuery match {
+      case logical.Join(logical.Filter(cond1,logical.LocalRelation(tbl1,_)),logical.LocalRelation(tbl2,_),Inner,Some(cond2)) =>
+        Seq(cond2)
+    }
+    
+    compareExpressions(correctAnswer, extracted)
+  }
+  
+  test("join and filter") {
+    val left = testRelation0.where('b === 2).subquery('l)
+    val right = testRelation2.where('b === 2).subquery('r)
+    val originalQuery =
+      left.join(right,condition = Some("r.b".attr === 2 && "l.c".attr === "r.c".attr)).analyze
+    val modularPlan = originalQuery.modularize
+    val extracted = modularPlan.extractJoinConditions(modularPlan.children(0),modularPlan.children(1))
+    
+    val originalQuery1 =
+      left.join(right,condition = Some("l.c".attr === "r.c".attr)).analyze
+      
+    val correctAnswer = originalQuery1 match {
+      case logical.Join(logical.Filter(cond1,logical.LocalRelation(tbl1,_)),logical.Filter(cond2,logical.LocalRelation(tbl2,_)),Inner,Some(cond3)) =>
+        Seq(cond3)
+    }    
+    
+    compareExpressions(correctAnswer, extracted)
+  }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/carbondata/blob/ffddba70/datamap/mv/plan/src/test/scala/org/apache/carbondata/mv/plans/IsSPJGHSuite.scala
----------------------------------------------------------------------
diff --git a/datamap/mv/plan/src/test/scala/org/apache/carbondata/mv/plans/IsSPJGHSuite.scala b/datamap/mv/plan/src/test/scala/org/apache/carbondata/mv/plans/IsSPJGHSuite.scala
new file mode 100644
index 0000000..e80a0cb
--- /dev/null
+++ b/datamap/mv/plan/src/test/scala/org/apache/carbondata/mv/plans/IsSPJGHSuite.scala
@@ -0,0 +1,59 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.carbondata.mv.plans
+
+import org.apache.spark.sql.catalyst.dsl.expressions._
+import org.apache.spark.sql.catalyst.dsl.plans._
+import org.apache.spark.sql.catalyst.plans.logical.LocalRelation
+
+import org.apache.carbondata.mv.dsl._
+import org.apache.carbondata.mv.plans.modular.ModularPlan
+import org.apache.carbondata.mv.testutil.ModularPlanTest
+
+/**
+ * Tests for the isSPJGH function of [[ModularPlan]].
+ */
+class IsSPJGHSuite extends ModularPlanTest {
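+  // SPJGH: select-project-join-group-by(-having), the plan shapes that isSPJGH is expected to accept.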
+  val testRelation0 = LocalRelation('a.int, 'b.int, 'c.int)
+  val testRelation1 = LocalRelation('d.int, 'e.int)
+
+  def assertIsSPJGH(plan: ModularPlan, result: Boolean = true): Unit = {
+    if (plan.isSPJGH != result) {
+      val ps = plan.toString
+      println(s"Plans should return sameResult = $result\n$ps")
+    }
+  }
+
+  test("project only") {
+    assertIsSPJGH(testRelation0.select('a).analyze.modularize)
+    assertIsSPJGH(testRelation0.select('a,'b).analyze.modularize)
+  }
+
+  test("groupby-project") {
+    assertIsSPJGH(testRelation0.select('a).groupBy('a)('a).select('a).analyze.modularize)
+    assertIsSPJGH(testRelation0.select('a,'b).groupBy('a,'b)('a,'b).select('a).analyze.modularize)
+  }
+
+  test("groupby-project-filter") {
+    assertIsSPJGH(testRelation0.where('a === 1).select('a,'b).groupBy('a,'b)('a,'b).select('a).analyze.modularize)   
+  }
+
+  test("groupby-project-filter-join") {
+    assertIsSPJGH(testRelation0.where('b === 1).join(testRelation1.where('d === 1),condition = Some("d".attr === "b".attr || "d".attr === "c".attr)).groupBy('b,'c)('b,'c).select('b).analyze.modularize)
+  }
+}

http://git-wip-us.apache.org/repos/asf/carbondata/blob/ffddba70/datamap/mv/plan/src/test/scala/org/apache/carbondata/mv/plans/LogicalToModularPlanSuite.scala
----------------------------------------------------------------------
diff --git a/datamap/mv/plan/src/test/scala/org/apache/carbondata/mv/plans/LogicalToModularPlanSuite.scala b/datamap/mv/plan/src/test/scala/org/apache/carbondata/mv/plans/LogicalToModularPlanSuite.scala
new file mode 100644
index 0000000..e5b6ca5
--- /dev/null
+++ b/datamap/mv/plan/src/test/scala/org/apache/carbondata/mv/plans/LogicalToModularPlanSuite.scala
@@ -0,0 +1,196 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.carbondata.mv.plans
+
+import org.apache.spark.sql.catalyst._
+import org.apache.spark.sql.catalyst.dsl.expressions._
+import org.apache.spark.sql.catalyst.dsl.plans._
+import org.apache.spark.sql.catalyst.expressions.aggregate.Count
+import org.apache.spark.sql.catalyst.plans.logical._
+import org.apache.spark.sql.catalyst.plans.{LeftOuter, RightOuter, _}
+
+import org.apache.carbondata.mv.dsl._
+import org.apache.carbondata.mv.plans.modular.Flags._
+import org.apache.carbondata.mv.plans.modular.{JoinEdge, ModularRelation}
+import org.apache.carbondata.mv.testutil.ModularPlanTest
+
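+/**
+ * Verifies that analyzed Catalyst logical plans (selects, filters, joins, group-bys)
+ * are modularized into the expected ModularPlan trees.
+ */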
+class LogicalToModularPlanSuite extends ModularPlanTest {
+
+  val testRelation0 = LocalRelation('a.int, 'b.int, 'c.int)
+  
+  val testRelation1 = LocalRelation('d.int)
+  
+  val testRelation2 = LocalRelation('c.int, 'd.int)
+
+  test("select only") {
+    val originalQuery =
+      testRelation0
+        .select('a.attr)
+        .analyze
+    val modularized = originalQuery.modularize 
+    val correctAnswer = originalQuery match {
+      case logical.Project(proj,logical.LocalRelation(tbl,_)) =>
+        ModularRelation(null,null,tbl,NoFlags,Seq.empty).select(proj:_*)(tbl:_*)()(Map.empty)()
+    }
+    comparePlans(modularized, correctAnswer)
+  }
+  
+  test("select-project-groupby grouping without aggregate function") {
+    val originalQuery =
+      testRelation0
+        .select('a)
+        .groupBy('a)('a)
+        .select('a).analyze
+        
+    val modularized = originalQuery.modularize
+    val correctAnswer = originalQuery match {
+      case logical.Project(proj1,logical.Aggregate(grp,agg,logical.Project(proj2,logical.LocalRelation(tbl,_)))) =>
+        ModularRelation(null,null,tbl,NoFlags,Seq.empty).select(proj2:_*)(tbl:_*)()(Map.empty)().groupBy(agg:_*)(proj2:_*)(grp:_*).select(proj1:_*)(proj1:_*)()(Map.empty)() 
+    }   
+    comparePlans(modularized, correctAnswer)
+  }
+  
+  test("select-project with filter") {
+    val originalQuery =
+      testRelation0
+        .where('a + 'b === 1)
+        .select('a + 'b as 'e)
+        .analyze
+        
+    val modularized = originalQuery.modularize
+    val correctAnswer = originalQuery match {
+      case logical.Project(proj,logical.Filter(cond,logical.LocalRelation(tbl,_))) =>
+        ModularRelation(null,null,tbl,NoFlags,Seq.empty).select(proj:_*)(tbl:_*)(cond)(Map.empty)()  
+    }
+    comparePlans(modularized, correctAnswer)
+  }
+  
+  test("join") {
+    val left = testRelation0.where('a === 1)
+    val right = testRelation1
+    val originalQuery =
+      left.join(right, condition = Some("d".attr === "b".attr || "d".attr === "c".attr)).analyze
+      
+    val modularized = originalQuery.modularize 
+    val correctAnswer = originalQuery match {
+      case logical.Join(logical.Filter(cond1,logical.LocalRelation(tbl1,_)),logical.LocalRelation(tbl2,_),Inner,Some(cond2)) =>
+        Seq(ModularRelation(null,null,tbl1,NoFlags,Seq.empty),ModularRelation(null,null,tbl2,NoFlags,Seq.empty)).select(tbl1++tbl2:_*)(tbl1++tbl2:_*)(Seq(cond1,cond2):_*)(Map.empty)(JoinEdge(0,1,Inner)) 
+    } 
+    comparePlans(modularized, correctAnswer)
+  }
+  
+  test("left outer join") {
+    val left = testRelation0.where('a === 1)
+    val right = testRelation1
+    val originalQuery =
+      left.join(right, LeftOuter, condition = Some("d".attr === "b".attr || "d".attr === "c".attr)).analyze
+      
+    val modularized = originalQuery.modularize 
+    val correctAnswer = originalQuery match {
+      case logical.Join(logical.Filter(cond1,logical.LocalRelation(tbl1,_)),logical.LocalRelation(tbl2,_),LeftOuter,Some(cond2)) =>
+        Seq(ModularRelation(null,null,tbl1,NoFlags,Seq.empty),ModularRelation(null,null,tbl2,NoFlags,Seq.empty)).select(tbl1++tbl2:_*)(tbl1++tbl2:_*)(Seq(cond1,cond2):_*)(Map.empty)(JoinEdge(0,1,LeftOuter)) 
+    } 
+    comparePlans(modularized, correctAnswer)
+  }
+  
+  test("right outer join") {
+    val left = testRelation0.where('a === 1)
+    val right = testRelation1
+    val originalQuery =
+      left.join(right, RightOuter, condition = Some("d".attr === "b".attr || "d".attr === "c".attr)).analyze
+      
+    val modularized = originalQuery.modularize 
+    val correctAnswer = originalQuery match {
+      case logical.Join(logical.Filter(cond1,logical.LocalRelation(tbl1,_)),logical.LocalRelation(tbl2,_),RightOuter,Some(cond2)) =>
+        Seq(ModularRelation(null,null,tbl1,NoFlags,Seq.empty),ModularRelation(null,null,tbl2,NoFlags,Seq.empty)).select(tbl1++tbl2:_*)(tbl1++tbl2:_*)(Seq(cond1,cond2):_*)(Map.empty)(JoinEdge(0,1,RightOuter))  
+    } 
+    comparePlans(modularized, correctAnswer)
+  }
+  
+  test("joins: conjunctive predicates #1 with alias") {
+    val left = testRelation0.where('a === 1).subquery('x)
+    val right = testRelation1.subquery('y)
+    val originalQuery =
+      left.join(right, condition = Some("x.b".attr === "y.d".attr)).analyze
+    
+    val modularized = analysis.EliminateSubqueryAliases(originalQuery).modularize
+    val correctAnswer = originalQuery match {
+      case logical.Join(logical.Filter(cond1,logical.LocalRelation(tbl1,_)),logical.LocalRelation(tbl2,_),Inner,Some(cond2)) =>
+        Seq(ModularRelation(null,null,tbl1,NoFlags,Seq.empty),ModularRelation(null,null,tbl2,NoFlags,Seq.empty)).select(tbl1++tbl2:_*)(tbl1++tbl2:_*)(Seq(cond1,cond2):_*)(Map.empty)(JoinEdge(0,1,Inner))  
+    } 
+    comparePlans(modularized, correctAnswer)
+  }
+  
+  test("joins: conjunctive predicates #2 with alias") {
+    val lleft = testRelation0.where('a >= 3).subquery('z)
+    val left = testRelation0.where('a === 1).subquery('x)
+    val right = testRelation0.subquery('y)
+    val originalQuery =
+      lleft.join(
+        left.join(right, condition = Some("x.b".attr === "y.b".attr)),
+          condition = Some("z.a".attr === "x.b".attr))
+        .analyze
+        
+    val modularized = originalQuery.modularize
+    val correctAnswer = originalQuery match {
+      case logical.Join(logical.Filter(cond1,logical.LocalRelation(tbl1,_)),logical.Join(logical.Filter(cond2,logical.LocalRelation(tbl2,_)),logical.LocalRelation(tbl3,_),Inner,Some(cond3)),Inner,Some(cond4)) =>
+        Seq(ModularRelation(null,null,tbl1,NoFlags,Seq.empty),ModularRelation(null,null,tbl2,NoFlags,Seq.empty),ModularRelation(null,null,tbl3,NoFlags,Seq.empty)).select(tbl1++tbl2++tbl3:_*)(tbl1++tbl2++tbl3:_*)(Seq(cond1,cond2,cond3,cond4):_*)(Map.empty)(JoinEdge(0,1,Inner),JoinEdge(1,2,Inner))  
+    } 
+    comparePlans(modularized, correctAnswer)
+  }
+  
+  test("SPJGH query") {
+    val left = testRelation0.where('b >= 1).subquery('x)
+    val right = testRelation2.where('d >= 2).subquery('y)
+    
+    val originalQuery =
+      left.join(right, Inner, Option("x.c".attr ==="y.c".attr))
+        .groupBy("x.a".attr)("x.a".attr as 'f, Count("x.b") as 'g)
+        .select('f)
+        .where('g > 1).analyze
+        
+    val modularized = originalQuery.modularize
+    val correctAnswer = originalQuery match {
+      case logical.Project(proj0, logical.Filter(cond1, logical.Project(proj1, logical.Aggregate(grp,agg,logical.Join(logical.Filter(cond2,logical.LocalRelation(tbl1,_)),logical.Filter(cond3,logical.LocalRelation(tbl2,_)),Inner,Some(cond4)))))) =>
+        Seq(ModularRelation(null,null,tbl1,NoFlags,Seq.empty),ModularRelation(null,null,tbl2,NoFlags,Seq.empty)).select(tbl1++tbl2:_*)(tbl1++tbl2:_*)(Seq(cond2,cond3,cond4):_*)(Map.empty)(JoinEdge(0,1,Inner)).groupBy(agg:_*)(tbl1++tbl2:_*)(grp:_*).select(proj0:_*)(proj1:_*)(cond1)(Map.empty)()  
+    }   
+    comparePlans(modularized, correctAnswer)
+  }
+  
+  test("non-SPJGH query") {
+    val mqoAnswer = try testRelation0.where('b > 2).select('a).orderBy('a.asc).analyze.modularize catch {
+      case e: Exception =>
+        s"""
+          |Exception thrown while modularizing query:
+          |== Exception ==
+          |$e
+        """.stripMargin.trim
+    }
+    val correctAnswer =
+      s"""
+        |Exception thrown while modularizing query:
+        |== Exception ==
+        |java.lang.UnsupportedOperationException: unsupported operation: No modular plan for 
+        |Sort [a#0 ASC NULLS FIRST], true
+        |+- Project [a#0]
+        |   +- Filter (b#1 > 2)
+        |      +- LocalRelation <empty>, [a#0, b#1, c#2]  
+      """.stripMargin.trim
+    compareMessages(mqoAnswer.toString,correctAnswer)
+  }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/carbondata/blob/ffddba70/datamap/mv/plan/src/test/scala/org/apache/carbondata/mv/plans/ModularToSQLSuite.scala
----------------------------------------------------------------------
diff --git a/datamap/mv/plan/src/test/scala/org/apache/carbondata/mv/plans/ModularToSQLSuite.scala b/datamap/mv/plan/src/test/scala/org/apache/carbondata/mv/plans/ModularToSQLSuite.scala
new file mode 100644
index 0000000..26f68fe
--- /dev/null
+++ b/datamap/mv/plan/src/test/scala/org/apache/carbondata/mv/plans/ModularToSQLSuite.scala
@@ -0,0 +1,164 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.carbondata.mv.plans
+
+import org.scalatest.BeforeAndAfterAll
+
+import org.apache.carbondata.mv.dsl._
+import org.apache.carbondata.mv.testutil.ModularPlanTest
+
+class ModularToSQLSuite extends ModularPlanTest with BeforeAndAfterAll {
+  import org.apache.carbondata.mv.TestSQLBatch._
+
+  override protected def beforeAll(): Unit = {
+    drop
+
+    sql(
+      s"""
+         |CREATE TABLE Fact (
+         |  `A` int,
+         |  `B` int,
+         |  `C` int,
+         |  `E` int,
+         |  `K` int
+         |)
+         |ROW FORMAT DELIMITED FIELDS TERMINATED BY ','
+         |STORED AS TEXTFILE
+        """.stripMargin.trim
+    )
+
+    sql(
+      s"""
+         |CREATE TABLE Dim (
+         |  `D` int,
+         |  `E` int,
+         |  `K` int
+         |)
+         |ROW FORMAT DELIMITED FIELDS TERMINATED BY ','
+         |STORED AS TEXTFILE
+        """.stripMargin.trim
+    )
+
+    sql(
+      s"""
+         |CREATE TABLE Dim1 (
+         |  `F` int,
+         |  `G` int,
+         |  `K` int
+         |)
+         |ROW FORMAT DELIMITED FIELDS TERMINATED BY ','
+         |STORED AS TEXTFILE
+        """.stripMargin.trim
+    )
+
+    sql(
+      s"""
+         |CREATE TABLE store_sales (
+         |  `ss_sold_date_sk` int,
+         |  `ss_item_sk` int,
+         |  `ss_quantity` int,
+         |  `ss_list_price` decimal(7,2),
+         |  `ss_ext_sales_price` decimal(7,2),
+         |  `ss_store_sk` int
+         |)
+         |ROW FORMAT DELIMITED FIELDS TERMINATED BY ','
+         |STORED AS TEXTFILE
+        """.stripMargin.trim
+    )
+
+    sql(
+      s"""
+         |CREATE TABLE date_dim (
+         |  `d_date_sk` int,
+         |  `d_date` date,
+         |  `d_year` int,
+         |  `d_moy` int,
+         |  `d_qoy` int
+         |)
+         |ROW FORMAT DELIMITED FIELDS TERMINATED BY ','
+         |STORED AS TEXTFILE
+        """.stripMargin.trim
+    )
+
+    sql(
+      s"""
+         |CREATE TABLE item (
+         |  `i_item_sk` int,
+         |  `i_item_id` string,
+         |  `i_brand` string,
+         |  `i_brand_id` int,
+         |  `i_item_desc` string,
+         |  `i_class_id` int,
+         |  `i_class` string,
+         |  `i_category` string,
+         |  `i_category_id` int,
+         |  `i_manager_id` int,
+         |  `i_current_price` decimal(7,2)
+         |)
+         |ROW FORMAT DELIMITED FIELDS TERMINATED BY ','
+         |STORED AS TEXTFILE
+        """.stripMargin.trim
+    )
+
+    sqlContext.udf.register("my_fun", (s: Integer) => s)
+  }
+
+
+  private def drop = {
+    sql(s"drop table if exists Fact")
+    sql(s"drop table if exists Dim")
+    sql(s"drop table if exists Dim1")
+    sql(s"drop table if exists store_sales")
+    sql(s"drop table if exists date_dim")
+    sql(s"drop table if exists item")
+  }
+
+  test("convert modular plans to sqls") {
+    testSQLBatch.foreach { query =>
+      testPlan(query)
+    }
+  }
+
+  private def testPlan(query: String) = {
+    val analyzed = sql(query).queryExecution.analyzed
+    val optimized = analyzed.optimize
+    val modularPlan = analyzed.optimize.modularize
+
+    println(s"\n\n===== ACTUAL QUERY =====\n\n${ query } \n")
+
+    println(s"\n\n===== MODULAR PLAN =====\n\n${ modularPlan.treeString } \n")
+
+    val compactSql = modularPlan.asCompactSQL
+    val convertedSql = modularPlan.asOneLineSQL
+
+    println(s"\n\n===== CONVERTED SQL =====\n\n$compactSql \n")
+
+    val analyzed1 = sql(convertedSql).queryExecution.analyzed
+    val modularPlan1 = analyzed1.optimize.modularize
+
+    println(s"\n\n===== RE-CONVERTED SQL =====\n\n${ modularPlan1.asCompactSQL } \n")
+
+    println(s"\n\n===== MODULAR PLAN1 =====\n\n${ modularPlan1.treeString } \n")
+
+    comparePlans(modularPlan, modularPlan1)
+  }
+
+  override protected def afterAll(): Unit = {
+    drop
+  }
+}
\ No newline at end of file

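What testPlan verifies is, in short, a round trip: the analyzed plan is modularized, SQL is regenerated from the modular plan, and the regenerated SQL must modularize back to an equivalent plan. A minimal sketch of that property, assuming the mv dsl implicits used above (optimize, modularize, asOneLineSQL) are in scope:

    val plan1 = sql(query).queryExecution.analyzed.optimize.modularize
    val plan2 = sql(plan1.asOneLineSQL).queryExecution.analyzed.optimize.modularize
    comparePlans(plan1, plan2)  // regenerated SQL must yield an equivalent modular plan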
http://git-wip-us.apache.org/repos/asf/carbondata/blob/ffddba70/datamap/mv/plan/src/test/scala/org/apache/carbondata/mv/plans/SignatureSuite.scala
----------------------------------------------------------------------
diff --git a/datamap/mv/plan/src/test/scala/org/apache/carbondata/mv/plans/SignatureSuite.scala b/datamap/mv/plan/src/test/scala/org/apache/carbondata/mv/plans/SignatureSuite.scala
new file mode 100644
index 0000000..c64826f
--- /dev/null
+++ b/datamap/mv/plan/src/test/scala/org/apache/carbondata/mv/plans/SignatureSuite.scala
@@ -0,0 +1,104 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.carbondata.mv.plans
+
+import org.apache.spark.sql.catalyst.util._
+import org.scalatest.BeforeAndAfterAll
+
+import org.apache.carbondata.mv.dsl._
+import org.apache.carbondata.mv.plans.modular.ModularPlanSignatureGenerator
+import org.apache.carbondata.mv.testutil.ModularPlanTest
+import org.apache.carbondata.mv.testutil.Tpcds_1_4_Tables.tpcds1_4Tables
+
+class SignatureSuite extends ModularPlanTest with BeforeAndAfterAll {
+  import org.apache.carbondata.mv.TestSQLBatch._
+
+  override protected def beforeAll(): Unit = {
+    sql("drop database if exists tpcds1 cascade")
+    sql("create database tpcds1")
+    sql("use tpcds1")
+    tpcds1_4Tables.foreach { create_table =>
+      sql(create_table)
+    }
+
+    sql(
+      s"""
+         |CREATE TABLE Fact (
+         |  `A` int,
+         |  `B` int,
+         |  `C` int,
+         |  `E` int,
+         |  `K` int
+         |)
+         |ROW FORMAT DELIMITED FIELDS TERMINATED BY ','
+         |STORED AS TEXTFILE
+        """.stripMargin.trim
+    )
+
+    sql(
+      s"""
+         |CREATE TABLE Dim (
+         |  `D` int,
+         |  `E` int,
+         |  `K` int
+         |)
+         |ROW FORMAT DELIMITED FIELDS TERMINATED BY ','
+         |STORED AS TEXTFILE
+        """.stripMargin.trim
+    )
+
+    sql(
+      s"""
+         |CREATE TABLE Dim1 (
+         |  `F` int,
+         |  `G` int,
+         |  `K` int
+         |)
+         |ROW FORMAT DELIMITED FIELDS TERMINATED BY ','
+         |STORED AS TEXTFILE
+        """.stripMargin.trim
+    )
+
+    sqlContext.udf.register("my_fun", (s: Integer) => s)
+  }
+
+
+  test("test signature computing") {
+
+    testSQLBatch.foreach { query =>
+      val analyzed = sql(query).queryExecution.analyzed
+      val modularPlan = analyzed.optimize.modularize
+      val sig = ModularPlanSignatureGenerator.generate(modularPlan)
+      sig match {
+        case Some(s) if (!s.groupby || s.datasets != Set("default.fact","default.dim")) =>
+          println(
+              s"""
+              |=== FAIL: signatures do not match ===
+              |${sideBySide(s.groupby.toString, true.toString).mkString("\n")}
+              |${sideBySide(s.datasets.toString, Set("default.fact","default.dim").toString).mkString("\n")}
+            """.stripMargin)
+        case _ =>
+      }
+    }
+  }
+
+  override protected def afterAll(): Unit = {
+    sql("use default")
+    sql("drop database if exists tpcds1 cascade")
+  }
+}
\ No newline at end of file

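Note that the check above only prints a side-by-side diff when the generated signature does not match; it does not fail the test. The signature itself is just a groupby flag plus the set of source datasets. An assert-based variant, assuming ModularPlanTest exposes the usual ScalaTest assertions, could look like:

    val modularPlan = sql(query).queryExecution.analyzed.optimize.modularize
    ModularPlanSignatureGenerator.generate(modularPlan) match {
      case Some(sig) => assert(sig.groupby && sig.datasets == Set("default.fact", "default.dim"))
      case None => fail(s"no signature generated for: $query")
    }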
http://git-wip-us.apache.org/repos/asf/carbondata/blob/ffddba70/integration/spark-common/src/main/scala/org/apache/carbondata/spark/util/CarbonScalaUtil.scala
----------------------------------------------------------------------
diff --git a/integration/spark-common/src/main/scala/org/apache/carbondata/spark/util/CarbonScalaUtil.scala b/integration/spark-common/src/main/scala/org/apache/carbondata/spark/util/CarbonScalaUtil.scala
index b851599..b43ae3f 100644
--- a/integration/spark-common/src/main/scala/org/apache/carbondata/spark/util/CarbonScalaUtil.scala
+++ b/integration/spark-common/src/main/scala/org/apache/carbondata/spark/util/CarbonScalaUtil.scala
@@ -44,7 +44,7 @@ import org.apache.carbondata.core.keygenerator.directdictionary.DirectDictionary
 import org.apache.carbondata.core.metadata.ColumnIdentifier
 import org.apache.carbondata.core.metadata.datatype.{DataType => CarbonDataType, DataTypes => CarbonDataTypes, StructField => CarbonStructField}
 import org.apache.carbondata.core.metadata.encoder.Encoding
-import org.apache.carbondata.core.metadata.schema.table.{CarbonTable, DataMapSchemaStorageProvider}
+import org.apache.carbondata.core.metadata.schema.table.{CarbonTable, DataMapSchema, DataMapSchemaStorageProvider}
 import org.apache.carbondata.core.metadata.schema.table.column.{CarbonColumn, ColumnSchema}
 import org.apache.carbondata.core.util.DataTypeUtil
 import org.apache.carbondata.processing.exception.DataLoadingException
@@ -601,11 +601,15 @@ object CarbonScalaUtil {
   /**
    * Create datamap provider using class name
    */
-  def createDataMapProvider(className: String, sparkSession: SparkSession,
-      storageProvider: DataMapSchemaStorageProvider): Object = {
+  def createDataMapProvider(
+      className: String,
+      sparkSession: SparkSession,
+      table: CarbonTable,
+      schema: DataMapSchema): Object = {
     CarbonReflectionUtils.createObject(
       className,
+      table,
       sparkSession,
-      storageProvider)._1.asInstanceOf[Object]
+      schema)._1.asInstanceOf[Object]
   }
 }

http://git-wip-us.apache.org/repos/asf/carbondata/blob/ffddba70/integration/spark-common/src/main/scala/org/apache/spark/sql/util/SparkSQLUtil.scala
----------------------------------------------------------------------
diff --git a/integration/spark-common/src/main/scala/org/apache/spark/sql/util/SparkSQLUtil.scala b/integration/spark-common/src/main/scala/org/apache/spark/sql/util/SparkSQLUtil.scala
index 370f80c..2e31a82 100644
--- a/integration/spark-common/src/main/scala/org/apache/spark/sql/util/SparkSQLUtil.scala
+++ b/integration/spark-common/src/main/scala/org/apache/spark/sql/util/SparkSQLUtil.scala
@@ -17,9 +17,14 @@
 
 package org.apache.spark.sql.util
 
-import org.apache.spark.sql.SparkSession
+import org.apache.spark.sql.{DataFrame, Dataset, SparkSession}
+import org.apache.spark.sql.catalyst.plans.logical.LogicalPlan
 import org.apache.spark.sql.internal.SessionState
 
 object SparkSQLUtil {
   def sessionState(sparkSession: SparkSession): SessionState = sparkSession.sessionState
+
+  def execute(logicalPlan: LogicalPlan, sparkSession: SparkSession): DataFrame = {
+    Dataset.ofRows(sparkSession, logicalPlan)
+  }
 }

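The new execute helper sits in the org.apache.spark.sql.util package because Dataset.ofRows is private[sql]; exposing it this way lets Carbon code turn an already-built LogicalPlan back into a DataFrame without re-parsing SQL. A minimal usage sketch (the wrapper function and its names are illustrative):

    import org.apache.spark.sql.{DataFrame, SparkSession}
    import org.apache.spark.sql.catalyst.plans.logical.LogicalPlan
    import org.apache.spark.sql.util.SparkSQLUtil

    def runRewrittenPlan(plan: LogicalPlan, spark: SparkSession): DataFrame =
      SparkSQLUtil.execute(plan, spark)  // delegates to Dataset.ofRows(spark, plan)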
http://git-wip-us.apache.org/repos/asf/carbondata/blob/ffddba70/integration/spark2/src/main/java/org/apache/carbondata/datamap/DataMapManager.java
----------------------------------------------------------------------
diff --git a/integration/spark2/src/main/java/org/apache/carbondata/datamap/DataMapManager.java b/integration/spark2/src/main/java/org/apache/carbondata/datamap/DataMapManager.java
index 3fe1f0d..1126b2e 100644
--- a/integration/spark2/src/main/java/org/apache/carbondata/datamap/DataMapManager.java
+++ b/integration/spark2/src/main/java/org/apache/carbondata/datamap/DataMapManager.java
@@ -21,12 +21,15 @@ import org.apache.carbondata.common.exceptions.sql.MalformedDataMapCommandExcept
 import org.apache.carbondata.core.datamap.DataMapProvider;
 import org.apache.carbondata.core.metadata.schema.table.CarbonTable;
 import org.apache.carbondata.core.metadata.schema.table.DataMapSchema;
+import org.apache.carbondata.spark.util.CarbonScalaUtil;
 
+import static org.apache.carbondata.core.metadata.schema.datamap.DataMapClassProvider.MV;
 import static org.apache.carbondata.core.metadata.schema.datamap.DataMapClassProvider.PREAGGREGATE;
 import static org.apache.carbondata.core.metadata.schema.datamap.DataMapClassProvider.TIMESERIES;
 
 import org.apache.spark.sql.SparkSession;
 
+
 public class DataMapManager {
 
   private static DataMapManager INSTANCE;
@@ -50,6 +53,12 @@ public class DataMapManager {
       provider = new PreAggregateDataMapProvider(mainTable, dataMapSchema, sparkSession);
     } else if (dataMapSchema.getProviderName().equalsIgnoreCase(TIMESERIES.toString())) {
       provider = new TimeseriesDataMapProvider(mainTable, dataMapSchema, sparkSession);
+    } else if (dataMapSchema.getProviderName().equalsIgnoreCase(MV.toString())) {
+      provider = (DataMapProvider) CarbonScalaUtil.createDataMapProvider(
+          "org.apache.carbondata.mv.datamap.MVDataMapProvider",
+              sparkSession,
+              mainTable,
+              dataMapSchema);
     } else {
       provider = new IndexDataMapProvider(mainTable, dataMapSchema, sparkSession);
     }

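MVDataMapProvider is referenced by its fully qualified class name and instantiated through CarbonScalaUtil.createDataMapProvider (i.e. via reflection) because the mv module is only built when the new mv Maven profile is active (see the pom.xml change below), so DataMapManager cannot take a compile-time dependency on it. The equivalent call, written against the Scala signature introduced above with illustrative variable names:

    val provider = CarbonScalaUtil.createDataMapProvider(
      "org.apache.carbondata.mv.datamap.MVDataMapProvider",
      sparkSession, mainTable, dataMapSchema).asInstanceOf[DataMapProvider]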
http://git-wip-us.apache.org/repos/asf/carbondata/blob/ffddba70/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/datamap/CarbonDropDataMapCommand.scala
----------------------------------------------------------------------
diff --git a/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/datamap/CarbonDropDataMapCommand.scala b/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/datamap/CarbonDropDataMapCommand.scala
index cf5a4ae..a27b694 100644
--- a/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/datamap/CarbonDropDataMapCommand.scala
+++ b/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/datamap/CarbonDropDataMapCommand.scala
@@ -176,7 +176,14 @@ case class CarbonDropDataMapCommand(
         }
       }
     } else {
-      dropDataMapFromSystemFolder(sparkSession)
+      try {
+        dropDataMapFromSystemFolder(sparkSession)
+      } catch {
+        case e: Exception =>
+          if (!ifExistsSet) {
+            throw e
+          }
+      }
     }
 
       Seq.empty

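With this change, a failure while dropping the datamap schema from the system folder is swallowed only when IF EXISTS was specified (for example a statement along the lines of DROP DATAMAP IF EXISTS dm ON TABLE t); without IF EXISTS the exception is still propagated to the user.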
http://git-wip-us.apache.org/repos/asf/carbondata/blob/ffddba70/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/table/CarbonDropTableCommand.scala
----------------------------------------------------------------------
diff --git a/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/table/CarbonDropTableCommand.scala b/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/table/CarbonDropTableCommand.scala
index 61df9b1..9576fb1 100644
--- a/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/table/CarbonDropTableCommand.scala
+++ b/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/table/CarbonDropTableCommand.scala
@@ -29,6 +29,7 @@ import org.apache.spark.sql.execution.command.datamap.CarbonDropDataMapCommand
 import org.apache.carbondata.common.logging.{LogService, LogServiceFactory}
 import org.apache.carbondata.core.cache.dictionary.ManageDictionaryAndBTree
 import org.apache.carbondata.core.datamap.DataMapStoreManager
+import org.apache.carbondata.core.datamap.status.DataMapStatusManager
 import org.apache.carbondata.core.datastore.impl.FileFactory
 import org.apache.carbondata.core.exception.ConcurrentOperationException
 import org.apache.carbondata.core.locks.{CarbonLockUtil, ICarbonLock, LockUsage}

http://git-wip-us.apache.org/repos/asf/carbondata/blob/ffddba70/pom.xml
----------------------------------------------------------------------
diff --git a/pom.xml b/pom.xml
index 7c460e2..7273c76 100644
--- a/pom.xml
+++ b/pom.xml
@@ -636,6 +636,12 @@
         <script.exetension>.bat</script.exetension>
       </properties>
     </profile>
+    <profile>
+      <id>mv</id>
+      <modules>
+        <module>datamap/mv/plan</module>
+      </modules>
+    </profile>
   </profiles>
 
 </project>

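The mv profile is opt-in, so the new datamap/mv/plan module is compiled only when the profile is activated explicitly, for example with an invocation along the lines of mvn clean install -DskipTests -Pmv (illustrative; combine with the usual Spark profile flags as needed).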

[05/50] [abbrv] carbondata git commit: [CARBONDATA-2458] Remove unnecessary TableProvider interface

Posted by ma...@apache.org.
[CARBONDATA-2458] Remove unnecessary TableProvider interface

Remove the unnecessary TableProvider interface and pass the CarbonTable directly where a table is needed

This closes #2280


Project: http://git-wip-us.apache.org/repos/asf/carbondata/repo
Commit: http://git-wip-us.apache.org/repos/asf/carbondata/commit/2c0fa107
Tree: http://git-wip-us.apache.org/repos/asf/carbondata/tree/2c0fa107
Diff: http://git-wip-us.apache.org/repos/asf/carbondata/diff/2c0fa107

Branch: refs/heads/spark-2.3
Commit: 2c0fa107974145734ad8589d79ef273f98ed249d
Parents: d5da9a1
Author: dhatchayani <dh...@gmail.com>
Authored: Tue May 8 12:40:59 2018 +0530
Committer: Jacky Li <ja...@qq.com>
Committed: Wed May 9 10:20:03 2018 +0800

----------------------------------------------------------------------
 .../core/metadata/schema/table/CarbonTable.java |  6 +--
 .../executor/impl/AbstractQueryExecutor.java    |  6 +--
 .../core/scan/executor/util/QueryUtil.java      | 20 +++------
 .../scan/filter/FilterExpressionProcessor.java  | 16 +++----
 .../core/scan/filter/FilterProcessor.java       |  2 +-
 .../carbondata/core/scan/filter/FilterUtil.java | 44 +++++++-------------
 .../core/scan/filter/SingleTableProvider.java   | 41 ------------------
 .../core/scan/filter/TableProvider.java         | 29 -------------
 .../resolver/ConditionalFilterResolverImpl.java |  9 +---
 .../filter/resolver/FilterResolverIntf.java     |  3 +-
 .../resolver/LogicalFilterResolverImpl.java     |  5 +--
 .../resolver/RowLevelFilterResolverImpl.java    |  4 +-
 .../RowLevelRangeFilterResolverImpl.java        |  3 +-
 .../metadata/FilterResolverMetadata.java        | 10 -----
 .../FalseConditionalResolverImpl.java           |  4 +-
 .../TrueConditionalResolverImpl.java            |  4 +-
 .../visitor/DictionaryColumnVisitor.java        |  2 +-
 .../visitor/RangeDictionaryColumnVisitor.java   |  3 +-
 .../core/scan/model/QueryModelBuilder.java      |  4 +-
 .../hadoop/api/CarbonFileInputFormat.java       |  5 +--
 .../hadoop/api/CarbonTableInputFormat.java      |  8 +---
 21 files changed, 46 insertions(+), 182 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/carbondata/blob/2c0fa107/core/src/main/java/org/apache/carbondata/core/metadata/schema/table/CarbonTable.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/metadata/schema/table/CarbonTable.java b/core/src/main/java/org/apache/carbondata/core/metadata/schema/table/CarbonTable.java
index cf5660f..5acca27 100644
--- a/core/src/main/java/org/apache/carbondata/core/metadata/schema/table/CarbonTable.java
+++ b/core/src/main/java/org/apache/carbondata/core/metadata/schema/table/CarbonTable.java
@@ -49,7 +49,6 @@ import org.apache.carbondata.core.metadata.schema.table.column.CarbonMeasure;
 import org.apache.carbondata.core.metadata.schema.table.column.ColumnSchema;
 import org.apache.carbondata.core.scan.expression.Expression;
 import org.apache.carbondata.core.scan.filter.FilterExpressionProcessor;
-import org.apache.carbondata.core.scan.filter.TableProvider;
 import org.apache.carbondata.core.scan.filter.intf.FilterOptimizer;
 import org.apache.carbondata.core.scan.filter.optimizer.RangeFilterOptmizer;
 import org.apache.carbondata.core.scan.filter.resolver.FilterResolverIntf;
@@ -916,12 +915,11 @@ public class CarbonTable implements Serializable {
   /**
    * Resolve the filter expression.
    */
-  public FilterResolverIntf resolveFilter(Expression filterExpression,
-      TableProvider tableProvider) {
+  public FilterResolverIntf resolveFilter(Expression filterExpression) {
     try {
       FilterExpressionProcessor filterExpressionProcessor = new FilterExpressionProcessor();
       return filterExpressionProcessor.getFilterResolver(
-          filterExpression, getAbsoluteTableIdentifier(), tableProvider);
+          filterExpression, getAbsoluteTableIdentifier());
     } catch (Exception e) {
       throw new RuntimeException("Error while resolving filter expression", e);
     }

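With the TableProvider parameter gone, resolving a filter needs only the expression; the table required for dictionary lookups is obtained internally (see the FilterUtil change further down, which goes through CarbonMetadata). A minimal Scala sketch of the simplified call site, against the signatures shown in this commit (the wrapper method is illustrative):

    import org.apache.carbondata.core.metadata.schema.table.CarbonTable
    import org.apache.carbondata.core.scan.expression.Expression
    import org.apache.carbondata.core.scan.filter.resolver.FilterResolverIntf

    // before: table.resolveFilter(expr, new SingleTableProvider(table))
    def resolve(table: CarbonTable, expr: Expression): FilterResolverIntf =
      table.resolveFilter(expr)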
http://git-wip-us.apache.org/repos/asf/carbondata/blob/2c0fa107/core/src/main/java/org/apache/carbondata/core/scan/executor/impl/AbstractQueryExecutor.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/scan/executor/impl/AbstractQueryExecutor.java b/core/src/main/java/org/apache/carbondata/core/scan/executor/impl/AbstractQueryExecutor.java
index ed0350f..ff0e5ce 100644
--- a/core/src/main/java/org/apache/carbondata/core/scan/executor/impl/AbstractQueryExecutor.java
+++ b/core/src/main/java/org/apache/carbondata/core/scan/executor/impl/AbstractQueryExecutor.java
@@ -58,8 +58,6 @@ import org.apache.carbondata.core.scan.executor.infos.BlockExecutionInfo;
 import org.apache.carbondata.core.scan.executor.util.QueryUtil;
 import org.apache.carbondata.core.scan.executor.util.RestructureUtil;
 import org.apache.carbondata.core.scan.filter.FilterUtil;
-import org.apache.carbondata.core.scan.filter.SingleTableProvider;
-import org.apache.carbondata.core.scan.filter.TableProvider;
 import org.apache.carbondata.core.scan.model.ProjectionDimension;
 import org.apache.carbondata.core.scan.model.ProjectionMeasure;
 import org.apache.carbondata.core.scan.model.QueryModel;
@@ -178,7 +176,6 @@ public abstract class AbstractQueryExecutor<E> implements QueryExecutor<E> {
         queryProperties.complexFilterDimension, queryProperties.filterMeasures);
 
     CarbonTable carbonTable = queryModel.getTable();
-    TableProvider tableProvider = new SingleTableProvider(carbonTable);
 
     queryStatistic = new QueryStatistic();
     // dictionary column unique column id to dictionary mapping
@@ -187,8 +184,7 @@ public abstract class AbstractQueryExecutor<E> implements QueryExecutor<E> {
         QueryUtil.getDimensionDictionaryDetail(
             queryModel.getProjectionDimensions(),
             queryProperties.complexFilterDimension,
-            queryModel.getAbsoluteTableIdentifier(),
-            tableProvider);
+            carbonTable);
     queryStatistic
         .addStatistics(QueryStatisticsConstants.LOAD_DICTIONARY, System.currentTimeMillis());
     queryProperties.queryStatisticsRecorder.recordStatistics(queryStatistic);

http://git-wip-us.apache.org/repos/asf/carbondata/blob/2c0fa107/core/src/main/java/org/apache/carbondata/core/scan/executor/util/QueryUtil.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/scan/executor/util/QueryUtil.java b/core/src/main/java/org/apache/carbondata/core/scan/executor/util/QueryUtil.java
index cc31efc..bb33b24 100644
--- a/core/src/main/java/org/apache/carbondata/core/scan/executor/util/QueryUtil.java
+++ b/core/src/main/java/org/apache/carbondata/core/scan/executor/util/QueryUtil.java
@@ -41,7 +41,6 @@ import org.apache.carbondata.core.keygenerator.KeyGenException;
 import org.apache.carbondata.core.keygenerator.KeyGenerator;
 import org.apache.carbondata.core.metadata.AbsoluteTableIdentifier;
 import org.apache.carbondata.core.metadata.CarbonMetadata;
-import org.apache.carbondata.core.metadata.CarbonTableIdentifier;
 import org.apache.carbondata.core.metadata.ColumnIdentifier;
 import org.apache.carbondata.core.metadata.datatype.DataType;
 import org.apache.carbondata.core.metadata.datatype.DataTypes;
@@ -57,7 +56,6 @@ import org.apache.carbondata.core.scan.executor.infos.KeyStructureInfo;
 import org.apache.carbondata.core.scan.expression.ColumnExpression;
 import org.apache.carbondata.core.scan.expression.Expression;
 import org.apache.carbondata.core.scan.filter.GenericQueryType;
-import org.apache.carbondata.core.scan.filter.TableProvider;
 import org.apache.carbondata.core.scan.filter.resolver.FilterResolverIntf;
 import org.apache.carbondata.core.scan.filter.resolver.resolverinfo.DimColumnResolvedFilterInfo;
 import org.apache.carbondata.core.scan.model.ProjectionDimension;
@@ -271,14 +269,12 @@ public class QueryUtil {
    *
    * @param queryDimensions         query dimension present in the query this will be used to
    *                                convert the result from surrogate key to actual data
-   * @param absoluteTableIdentifier absolute table identifier
    * @return dimension unique id to its dictionary map
    * @throws IOException
    */
   public static Map<String, Dictionary> getDimensionDictionaryDetail(
       List<ProjectionDimension> queryDimensions, Set<CarbonDimension> filterComplexDimensions,
-      AbsoluteTableIdentifier absoluteTableIdentifier, TableProvider tableProvider)
-      throws IOException {
+      CarbonTable carbonTable) throws IOException {
     // to store complex dimension and its child id unique column id list, this is required as
     // dimension can be present in  projection and filter
     // so we need to get only one instance of dictionary
@@ -309,7 +305,7 @@ public class QueryUtil {
     List<String> dictionaryColumnIdList =
         new ArrayList<String>(dictionaryDimensionFromQuery.size());
     dictionaryColumnIdList.addAll(dictionaryDimensionFromQuery);
-    return getDictionaryMap(dictionaryColumnIdList, absoluteTableIdentifier, tableProvider);
+    return getDictionaryMap(dictionaryColumnIdList, carbonTable);
   }
 
   /**
@@ -336,21 +332,18 @@ public class QueryUtil {
    * Below method will be used to get the column id to its dictionary mapping
    *
    * @param dictionaryColumnIdList  dictionary column list
-   * @param absoluteTableIdentifier absolute table identifier
    * @return dictionary mapping
    * @throws IOException
    */
   private static Map<String, Dictionary> getDictionaryMap(List<String> dictionaryColumnIdList,
-      AbsoluteTableIdentifier absoluteTableIdentifier, TableProvider tableProvider)
-      throws IOException {
+      CarbonTable carbonTable) throws IOException {
     // if any complex dimension not present in query then return the empty map
     if (dictionaryColumnIdList.size() == 0) {
       return new HashMap<>();
     }
     // this for dictionary unique identifier
     List<DictionaryColumnUniqueIdentifier> dictionaryColumnUniqueIdentifiers =
-        getDictionaryColumnUniqueIdentifierList(dictionaryColumnIdList,
-            absoluteTableIdentifier.getCarbonTableIdentifier(), tableProvider);
+        getDictionaryColumnUniqueIdentifierList(dictionaryColumnIdList, carbonTable);
     CacheProvider cacheProvider = CacheProvider.getInstance();
     Cache<DictionaryColumnUniqueIdentifier, Dictionary> forwardDictionaryCache = cacheProvider
         .createCache(CacheType.FORWARD_DICTIONARY);
@@ -371,13 +364,10 @@ public class QueryUtil {
    * Below method will be used to get the dictionary column unique identifier
    *
    * @param dictionaryColumnIdList dictionary
-   * @param carbonTableIdentifier
    * @return
    */
   private static List<DictionaryColumnUniqueIdentifier> getDictionaryColumnUniqueIdentifierList(
-      List<String> dictionaryColumnIdList, CarbonTableIdentifier carbonTableIdentifier,
-      TableProvider tableProvider) throws IOException {
-    CarbonTable carbonTable = tableProvider.getCarbonTable(carbonTableIdentifier);
+      List<String> dictionaryColumnIdList, CarbonTable carbonTable) throws IOException {
     List<DictionaryColumnUniqueIdentifier> dictionaryColumnUniqueIdentifiers =
         new ArrayList<>(dictionaryColumnIdList.size());
     for (String columnId : dictionaryColumnIdList) {

http://git-wip-us.apache.org/repos/asf/carbondata/blob/2c0fa107/core/src/main/java/org/apache/carbondata/core/scan/filter/FilterExpressionProcessor.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/scan/filter/FilterExpressionProcessor.java b/core/src/main/java/org/apache/carbondata/core/scan/filter/FilterExpressionProcessor.java
index 6adfb3a..dc9415e 100644
--- a/core/src/main/java/org/apache/carbondata/core/scan/filter/FilterExpressionProcessor.java
+++ b/core/src/main/java/org/apache/carbondata/core/scan/filter/FilterExpressionProcessor.java
@@ -78,10 +78,10 @@ public class FilterExpressionProcessor implements FilterProcessor {
    * @return a filter resolver tree
    */
   public FilterResolverIntf getFilterResolver(Expression expressionTree,
-      AbsoluteTableIdentifier tableIdentifier, TableProvider tableProvider)
+      AbsoluteTableIdentifier tableIdentifier)
       throws FilterUnsupportedException, IOException {
     if (null != expressionTree && null != tableIdentifier) {
-      return getFilterResolvertree(expressionTree, tableIdentifier, tableProvider);
+      return getFilterResolvertree(expressionTree, tableIdentifier);
     }
     return null;
   }
@@ -225,11 +225,11 @@ public class FilterExpressionProcessor implements FilterProcessor {
    * @return FilterResolverIntf type.
    */
   private FilterResolverIntf getFilterResolvertree(Expression expressionTree,
-      AbsoluteTableIdentifier tableIdentifier, TableProvider tableProvider)
+      AbsoluteTableIdentifier tableIdentifier)
       throws FilterUnsupportedException, IOException {
     FilterResolverIntf filterEvaluatorTree =
         createFilterResolverTree(expressionTree, tableIdentifier);
-    traverseAndResolveTree(filterEvaluatorTree, tableIdentifier, tableProvider);
+    traverseAndResolveTree(filterEvaluatorTree, tableIdentifier);
     return filterEvaluatorTree;
   }
 
@@ -243,14 +243,14 @@ public class FilterExpressionProcessor implements FilterProcessor {
    * @param tableIdentifier
    */
   private void traverseAndResolveTree(FilterResolverIntf filterResolverTree,
-      AbsoluteTableIdentifier tableIdentifier, TableProvider tableProvider)
+      AbsoluteTableIdentifier tableIdentifier)
       throws FilterUnsupportedException, IOException {
     if (null == filterResolverTree) {
       return;
     }
-    traverseAndResolveTree(filterResolverTree.getLeft(), tableIdentifier, tableProvider);
-    filterResolverTree.resolve(tableIdentifier, tableProvider);
-    traverseAndResolveTree(filterResolverTree.getRight(), tableIdentifier, tableProvider);
+    traverseAndResolveTree(filterResolverTree.getLeft(), tableIdentifier);
+    filterResolverTree.resolve(tableIdentifier);
+    traverseAndResolveTree(filterResolverTree.getRight(), tableIdentifier);
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/carbondata/blob/2c0fa107/core/src/main/java/org/apache/carbondata/core/scan/filter/FilterProcessor.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/scan/filter/FilterProcessor.java b/core/src/main/java/org/apache/carbondata/core/scan/filter/FilterProcessor.java
index 1ae795e..91cc0f8 100644
--- a/core/src/main/java/org/apache/carbondata/core/scan/filter/FilterProcessor.java
+++ b/core/src/main/java/org/apache/carbondata/core/scan/filter/FilterProcessor.java
@@ -38,7 +38,7 @@ public interface FilterProcessor {
    * @throws FilterUnsupportedException
    */
   FilterResolverIntf getFilterResolver(Expression expressionTree,
-      AbsoluteTableIdentifier tableIdentifier, TableProvider tableProvider)
+      AbsoluteTableIdentifier tableIdentifier)
       throws FilterUnsupportedException, IOException;
 
   /**

http://git-wip-us.apache.org/repos/asf/carbondata/blob/2c0fa107/core/src/main/java/org/apache/carbondata/core/scan/filter/FilterUtil.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/scan/filter/FilterUtil.java b/core/src/main/java/org/apache/carbondata/core/scan/filter/FilterUtil.java
index c91eb2e..2032ddb 100644
--- a/core/src/main/java/org/apache/carbondata/core/scan/filter/FilterUtil.java
+++ b/core/src/main/java/org/apache/carbondata/core/scan/filter/FilterUtil.java
@@ -54,6 +54,7 @@ import org.apache.carbondata.core.keygenerator.KeyGenException;
 import org.apache.carbondata.core.keygenerator.KeyGenerator;
 import org.apache.carbondata.core.keygenerator.mdkey.MultiDimKeyVarLengthGenerator;
 import org.apache.carbondata.core.metadata.AbsoluteTableIdentifier;
+import org.apache.carbondata.core.metadata.CarbonMetadata;
 import org.apache.carbondata.core.metadata.ColumnIdentifier;
 import org.apache.carbondata.core.metadata.datatype.DataType;
 import org.apache.carbondata.core.metadata.datatype.DataTypes;
@@ -535,8 +536,7 @@ public final class FilterUtil {
    * @throws QueryExecutionException
    */
   public static ColumnFilterInfo getFilterValues(AbsoluteTableIdentifier tableIdentifier,
-      ColumnExpression columnExpression, List<String> evaluateResultList, boolean isIncludeFilter,
-      TableProvider tableProvider)
+      ColumnExpression columnExpression, List<String> evaluateResultList, boolean isIncludeFilter)
       throws QueryExecutionException, FilterUnsupportedException, IOException {
     Dictionary forwardDictionary = null;
     ColumnFilterInfo filterInfo = null;
@@ -545,8 +545,7 @@ public final class FilterUtil {
     try {
       // Reading the dictionary value from cache.
       forwardDictionary =
-          getForwardDictionaryCache(tableIdentifier, columnExpression.getDimension(),
-              tableProvider);
+          getForwardDictionaryCache(tableIdentifier, columnExpression.getDimension());
       sortFilterModelMembers(columnExpression, evaluateResultList);
       getDictionaryValue(evaluateResultList, forwardDictionary, surrogates);
       filterInfo =
@@ -566,7 +565,6 @@ public final class FilterUtil {
    *
    * @param forwardDictionary
    * @param isIncludeFilter
-   * @param filterInfo
    * @param surrogates
    * @return
    */
@@ -704,14 +702,12 @@ public final class FilterUtil {
    */
   public static ColumnFilterInfo getFilterListForAllValues(AbsoluteTableIdentifier tableIdentifier,
       Expression expression, final ColumnExpression columnExpression, boolean isIncludeFilter,
-      TableProvider tableProvider, boolean isExprEvalReqd)
-      throws FilterUnsupportedException, IOException {
+      boolean isExprEvalReqd) throws FilterUnsupportedException, IOException {
     Dictionary forwardDictionary = null;
     List<Integer> surrogates = new ArrayList<Integer>(20);
     try {
       forwardDictionary =
-          getForwardDictionaryCache(tableIdentifier, columnExpression.getDimension(),
-              tableProvider);
+          getForwardDictionaryCache(tableIdentifier, columnExpression.getDimension());
       if (isExprEvalReqd && !isIncludeFilter) {
         surrogates.add(CarbonCommonConstants.DICT_VALUE_NULL);
       }
@@ -1238,35 +1234,25 @@ public final class FilterUtil {
   }
 
   /**
-   * @param tableIdentifier
+   * @param dictionarySourceAbsoluteTableIdentifier
    * @param carbonDimension
    * @return
    */
-  public static Dictionary getForwardDictionaryCache(AbsoluteTableIdentifier tableIdentifier,
-      CarbonDimension carbonDimension) throws IOException {
-    return getForwardDictionaryCache(tableIdentifier, carbonDimension, null);
-  }
-
-  /**
-   * @param carbonDimension
-   * @param tableProvider
-   * @return
-   */
   public static Dictionary getForwardDictionaryCache(
       AbsoluteTableIdentifier dictionarySourceAbsoluteTableIdentifier,
-      CarbonDimension carbonDimension, TableProvider tableProvider) throws IOException {
+      CarbonDimension carbonDimension) throws IOException {
     String dictionaryPath = null;
     ColumnIdentifier columnIdentifier = carbonDimension.getColumnIdentifier();
-    if (null != tableProvider) {
-      CarbonTable carbonTable = tableProvider
-          .getCarbonTable(dictionarySourceAbsoluteTableIdentifier.getCarbonTableIdentifier());
+    CarbonTable carbonTable = CarbonMetadata.getInstance()
+        .getCarbonTable(dictionarySourceAbsoluteTableIdentifier.getDatabaseName(),
+            dictionarySourceAbsoluteTableIdentifier.getTableName());
+    if (null != carbonTable) {
       dictionaryPath = carbonTable.getTableInfo().getFactTable().getTableProperties()
           .get(CarbonCommonConstants.DICTIONARY_PATH);
-      if (null != carbonDimension.getColumnSchema().getParentColumnTableRelations() &&
-          carbonDimension.getColumnSchema().getParentColumnTableRelations().size() == 1) {
-        dictionarySourceAbsoluteTableIdentifier =
-            QueryUtil.getTableIdentifierForColumn(carbonDimension,
-                carbonTable.getAbsoluteTableIdentifier());
+      if (null != carbonDimension.getColumnSchema().getParentColumnTableRelations()
+          && carbonDimension.getColumnSchema().getParentColumnTableRelations().size() == 1) {
+        dictionarySourceAbsoluteTableIdentifier = QueryUtil
+            .getTableIdentifierForColumn(carbonDimension, carbonTable.getAbsoluteTableIdentifier());
         columnIdentifier = new ColumnIdentifier(
             carbonDimension.getColumnSchema().getParentColumnTableRelations().get(0).getColumnId(),
             carbonDimension.getColumnProperties(), carbonDimension.getDataType());

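getForwardDictionaryCache now resolves the owning table itself through the CarbonMetadata singleton, essentially CarbonMetadata.getInstance().getCarbonTable(databaseName, tableName), instead of being handed a TableProvider; that is what allows every caller up the stack to drop the extra parameter.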
http://git-wip-us.apache.org/repos/asf/carbondata/blob/2c0fa107/core/src/main/java/org/apache/carbondata/core/scan/filter/SingleTableProvider.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/scan/filter/SingleTableProvider.java b/core/src/main/java/org/apache/carbondata/core/scan/filter/SingleTableProvider.java
deleted file mode 100644
index d72e798..0000000
--- a/core/src/main/java/org/apache/carbondata/core/scan/filter/SingleTableProvider.java
+++ /dev/null
@@ -1,41 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.carbondata.core.scan.filter;
-
-import java.io.IOException;
-
-import org.apache.carbondata.core.metadata.CarbonTableIdentifier;
-import org.apache.carbondata.core.metadata.schema.table.CarbonTable;
-
-public class SingleTableProvider implements TableProvider {
-
-  private CarbonTable carbonTable;
-
-  public SingleTableProvider(CarbonTable carbonTable) {
-    this.carbonTable = carbonTable;
-  }
-
-  @Override public CarbonTable getCarbonTable(CarbonTableIdentifier carbonTableIdentifier)
-      throws IOException {
-    if (carbonTable.getCarbonTableIdentifier().getTableUniqueName()
-        .equals(carbonTableIdentifier.getTableUniqueName())) {
-      return carbonTable;
-    } else {
-      throw new IOException("Carbon table does not exist with identifier " + carbonTableIdentifier);
-    }
-  }
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/carbondata/blob/2c0fa107/core/src/main/java/org/apache/carbondata/core/scan/filter/TableProvider.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/scan/filter/TableProvider.java b/core/src/main/java/org/apache/carbondata/core/scan/filter/TableProvider.java
deleted file mode 100644
index d3fa15a..0000000
--- a/core/src/main/java/org/apache/carbondata/core/scan/filter/TableProvider.java
+++ /dev/null
@@ -1,29 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.carbondata.core.scan.filter;
-
-import java.io.IOException;
-import java.io.Serializable;
-
-import org.apache.carbondata.core.metadata.CarbonTableIdentifier;
-import org.apache.carbondata.core.metadata.schema.table.CarbonTable;
-
-public interface TableProvider extends Serializable {
-
-  CarbonTable getCarbonTable(CarbonTableIdentifier carbonTableIdentifier) throws IOException;
-
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/carbondata/blob/2c0fa107/core/src/main/java/org/apache/carbondata/core/scan/filter/resolver/ConditionalFilterResolverImpl.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/scan/filter/resolver/ConditionalFilterResolverImpl.java b/core/src/main/java/org/apache/carbondata/core/scan/filter/resolver/ConditionalFilterResolverImpl.java
index 996965d..ccbbc32 100644
--- a/core/src/main/java/org/apache/carbondata/core/scan/filter/resolver/ConditionalFilterResolverImpl.java
+++ b/core/src/main/java/org/apache/carbondata/core/scan/filter/resolver/ConditionalFilterResolverImpl.java
@@ -31,7 +31,6 @@ import org.apache.carbondata.core.scan.expression.conditional.ConditionalExpress
 import org.apache.carbondata.core.scan.expression.exception.FilterUnsupportedException;
 import org.apache.carbondata.core.scan.expression.logical.RangeExpression;
 import org.apache.carbondata.core.scan.filter.FilterUtil;
-import org.apache.carbondata.core.scan.filter.TableProvider;
 import org.apache.carbondata.core.scan.filter.intf.FilterExecuterType;
 import org.apache.carbondata.core.scan.filter.resolver.metadata.FilterResolverMetadata;
 import org.apache.carbondata.core.scan.filter.resolver.resolverinfo.DimColumnResolvedFilterInfo;
@@ -67,8 +66,7 @@ public class ConditionalFilterResolverImpl implements FilterResolverIntf {
    *
    * @throws FilterUnsupportedException
    */
-  @Override public void resolve(AbsoluteTableIdentifier absoluteTableIdentifier,
-      TableProvider tableProvider)
+  @Override public void resolve(AbsoluteTableIdentifier absoluteTableIdentifier)
       throws FilterUnsupportedException, IOException {
     FilterResolverMetadata metadata = new FilterResolverMetadata();
     metadata.setTableIdentifier(absoluteTableIdentifier);
@@ -81,7 +79,6 @@ public class ConditionalFilterResolverImpl implements FilterResolverIntf {
         metadata.setColumnExpression(columnExpression);
         metadata.setExpression(rightExp);
         metadata.setIncludeFilter(isIncludeFilter);
-        metadata.setTableProvider(tableProvider);
         // If imei=imei comes in filter condition then we need to
         // skip processing of right expression.
         // This flow has reached here assuming that this is a single
@@ -117,7 +114,6 @@ public class ConditionalFilterResolverImpl implements FilterResolverIntf {
         metadata.setColumnExpression(columnExpression);
         metadata.setExpression(leftExp);
         metadata.setIncludeFilter(isIncludeFilter);
-        metadata.setTableProvider(tableProvider);
         if (columnExpression.getDataType().equals(DataTypes.TIMESTAMP) ||
             columnExpression.getDataType().equals(DataTypes.DATE)) {
           isExpressionResolve = true;
@@ -153,7 +149,6 @@ public class ConditionalFilterResolverImpl implements FilterResolverIntf {
       metadata.setColumnExpression(columnList.get(0));
       metadata.setExpression(exp);
       metadata.setIncludeFilter(isIncludeFilter);
-      metadata.setTableProvider(tableProvider);
       if ((null != columnList.get(0).getDimension()) && (
           !columnList.get(0).getDimension().hasEncoding(Encoding.DICTIONARY) || columnList.get(0)
               .getDimension().hasEncoding(Encoding.DIRECT_DICTIONARY))
@@ -167,7 +162,7 @@ public class ConditionalFilterResolverImpl implements FilterResolverIntf {
               ! columnList.get(0).getDimension().getDataType().isComplexType())) {
         dimColResolvedFilterInfo.setFilterValues(FilterUtil
             .getFilterListForAllValues(absoluteTableIdentifier, exp, columnList.get(0),
-                isIncludeFilter, tableProvider, isExpressionResolve));
+                isIncludeFilter, isExpressionResolve));
 
         dimColResolvedFilterInfo.setColumnIndex(columnList.get(0).getDimension().getOrdinal());
         dimColResolvedFilterInfo.setDimension(columnList.get(0).getDimension());

http://git-wip-us.apache.org/repos/asf/carbondata/blob/2c0fa107/core/src/main/java/org/apache/carbondata/core/scan/filter/resolver/FilterResolverIntf.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/scan/filter/resolver/FilterResolverIntf.java b/core/src/main/java/org/apache/carbondata/core/scan/filter/resolver/FilterResolverIntf.java
index a0810d5..89a3890 100644
--- a/core/src/main/java/org/apache/carbondata/core/scan/filter/resolver/FilterResolverIntf.java
+++ b/core/src/main/java/org/apache/carbondata/core/scan/filter/resolver/FilterResolverIntf.java
@@ -25,7 +25,6 @@ import org.apache.carbondata.core.datastore.block.SegmentProperties;
 import org.apache.carbondata.core.metadata.AbsoluteTableIdentifier;
 import org.apache.carbondata.core.scan.expression.Expression;
 import org.apache.carbondata.core.scan.expression.exception.FilterUnsupportedException;
-import org.apache.carbondata.core.scan.filter.TableProvider;
 import org.apache.carbondata.core.scan.filter.intf.FilterExecuterType;
 import org.apache.carbondata.core.scan.filter.resolver.resolverinfo.DimColumnResolvedFilterInfo;
 import org.apache.carbondata.core.scan.filter.resolver.resolverinfo.MeasureColumnResolvedFilterInfo;
@@ -40,7 +39,7 @@ public interface FilterResolverIntf extends Serializable {
    * @throws IOException
    * @throws FilterUnsupportedException
    */
-  void resolve(AbsoluteTableIdentifier absoluteTableIdentifier, TableProvider tableProvider)
+  void resolve(AbsoluteTableIdentifier absoluteTableIdentifier)
       throws IOException, FilterUnsupportedException;
 
   /**

http://git-wip-us.apache.org/repos/asf/carbondata/blob/2c0fa107/core/src/main/java/org/apache/carbondata/core/scan/filter/resolver/LogicalFilterResolverImpl.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/scan/filter/resolver/LogicalFilterResolverImpl.java b/core/src/main/java/org/apache/carbondata/core/scan/filter/resolver/LogicalFilterResolverImpl.java
index d530155..368cd9a 100644
--- a/core/src/main/java/org/apache/carbondata/core/scan/filter/resolver/LogicalFilterResolverImpl.java
+++ b/core/src/main/java/org/apache/carbondata/core/scan/filter/resolver/LogicalFilterResolverImpl.java
@@ -24,7 +24,6 @@ import org.apache.carbondata.core.datastore.block.SegmentProperties;
 import org.apache.carbondata.core.metadata.AbsoluteTableIdentifier;
 import org.apache.carbondata.core.scan.expression.BinaryExpression;
 import org.apache.carbondata.core.scan.expression.Expression;
-import org.apache.carbondata.core.scan.filter.TableProvider;
 import org.apache.carbondata.core.scan.filter.intf.ExpressionType;
 import org.apache.carbondata.core.scan.filter.intf.FilterExecuterType;
 import org.apache.carbondata.core.scan.filter.resolver.resolverinfo.DimColumnResolvedFilterInfo;
@@ -57,10 +56,8 @@ public class LogicalFilterResolverImpl implements FilterResolverIntf {
    * node for filter evaluation, so in this instance no implementation is required.
    *
    * @param absoluteTableIdentifier
-   * @param tableProvider
    */
-  @Override public void resolve(AbsoluteTableIdentifier absoluteTableIdentifier,
-      TableProvider tableProvider) {
+  @Override public void resolve(AbsoluteTableIdentifier absoluteTableIdentifier) {
 
   }
 

http://git-wip-us.apache.org/repos/asf/carbondata/blob/2c0fa107/core/src/main/java/org/apache/carbondata/core/scan/filter/resolver/RowLevelFilterResolverImpl.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/scan/filter/resolver/RowLevelFilterResolverImpl.java b/core/src/main/java/org/apache/carbondata/core/scan/filter/resolver/RowLevelFilterResolverImpl.java
index c846c02..f4d6fe8 100644
--- a/core/src/main/java/org/apache/carbondata/core/scan/filter/resolver/RowLevelFilterResolverImpl.java
+++ b/core/src/main/java/org/apache/carbondata/core/scan/filter/resolver/RowLevelFilterResolverImpl.java
@@ -25,7 +25,6 @@ import org.apache.carbondata.core.metadata.AbsoluteTableIdentifier;
 import org.apache.carbondata.core.scan.expression.ColumnExpression;
 import org.apache.carbondata.core.scan.expression.Expression;
 import org.apache.carbondata.core.scan.expression.conditional.ConditionalExpression;
-import org.apache.carbondata.core.scan.filter.TableProvider;
 import org.apache.carbondata.core.scan.filter.intf.FilterExecuterType;
 import org.apache.carbondata.core.scan.filter.resolver.resolverinfo.DimColumnResolvedFilterInfo;
 import org.apache.carbondata.core.scan.filter.resolver.resolverinfo.MeasureColumnResolvedFilterInfo;
@@ -52,8 +51,7 @@ public class RowLevelFilterResolverImpl extends ConditionalFilterResolverImpl {
    * Method which will resolve the filter expression by converting the filter member
    * to its assigned dictionary values.
    */
-  public void resolve(AbsoluteTableIdentifier absoluteTableIdentifier,
-      TableProvider tableProvider) {
+  public void resolve(AbsoluteTableIdentifier absoluteTableIdentifier) {
     DimColumnResolvedFilterInfo dimColumnEvaluatorInfo = null;
     MeasureColumnResolvedFilterInfo msrColumnEvalutorInfo = null;
     int index = 0;

http://git-wip-us.apache.org/repos/asf/carbondata/blob/2c0fa107/core/src/main/java/org/apache/carbondata/core/scan/filter/resolver/RowLevelRangeFilterResolverImpl.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/scan/filter/resolver/RowLevelRangeFilterResolverImpl.java b/core/src/main/java/org/apache/carbondata/core/scan/filter/resolver/RowLevelRangeFilterResolverImpl.java
index c83e662..2bc73c5 100644
--- a/core/src/main/java/org/apache/carbondata/core/scan/filter/resolver/RowLevelRangeFilterResolverImpl.java
+++ b/core/src/main/java/org/apache/carbondata/core/scan/filter/resolver/RowLevelRangeFilterResolverImpl.java
@@ -41,7 +41,6 @@ import org.apache.carbondata.core.scan.expression.exception.FilterUnsupportedExc
 import org.apache.carbondata.core.scan.expression.logical.BinaryLogicalExpression;
 import org.apache.carbondata.core.scan.filter.ColumnFilterInfo;
 import org.apache.carbondata.core.scan.filter.FilterUtil;
-import org.apache.carbondata.core.scan.filter.TableProvider;
 import org.apache.carbondata.core.scan.filter.intf.FilterExecuterType;
 import org.apache.carbondata.core.scan.filter.resolver.resolverinfo.DimColumnResolvedFilterInfo;
 import org.apache.carbondata.core.scan.filter.resolver.resolverinfo.MeasureColumnResolvedFilterInfo;
@@ -228,7 +227,7 @@ public class RowLevelRangeFilterResolverImpl extends ConditionalFilterResolverIm
    * Method which will resolve the filter expression by converting the filter
    * member to its assigned dictionary values.
    */
-  public void resolve(AbsoluteTableIdentifier absoluteTableIdentifier, TableProvider tableProvider)
+  public void resolve(AbsoluteTableIdentifier absoluteTableIdentifier)
       throws FilterUnsupportedException {
     DimColumnResolvedFilterInfo dimColumnEvaluatorInfo = null;
     MeasureColumnResolvedFilterInfo msrColumnEvalutorInfo = null;

http://git-wip-us.apache.org/repos/asf/carbondata/blob/2c0fa107/core/src/main/java/org/apache/carbondata/core/scan/filter/resolver/metadata/FilterResolverMetadata.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/scan/filter/resolver/metadata/FilterResolverMetadata.java b/core/src/main/java/org/apache/carbondata/core/scan/filter/resolver/metadata/FilterResolverMetadata.java
index 8db80dc..324a94f 100644
--- a/core/src/main/java/org/apache/carbondata/core/scan/filter/resolver/metadata/FilterResolverMetadata.java
+++ b/core/src/main/java/org/apache/carbondata/core/scan/filter/resolver/metadata/FilterResolverMetadata.java
@@ -19,22 +19,12 @@ package org.apache.carbondata.core.scan.filter.resolver.metadata;
 import org.apache.carbondata.core.metadata.AbsoluteTableIdentifier;
 import org.apache.carbondata.core.scan.expression.ColumnExpression;
 import org.apache.carbondata.core.scan.expression.Expression;
-import org.apache.carbondata.core.scan.filter.TableProvider;
 
 public class FilterResolverMetadata {
   private AbsoluteTableIdentifier tableIdentifier;
   private Expression expression;
   private ColumnExpression columnExpression;
 
-  public TableProvider getTableProvider() {
-    return tableProvider;
-  }
-
-  public void setTableProvider(TableProvider tableProvider) {
-    this.tableProvider = tableProvider;
-  }
-
-  private TableProvider tableProvider;
   private boolean isIncludeFilter;
 
   public AbsoluteTableIdentifier getTableIdentifier() {

http://git-wip-us.apache.org/repos/asf/carbondata/blob/2c0fa107/core/src/main/java/org/apache/carbondata/core/scan/filter/resolver/resolverinfo/FalseConditionalResolverImpl.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/scan/filter/resolver/resolverinfo/FalseConditionalResolverImpl.java b/core/src/main/java/org/apache/carbondata/core/scan/filter/resolver/resolverinfo/FalseConditionalResolverImpl.java
index 63ce790..b5468ce 100644
--- a/core/src/main/java/org/apache/carbondata/core/scan/filter/resolver/resolverinfo/FalseConditionalResolverImpl.java
+++ b/core/src/main/java/org/apache/carbondata/core/scan/filter/resolver/resolverinfo/FalseConditionalResolverImpl.java
@@ -18,7 +18,6 @@ package org.apache.carbondata.core.scan.filter.resolver.resolverinfo;
 
 import org.apache.carbondata.core.metadata.AbsoluteTableIdentifier;
 import org.apache.carbondata.core.scan.expression.Expression;
-import org.apache.carbondata.core.scan.filter.TableProvider;
 import org.apache.carbondata.core.scan.filter.intf.FilterExecuterType;
 import org.apache.carbondata.core.scan.filter.resolver.ConditionalFilterResolverImpl;
 
@@ -32,8 +31,7 @@ public class FalseConditionalResolverImpl extends ConditionalFilterResolverImpl
     super(exp, isExpressionResolve, isIncludeFilter, false);
   }
 
-  @Override public void resolve(AbsoluteTableIdentifier absoluteTableIdentifier,
-      TableProvider tableProvider) {
+  @Override public void resolve(AbsoluteTableIdentifier absoluteTableIdentifier) {
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/carbondata/blob/2c0fa107/core/src/main/java/org/apache/carbondata/core/scan/filter/resolver/resolverinfo/TrueConditionalResolverImpl.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/scan/filter/resolver/resolverinfo/TrueConditionalResolverImpl.java b/core/src/main/java/org/apache/carbondata/core/scan/filter/resolver/resolverinfo/TrueConditionalResolverImpl.java
index 104f69e..c316b5c 100644
--- a/core/src/main/java/org/apache/carbondata/core/scan/filter/resolver/resolverinfo/TrueConditionalResolverImpl.java
+++ b/core/src/main/java/org/apache/carbondata/core/scan/filter/resolver/resolverinfo/TrueConditionalResolverImpl.java
@@ -19,7 +19,6 @@ package org.apache.carbondata.core.scan.filter.resolver.resolverinfo;
 
 import org.apache.carbondata.core.metadata.AbsoluteTableIdentifier;
 import org.apache.carbondata.core.scan.expression.Expression;
-import org.apache.carbondata.core.scan.filter.TableProvider;
 import org.apache.carbondata.core.scan.filter.intf.FilterExecuterType;
 import org.apache.carbondata.core.scan.filter.resolver.ConditionalFilterResolverImpl;
 
@@ -33,8 +32,7 @@ public class TrueConditionalResolverImpl extends ConditionalFilterResolverImpl {
     super(exp, isExpressionResolve, isIncludeFilter, false);
   }
 
-  @Override public void resolve(AbsoluteTableIdentifier absoluteTableIdentifier,
-      TableProvider tableProvider) {
+  @Override public void resolve(AbsoluteTableIdentifier absoluteTableIdentifier) {
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/carbondata/blob/2c0fa107/core/src/main/java/org/apache/carbondata/core/scan/filter/resolver/resolverinfo/visitor/DictionaryColumnVisitor.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/scan/filter/resolver/resolverinfo/visitor/DictionaryColumnVisitor.java b/core/src/main/java/org/apache/carbondata/core/scan/filter/resolver/resolverinfo/visitor/DictionaryColumnVisitor.java
index ee205ee..d2eecc9 100644
--- a/core/src/main/java/org/apache/carbondata/core/scan/filter/resolver/resolverinfo/visitor/DictionaryColumnVisitor.java
+++ b/core/src/main/java/org/apache/carbondata/core/scan/filter/resolver/resolverinfo/visitor/DictionaryColumnVisitor.java
@@ -58,7 +58,7 @@ public class DictionaryColumnVisitor implements ResolvedFilterInfoVisitorIntf {
 
         resolvedFilterObject = FilterUtil
             .getFilterValues(metadata.getTableIdentifier(), metadata.getColumnExpression(),
-                evaluateResultListFinal, metadata.isIncludeFilter(), metadata.getTableProvider());
+                evaluateResultListFinal, metadata.isIncludeFilter());
         if (!metadata.isIncludeFilter() && null != resolvedFilterObject) {
           // Adding default surrogate key of null member inorder to not display the same while
           // displaying the report as per hive compatibility.

http://git-wip-us.apache.org/repos/asf/carbondata/blob/2c0fa107/core/src/main/java/org/apache/carbondata/core/scan/filter/resolver/resolverinfo/visitor/RangeDictionaryColumnVisitor.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/scan/filter/resolver/resolverinfo/visitor/RangeDictionaryColumnVisitor.java b/core/src/main/java/org/apache/carbondata/core/scan/filter/resolver/resolverinfo/visitor/RangeDictionaryColumnVisitor.java
index b1e5a7d..a3ecd12 100644
--- a/core/src/main/java/org/apache/carbondata/core/scan/filter/resolver/resolverinfo/visitor/RangeDictionaryColumnVisitor.java
+++ b/core/src/main/java/org/apache/carbondata/core/scan/filter/resolver/resolverinfo/visitor/RangeDictionaryColumnVisitor.java
@@ -48,8 +48,7 @@ public class RangeDictionaryColumnVisitor extends DictionaryColumnVisitor
       ColumnFilterInfo resolvedFilterObject = null;
       resolvedFilterObject = FilterUtil
           .getFilterListForAllValues(metadata.getTableIdentifier(), metadata.getExpression(),
-              metadata.getColumnExpression(), metadata.isIncludeFilter(),
-              metadata.getTableProvider(), true);
+              metadata.getColumnExpression(), metadata.isIncludeFilter(), true);
 
       if (!metadata.isIncludeFilter() && null != resolvedFilterObject) {
         // Adding default surrogate key of null member inorder to not display the same while

http://git-wip-us.apache.org/repos/asf/carbondata/blob/2c0fa107/core/src/main/java/org/apache/carbondata/core/scan/model/QueryModelBuilder.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/scan/model/QueryModelBuilder.java b/core/src/main/java/org/apache/carbondata/core/scan/model/QueryModelBuilder.java
index 31f5d0b..198608f 100644
--- a/core/src/main/java/org/apache/carbondata/core/scan/model/QueryModelBuilder.java
+++ b/core/src/main/java/org/apache/carbondata/core/scan/model/QueryModelBuilder.java
@@ -24,7 +24,6 @@ import org.apache.carbondata.core.metadata.schema.table.CarbonTable;
 import org.apache.carbondata.core.metadata.schema.table.column.CarbonDimension;
 import org.apache.carbondata.core.metadata.schema.table.column.CarbonMeasure;
 import org.apache.carbondata.core.scan.expression.Expression;
-import org.apache.carbondata.core.scan.filter.SingleTableProvider;
 import org.apache.carbondata.core.scan.filter.resolver.FilterResolverIntf;
 import org.apache.carbondata.core.util.DataTypeConverter;
 
@@ -113,8 +112,7 @@ public class QueryModelBuilder {
     table.processFilterExpression(filterExpression, isFilterDimensions, isFilterMeasures);
     queryModel.setIsFilterDimensions(isFilterDimensions);
     queryModel.setIsFilterMeasures(isFilterMeasures);
-    FilterResolverIntf filterIntf =
-        table.resolveFilter(filterExpression, new SingleTableProvider(table));
+    FilterResolverIntf filterIntf = table.resolveFilter(filterExpression);
     queryModel.setFilterExpressionResolverTree(filterIntf);
     return queryModel;
   }

http://git-wip-us.apache.org/repos/asf/carbondata/blob/2c0fa107/hadoop/src/main/java/org/apache/carbondata/hadoop/api/CarbonFileInputFormat.java
----------------------------------------------------------------------
diff --git a/hadoop/src/main/java/org/apache/carbondata/hadoop/api/CarbonFileInputFormat.java b/hadoop/src/main/java/org/apache/carbondata/hadoop/api/CarbonFileInputFormat.java
index 0e8cf6a..8ed89d5 100644
--- a/hadoop/src/main/java/org/apache/carbondata/hadoop/api/CarbonFileInputFormat.java
+++ b/hadoop/src/main/java/org/apache/carbondata/hadoop/api/CarbonFileInputFormat.java
@@ -39,8 +39,6 @@ import org.apache.carbondata.core.mutate.UpdateVO;
 import org.apache.carbondata.core.readcommitter.LatestFilesReadCommittedScope;
 import org.apache.carbondata.core.readcommitter.ReadCommittedScope;
 import org.apache.carbondata.core.scan.expression.Expression;
-import org.apache.carbondata.core.scan.filter.SingleTableProvider;
-import org.apache.carbondata.core.scan.filter.TableProvider;
 import org.apache.carbondata.core.scan.filter.resolver.FilterResolverIntf;
 import org.apache.carbondata.core.statusmanager.LoadMetadataDetails;
 import org.apache.carbondata.core.statusmanager.SegmentUpdateStatusManager;
@@ -125,12 +123,11 @@ public class CarbonFileInputFormat<T> extends CarbonInputFormat<T> implements Se
         readCommittedScope = new LatestFilesReadCommittedScope(identifier.getTablePath());
       }
       Expression filter = getFilterPredicates(job.getConfiguration());
-      TableProvider tableProvider = new SingleTableProvider(carbonTable);
       // this will be null in case of corrupt schema file.
       PartitionInfo partitionInfo = carbonTable.getPartitionInfo(carbonTable.getTableName());
       carbonTable.processFilterExpression(filter, null, null);
 
-      FilterResolverIntf filterInterface = carbonTable.resolveFilter(filter, tableProvider);
+      FilterResolverIntf filterInterface = carbonTable.resolveFilter(filter);
 
       String segmentDir = null;
       if (carbonTable.isTransactionalTable()) {

http://git-wip-us.apache.org/repos/asf/carbondata/blob/2c0fa107/hadoop/src/main/java/org/apache/carbondata/hadoop/api/CarbonTableInputFormat.java
----------------------------------------------------------------------
diff --git a/hadoop/src/main/java/org/apache/carbondata/hadoop/api/CarbonTableInputFormat.java b/hadoop/src/main/java/org/apache/carbondata/hadoop/api/CarbonTableInputFormat.java
index cd34bb8..990e9a7 100644
--- a/hadoop/src/main/java/org/apache/carbondata/hadoop/api/CarbonTableInputFormat.java
+++ b/hadoop/src/main/java/org/apache/carbondata/hadoop/api/CarbonTableInputFormat.java
@@ -51,8 +51,6 @@ import org.apache.carbondata.core.readcommitter.TableStatusReadCommittedScope;
 import org.apache.carbondata.core.reader.CarbonIndexFileReader;
 import org.apache.carbondata.core.scan.expression.Expression;
 import org.apache.carbondata.core.scan.filter.FilterExpressionProcessor;
-import org.apache.carbondata.core.scan.filter.SingleTableProvider;
-import org.apache.carbondata.core.scan.filter.TableProvider;
 import org.apache.carbondata.core.scan.filter.resolver.FilterResolverIntf;
 import org.apache.carbondata.core.statusmanager.FileFormat;
 import org.apache.carbondata.core.statusmanager.LoadMetadataDetails;
@@ -224,7 +222,6 @@ public class CarbonTableInputFormat<T> extends CarbonInputFormat<T> {
 
     // process and resolve the expression
     Expression filter = getFilterPredicates(job.getConfiguration());
-    TableProvider tableProvider = new SingleTableProvider(carbonTable);
     // this will be null in case of corrupt schema file.
     PartitionInfo partitionInfo = carbonTable.getPartitionInfo(carbonTable.getTableName());
     carbonTable.processFilterExpression(filter, null, null);
@@ -242,7 +239,7 @@ public class CarbonTableInputFormat<T> extends CarbonInputFormat<T> {
       }
     }
 
-    FilterResolverIntf filterInterface = carbonTable.resolveFilter(filter, tableProvider);
+    FilterResolverIntf filterInterface = carbonTable.resolveFilter(filter);
 
     // do block filtering and get split
     List<InputSplit> splits =
@@ -440,7 +437,6 @@ public class CarbonTableInputFormat<T> extends CarbonInputFormat<T> {
 
       carbonTable.processFilterExpression(filter, null, null);
 
-      TableProvider tableProvider = new SingleTableProvider(carbonTable);
       // prune partitions for filter query on partition table
       String partitionIds = job.getConfiguration().get(ALTER_PARTITION_ID);
       // matchedPartitions records partitionIndex, not partitionId
@@ -457,7 +453,7 @@ public class CarbonTableInputFormat<T> extends CarbonInputFormat<T> {
         }
       }
 
-      FilterResolverIntf filterInterface = carbonTable.resolveFilter(filter, tableProvider);
+      FilterResolverIntf filterInterface = carbonTable.resolveFilter(filter);
       // do block filtering and get split
       List<InputSplit> splits = getSplits(job, filterInterface, segmentList, matchedPartitions,
           partitionInfo, oldPartitionIdList, new SegmentUpdateStatusManager(carbonTable));


[02/50] [abbrv] carbondata git commit: [HOTFIX][Search] Carbon should throw exception for TEXT_MATCH UDF

Posted by ma...@apache.org.
[HOTFIX][Search] Carbon should throw exception for TEXT_MATCH UDF
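
As a hedged illustration of the guard this hotfix adds (simplified, hypothetical names; per the diff below, the real check lives in RowLevelFilterExecuterImpl and throws FilterUnsupportedException): when a match expression is evaluated but no datamap has produced a pruned bitset, the query should fail loudly instead of silently returning rows.

import java.util.BitSet;

// Sketch: fail fast when a TEXT_MATCH-style filter is applied but no datamap
// has supplied a pruned bitset for the table.
final class MatchFilterGuardSketch {
  static BitSet applyMatchFilter(BitSet datamapBitSet, String tableName) {
    if (datamapBitSet == null) {
      // No datamap exists on this table, so the expression cannot be evaluated
      // here; surface the error instead of returning an arbitrary result.
      throw new UnsupportedOperationException(
          "TEXT_MATCH is not supported on table " + tableName);
    }
    return datamapBitSet;
  }
}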

This closes #2242


Project: http://git-wip-us.apache.org/repos/asf/carbondata/repo
Commit: http://git-wip-us.apache.org/repos/asf/carbondata/commit/325eac2b
Tree: http://git-wip-us.apache.org/repos/asf/carbondata/tree/325eac2b
Diff: http://git-wip-us.apache.org/repos/asf/carbondata/diff/325eac2b

Branch: refs/heads/spark-2.3
Commit: 325eac2b4acfc461533f806ac62572097a567276
Parents: 74ea24d
Author: Jacky Li <ja...@qq.com>
Authored: Fri Apr 27 21:42:33 2018 +0800
Committer: QiangCai <qi...@qq.com>
Committed: Tue May 8 19:23:18 2018 +0800

----------------------------------------------------------------------
 .../executer/RowLevelFilterExecuterImpl.java      |  8 +++++++-
 .../lucene/LuceneFineGrainDataMapSuite.scala      | 18 ++++++++++++++++++
 .../carbondata/spark/rdd/CarbonScanRDD.scala      |  2 +-
 3 files changed, 26 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/carbondata/blob/325eac2b/core/src/main/java/org/apache/carbondata/core/scan/filter/executer/RowLevelFilterExecuterImpl.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/scan/filter/executer/RowLevelFilterExecuterImpl.java b/core/src/main/java/org/apache/carbondata/core/scan/filter/executer/RowLevelFilterExecuterImpl.java
index 7e92aef..4bee89b 100644
--- a/core/src/main/java/org/apache/carbondata/core/scan/filter/executer/RowLevelFilterExecuterImpl.java
+++ b/core/src/main/java/org/apache/carbondata/core/scan/filter/executer/RowLevelFilterExecuterImpl.java
@@ -193,7 +193,13 @@ public class RowLevelFilterExecuterImpl implements FilterExecuter {
   public BitSetGroup applyFilter(RawBlockletColumnChunks rawBlockletColumnChunks,
       boolean useBitsetPipeLine) throws FilterUnsupportedException, IOException {
     if (exp instanceof MatchExpression) {
-      return rawBlockletColumnChunks.getBitSetGroup();
+      BitSetGroup bitSetGroup = rawBlockletColumnChunks.getBitSetGroup();
+      if (bitSetGroup == null) {
+        // It means there are no datamap created on this table
+        throw new FilterUnsupportedException(String.format("%s is not supported on table %s",
+            exp.getFilterExpressionType().name(), tableIdentifier.getTableName()));
+      }
+      return bitSetGroup;
     }
     readColumnChunks(rawBlockletColumnChunks);
     // CHECKSTYLE:ON

http://git-wip-us.apache.org/repos/asf/carbondata/blob/325eac2b/integration/spark-common-test/src/test/scala/org/apache/carbondata/datamap/lucene/LuceneFineGrainDataMapSuite.scala
----------------------------------------------------------------------
diff --git a/integration/spark-common-test/src/test/scala/org/apache/carbondata/datamap/lucene/LuceneFineGrainDataMapSuite.scala b/integration/spark-common-test/src/test/scala/org/apache/carbondata/datamap/lucene/LuceneFineGrainDataMapSuite.scala
index b0a59d4..406ee41 100644
--- a/integration/spark-common-test/src/test/scala/org/apache/carbondata/datamap/lucene/LuceneFineGrainDataMapSuite.scala
+++ b/integration/spark-common-test/src/test/scala/org/apache/carbondata/datamap/lucene/LuceneFineGrainDataMapSuite.scala
@@ -21,6 +21,7 @@ import java.io.{File, PrintWriter}
 
 import scala.util.Random
 
+import org.apache.spark.SparkException
 import org.apache.spark.sql.{CarbonEnv, Row}
 import org.apache.spark.sql.test.util.QueryTest
 import org.scalatest.BeforeAndAfterAll
@@ -29,6 +30,7 @@ import org.apache.carbondata.common.exceptions.sql.{MalformedCarbonCommandExcept
 import org.apache.carbondata.core.constants.CarbonCommonConstants
 import org.apache.carbondata.core.util.CarbonProperties
 import org.apache.carbondata.core.datamap.DataMapStoreManager
+import org.apache.carbondata.core.scan.expression.exception.FilterUnsupportedException
 
 class LuceneFineGrainDataMapSuite extends QueryTest with BeforeAndAfterAll {
 
@@ -699,6 +701,22 @@ class LuceneFineGrainDataMapSuite extends QueryTest with BeforeAndAfterAll {
 
   }
 
+  test("test text_match on normal table") {
+    sql("DROP TABLE IF EXISTS table1")
+    sql(
+      """
+        | CREATE TABLE table1(id INT, name STRING, city STRING, age INT)
+        | STORED BY 'org.apache.carbondata.format'
+        | TBLPROPERTIES('SORT_COLUMNS'='city,name')
+      """.stripMargin)
+    sql(s"LOAD DATA LOCAL INPATH '$file2' INTO TABLE table1 OPTIONS('header'='false')")
+    val msg = intercept[SparkException] {
+      sql("select * from table1 where TEXT_MATCH('name:n*')").show()
+    }
+    assert(msg.getCause.getMessage.contains("TEXT_MATCH is not supported on table"))
+    sql("DROP TABLE table1")
+  }
+
   override protected def afterAll(): Unit = {
     LuceneFineGrainDataMapSuite.deleteFile(file2)
     sql("DROP TABLE IF EXISTS normal_test")

http://git-wip-us.apache.org/repos/asf/carbondata/blob/325eac2b/integration/spark-common/src/main/scala/org/apache/carbondata/spark/rdd/CarbonScanRDD.scala
----------------------------------------------------------------------
diff --git a/integration/spark-common/src/main/scala/org/apache/carbondata/spark/rdd/CarbonScanRDD.scala b/integration/spark-common/src/main/scala/org/apache/carbondata/spark/rdd/CarbonScanRDD.scala
index 24a6927..b9a3371 100644
--- a/integration/spark-common/src/main/scala/org/apache/carbondata/spark/rdd/CarbonScanRDD.scala
+++ b/integration/spark-common/src/main/scala/org/apache/carbondata/spark/rdd/CarbonScanRDD.scala
@@ -452,7 +452,7 @@ class CarbonScanRDD[T: ClassTag](
             reader.close()
           } catch {
             case e: Exception =>
-              LOGGER.error(e)
+              LogServiceFactory.getLogService(this.getClass.getCanonicalName).error(e)
           }
           reader = null
         }


[18/50] [abbrv] carbondata git commit: [CARBONDATA-2417] [SDK Writer] SDK writer goes to infinite wait when consumer thread goes dead

Posted by ma...@apache.org.
[CARBONDATA-2417] [SDK Writer] SDK writer goes to infinite wait when consumer thread goes dead

Problem:
The SDK writer can hang in an infinite wait in multi-threaded scenarios.

Analysis:
In multi-threaded scenarios where multiple writer threads try to add rows to the same RowBatch, the batch size limit cannot be enforced because the addition is not synchronized; this can lead to an ArrayIndexOutOfBoundsException or to data loss/mismatch issues.
If multiple writer threads are adding data to the RowBatch and closeWriter is called immediately after launching the threads, there is no way to know when all threads have finished writing; closeWriter returns immediately after setting the close flag to true, so complete processing of the data is not ensured.

Solution:
Make the row addition logic synchronized and modify the code in closeWriter to ensure data completeness.
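
To make the fix concrete, a minimal sketch follows (hypothetical class and field names, not the actual CarbonOutputIteratorWrapper code) of a batch wrapper in which row addition is synchronized and close() publishes the last partially filled batch before returning:

import java.util.concurrent.ArrayBlockingQueue;

// Minimal sketch: addRow() is synchronized so concurrent writer threads cannot
// overflow a batch, and close() hands the last partially filled batch to the
// consumer queue before returning.
class BatchWriterSketch {
  private static final int BATCH_SIZE = 1000;
  private final ArrayBlockingQueue<Object[][]> queue = new ArrayBlockingQueue<>(10);
  private Object[][] currentBatch = new Object[BATCH_SIZE][];
  private int rowsInBatch = 0;

  // Only one writer thread mutates the current batch at a time.
  public synchronized void addRow(Object[] row) throws InterruptedException {
    currentBatch[rowsInBatch++] = row;
    if (rowsInBatch == BATCH_SIZE) {
      queue.put(currentBatch);                  // hand over the full batch
      currentBatch = new Object[BATCH_SIZE][];
      rowsInBatch = 0;
    }
  }

  // Must not return before the last partial batch is visible to the consumer.
  public synchronized void close() throws InterruptedException {
    if (rowsInBatch > 0) {
      Object[][] last = new Object[rowsInBatch][];
      System.arraycopy(currentBatch, 0, last, 0, rowsInBatch);
      queue.put(last);
      rowsInBatch = 0;
    }
  }
}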

This closes #2286


Project: http://git-wip-us.apache.org/repos/asf/carbondata/repo
Commit: http://git-wip-us.apache.org/repos/asf/carbondata/commit/fe436c3e
Tree: http://git-wip-us.apache.org/repos/asf/carbondata/tree/fe436c3e
Diff: http://git-wip-us.apache.org/repos/asf/carbondata/diff/fe436c3e

Branch: refs/heads/spark-2.3
Commit: fe436c3ecef632f6a5621d391c6dd8724813b25f
Parents: 35a7b5e
Author: manishgupta88 <to...@gmail.com>
Authored: Wed May 9 15:08:05 2018 +0530
Committer: ravipesala <ra...@gmail.com>
Committed: Thu May 10 20:54:11 2018 +0530

----------------------------------------------------------------------
 docs/sdk-writer-guide.md                                       | 3 ++-
 .../loading/iterator/CarbonOutputIteratorWrapper.java          | 6 ++++--
 .../java/org/apache/carbondata/sdk/file/CSVCarbonWriter.java   | 1 -
 .../main/java/org/apache/carbondata/sdk/file/CarbonWriter.java | 3 ++-
 4 files changed, 8 insertions(+), 5 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/carbondata/blob/fe436c3e/docs/sdk-writer-guide.md
----------------------------------------------------------------------
diff --git a/docs/sdk-writer-guide.md b/docs/sdk-writer-guide.md
index bfbf997..18b583a 100644
--- a/docs/sdk-writer-guide.md
+++ b/docs/sdk-writer-guide.md
@@ -281,7 +281,8 @@ public CarbonWriter buildWriterForAvroInput() throws IOException, InvalidLoadOpt
 /**
 * Write an object to the file, the format of the object depends on the implementation
 * If AvroCarbonWriter, object is of type org.apache.avro.generic.GenericData.Record 
-* If CSVCarbonWriter, object is of type String[] 
+* If CSVCarbonWriter, object is of type String[]
+* Note: This API is not thread safe
 * @param object
 * @throws IOException
 */

http://git-wip-us.apache.org/repos/asf/carbondata/blob/fe436c3e/processing/src/main/java/org/apache/carbondata/processing/loading/iterator/CarbonOutputIteratorWrapper.java
----------------------------------------------------------------------
diff --git a/processing/src/main/java/org/apache/carbondata/processing/loading/iterator/CarbonOutputIteratorWrapper.java b/processing/src/main/java/org/apache/carbondata/processing/loading/iterator/CarbonOutputIteratorWrapper.java
index 4067be1..deb628c 100644
--- a/processing/src/main/java/org/apache/carbondata/processing/loading/iterator/CarbonOutputIteratorWrapper.java
+++ b/processing/src/main/java/org/apache/carbondata/processing/loading/iterator/CarbonOutputIteratorWrapper.java
@@ -21,6 +21,7 @@ import java.util.concurrent.ArrayBlockingQueue;
 import java.util.concurrent.TimeUnit;
 
 import org.apache.carbondata.common.CarbonIterator;
+import org.apache.carbondata.core.util.CarbonProperties;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -33,12 +34,12 @@ public class CarbonOutputIteratorWrapper extends CarbonIterator<Object[]> {
 
   private static final Log LOG = LogFactory.getLog(CarbonOutputIteratorWrapper.class);
 
-  private boolean close = false;
+  private boolean close;
 
   /**
    * Number of rows kept in memory at most will be batchSize * queue size
    */
-  private int batchSize = 1000;
+  private int batchSize = CarbonProperties.getInstance().getBatchSize();
 
   private RowBatch loadBatch = new RowBatch(batchSize);
 
@@ -98,6 +99,7 @@ public class CarbonOutputIteratorWrapper extends CarbonIterator<Object[]> {
         close = true;
         return;
       }
+      // below code will ensure that the last RowBatch is consumed properly
       loadBatch.readyRead();
       if (loadBatch.size > 0) {
         queue.put(loadBatch);

http://git-wip-us.apache.org/repos/asf/carbondata/blob/fe436c3e/store/sdk/src/main/java/org/apache/carbondata/sdk/file/CSVCarbonWriter.java
----------------------------------------------------------------------
diff --git a/store/sdk/src/main/java/org/apache/carbondata/sdk/file/CSVCarbonWriter.java b/store/sdk/src/main/java/org/apache/carbondata/sdk/file/CSVCarbonWriter.java
index 62d5860..627e060 100644
--- a/store/sdk/src/main/java/org/apache/carbondata/sdk/file/CSVCarbonWriter.java
+++ b/store/sdk/src/main/java/org/apache/carbondata/sdk/file/CSVCarbonWriter.java
@@ -69,7 +69,6 @@ class CSVCarbonWriter extends CarbonWriter {
       writable.set((String[]) object);
       recordWriter.write(NullWritable.get(), writable);
     } catch (Exception e) {
-      close();
       throw new IOException(e);
     }
   }

http://git-wip-us.apache.org/repos/asf/carbondata/blob/fe436c3e/store/sdk/src/main/java/org/apache/carbondata/sdk/file/CarbonWriter.java
----------------------------------------------------------------------
diff --git a/store/sdk/src/main/java/org/apache/carbondata/sdk/file/CarbonWriter.java b/store/sdk/src/main/java/org/apache/carbondata/sdk/file/CarbonWriter.java
index e29aa18..60ad060 100644
--- a/store/sdk/src/main/java/org/apache/carbondata/sdk/file/CarbonWriter.java
+++ b/store/sdk/src/main/java/org/apache/carbondata/sdk/file/CarbonWriter.java
@@ -32,7 +32,8 @@ public abstract class CarbonWriter {
 
   /**
    * Write an object to the file, the format of the object depends on the
-   * implementation
+   * implementation.
+   * Note: This API is not thread safe
    */
   public abstract void write(Object object) throws IOException;
 


[19/50] [abbrv] carbondata git commit: [CARBONDATA-2442][CARBONDATA-2469] Fixed: multiple issues in sdk writer and external table

Posted by ma...@apache.org.
[CARBONDATA-2442][CARBONDATA-2469] Fixed: multiple issues in sdk writer and external table

problem1: when the output of two SDK writers with different schemas is placed in the
same folder and read, the result is not as expected and contains many null values.

root cause: when multiple carbondata and index files are placed in the same folder,
the table schema is inferred from the first file only; there is no validation that
compares this schema with the schema in every other index file.

solution: compare the table schema with the schema of every other index file and
throw an exception if there is a mismatch.
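
A hedged sketch of the intended validation (illustrative names, not the real ColumnSchema API): every index file's column list must match the inferred table schema column by column before the read proceeds.

import java.io.IOException;
import java.util.List;

// Sketch of a strict schema check across files: sizes must match and every
// column must agree on name and data type; any mismatch aborts the read.
final class SchemaCheckSketch {

  static void validate(List<ColumnDesc> tableColumns, List<ColumnDesc> fileColumns)
      throws IOException {
    if (tableColumns.size() != fileColumns.size()) {
      throw new IOException("All the files don't have the same schema");
    }
    for (int i = 0; i < tableColumns.size(); i++) {
      if (!tableColumns.get(i).matches(fileColumns.get(i))) {
        throw new IOException("All the files don't have the same schema");
      }
    }
  }

  // Illustrative column descriptor; the real check also compares ColumnSchema
  // fields such as unique id, dimension flag, scale, precision and encodings.
  static final class ColumnDesc {
    final String name;
    final String dataType;
    ColumnDesc(String name, String dataType) { this.name = name; this.dataType = dataType; }
    boolean matches(ColumnDesc other) {
      return name.equals(other.name) && dataType.equals(other.dataType);
    }
  }
}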

problem2: an external table must show its own location instead of the default store location
solution: for external tables, show the carbon table path instead of the default store
location in describe formatted

This closes #2273


Project: http://git-wip-us.apache.org/repos/asf/carbondata/repo
Commit: http://git-wip-us.apache.org/repos/asf/carbondata/commit/1dfbcfcc
Tree: http://git-wip-us.apache.org/repos/asf/carbondata/tree/1dfbcfcc
Diff: http://git-wip-us.apache.org/repos/asf/carbondata/diff/1dfbcfcc

Branch: refs/heads/spark-2.3
Commit: 1dfbcfccca5302fd02c31d2c2386cafa75b1f10c
Parents: fe436c3
Author: ajantha-bhat <aj...@gmail.com>
Authored: Sat May 5 16:59:44 2018 +0530
Committer: kunal642 <ku...@gmail.com>
Committed: Thu May 10 23:02:43 2018 +0530

----------------------------------------------------------------------
 .../schema/table/column/ColumnSchema.java       |  30 ++++++
 .../LatestFilesReadCommittedScope.java          |   5 +
 .../apache/carbondata/core/util/CarbonUtil.java |  64 +++++++++---
 .../hadoop/api/CarbonTableInputFormat.java      |  43 ++++++++
 .../createTable/TestCreateExternalTable.scala   |   2 +
 .../TestNonTransactionalCarbonTable.scala       | 103 ++++++++++++++++++-
 .../table/CarbonDescribeFormattedCommand.scala  |   4 +-
 7 files changed, 233 insertions(+), 18 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/carbondata/blob/1dfbcfcc/core/src/main/java/org/apache/carbondata/core/metadata/schema/table/column/ColumnSchema.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/metadata/schema/table/column/ColumnSchema.java b/core/src/main/java/org/apache/carbondata/core/metadata/schema/table/column/ColumnSchema.java
index edede18..1f05f63 100644
--- a/core/src/main/java/org/apache/carbondata/core/metadata/schema/table/column/ColumnSchema.java
+++ b/core/src/main/java/org/apache/carbondata/core/metadata/schema/table/column/ColumnSchema.java
@@ -343,6 +343,36 @@ public class ColumnSchema implements Serializable, Writable {
   }
 
   /**
+   * method to compare columnSchema,
+   * other parameters along with just column name and column data type
+   * @param obj
+   * @return
+   */
+  public boolean equalsWithStrictCheck(Object obj) {
+    if (!this.equals(obj)) {
+      return false;
+    }
+    ColumnSchema other = (ColumnSchema) obj;
+    if (!columnUniqueId.equals(other.columnUniqueId) ||
+        (isDimensionColumn != other.isDimensionColumn) ||
+        (scale != other.scale) ||
+        (precision != other.precision) ||
+        (isSortColumn != other.isSortColumn)) {
+      return false;
+    }
+    if (encodingList.size() != other.encodingList.size()) {
+      return false;
+    }
+    for (int i = 0; i < encodingList.size(); i++) {
+      if (encodingList.get(i).compareTo(other.encodingList.get(i)) != 0) {
+        return false;
+      }
+    }
+
+    return true;
+  }
+
+  /**
    * @return the dataType
    */
   public DataType getDataType() {

http://git-wip-us.apache.org/repos/asf/carbondata/blob/1dfbcfcc/core/src/main/java/org/apache/carbondata/core/readcommitter/LatestFilesReadCommittedScope.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/readcommitter/LatestFilesReadCommittedScope.java b/core/src/main/java/org/apache/carbondata/core/readcommitter/LatestFilesReadCommittedScope.java
index 3f870b8..8abf537 100644
--- a/core/src/main/java/org/apache/carbondata/core/readcommitter/LatestFilesReadCommittedScope.java
+++ b/core/src/main/java/org/apache/carbondata/core/readcommitter/LatestFilesReadCommittedScope.java
@@ -122,6 +122,11 @@ public class LatestFilesReadCommittedScope implements ReadCommittedScope {
   @Override public void takeCarbonIndexFileSnapShot() throws IOException {
     // Read the current file Path get the list of indexes from the path.
     CarbonFile file = FileFactory.getCarbonFile(carbonFilePath);
+    if (file == null) {
+      // For nonTransactional table, files can be removed at any point of time.
+      // So cannot assume files will be present
+      throw new IOException("No files are present in the table location :"+ carbonFilePath);
+    }
     Map<String, List<String>> indexFileStore = new HashMap<>();
     if (file.isDirectory()) {
       CarbonFile[] carbonIndexFiles = SegmentIndexFileStore.getCarbonIndexFiles(carbonFilePath);

http://git-wip-us.apache.org/repos/asf/carbondata/blob/1dfbcfcc/core/src/main/java/org/apache/carbondata/core/util/CarbonUtil.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/util/CarbonUtil.java b/core/src/main/java/org/apache/carbondata/core/util/CarbonUtil.java
index b42167d..ac0a800 100644
--- a/core/src/main/java/org/apache/carbondata/core/util/CarbonUtil.java
+++ b/core/src/main/java/org/apache/carbondata/core/util/CarbonUtil.java
@@ -73,6 +73,7 @@ import org.apache.carbondata.core.metadata.schema.table.column.ColumnSchema;
 import org.apache.carbondata.core.metadata.schema.table.column.ParentColumnTableRelation;
 import org.apache.carbondata.core.mutate.UpdateVO;
 import org.apache.carbondata.core.reader.CarbonHeaderReader;
+import org.apache.carbondata.core.reader.CarbonIndexFileReader;
 import org.apache.carbondata.core.reader.ThriftReader;
 import org.apache.carbondata.core.reader.ThriftReader.TBaseCreator;
 import org.apache.carbondata.core.scan.model.ProjectionDimension;
@@ -2354,6 +2355,56 @@ public final class CarbonUtil {
     }
     CarbonHeaderReader carbonHeaderReader = new CarbonHeaderReader(fistFilePath);
     List<ColumnSchema> columnSchemaList = carbonHeaderReader.readSchema();
+    // only columnSchema is the valid entry, reset all dummy entries.
+    TableSchema tableSchema = getDummyTableSchema(tableName,columnSchemaList);
+
+    ThriftWrapperSchemaConverterImpl thriftWrapperSchemaConverter =
+        new ThriftWrapperSchemaConverterImpl();
+    org.apache.carbondata.format.TableSchema thriftFactTable =
+        thriftWrapperSchemaConverter.fromWrapperToExternalTableSchema(tableSchema);
+    org.apache.carbondata.format.TableInfo tableInfo =
+        new org.apache.carbondata.format.TableInfo(thriftFactTable,
+            new ArrayList<org.apache.carbondata.format.TableSchema>());
+
+    tableInfo.setDataMapSchemas(null);
+    return tableInfo;
+  }
+
+  /**
+   * This method will infer the schema file from a given index file path
+   * @param indexFilePath
+   * @param tableName
+   * @return
+   * @throws IOException
+   */
+  public static org.apache.carbondata.format.TableInfo inferSchemaFromIndexFile(
+      String indexFilePath, String tableName) throws IOException {
+    CarbonIndexFileReader indexFileReader = new CarbonIndexFileReader();
+    indexFileReader.openThriftReader(indexFilePath);
+    org.apache.carbondata.format.IndexHeader readIndexHeader = indexFileReader.readIndexHeader();
+    List<ColumnSchema> columnSchemaList = new ArrayList<ColumnSchema>();
+    List<org.apache.carbondata.format.ColumnSchema> table_columns =
+        readIndexHeader.getTable_columns();
+    for (int i = 0; i < table_columns.size(); i++) {
+      columnSchemaList.add(thriftColumnSchmeaToWrapperColumnSchema(table_columns.get(i)));
+    }
+    // only columnSchema is the valid entry, reset all dummy entries.
+    TableSchema tableSchema = getDummyTableSchema(tableName, columnSchemaList);
+
+    ThriftWrapperSchemaConverterImpl thriftWrapperSchemaConverter =
+        new ThriftWrapperSchemaConverterImpl();
+    org.apache.carbondata.format.TableSchema thriftFactTable =
+        thriftWrapperSchemaConverter.fromWrapperToExternalTableSchema(tableSchema);
+    org.apache.carbondata.format.TableInfo tableInfo =
+        new org.apache.carbondata.format.TableInfo(thriftFactTable,
+            new ArrayList<org.apache.carbondata.format.TableSchema>());
+
+    tableInfo.setDataMapSchemas(null);
+    return tableInfo;
+  }
+
+  private static TableSchema getDummyTableSchema(String tableName,
+      List<ColumnSchema> columnSchemaList) {
     TableSchema tableSchema = new TableSchema();
     tableSchema.setTableName(tableName);
     tableSchema.setBucketingInfo(null);
@@ -2361,8 +2412,6 @@ public final class CarbonUtil {
     tableSchema.setTableId(UUID.randomUUID().toString());
     tableSchema.setListOfColumns(columnSchemaList);
 
-    ThriftWrapperSchemaConverterImpl thriftWrapperSchemaConverter =
-        new ThriftWrapperSchemaConverterImpl();
     SchemaEvolutionEntry schemaEvolutionEntry = new SchemaEvolutionEntry();
     schemaEvolutionEntry.setTimeStamp(System.currentTimeMillis());
     SchemaEvolution schemaEvol = new SchemaEvolution();
@@ -2370,18 +2419,9 @@ public final class CarbonUtil {
     schEntryList.add(schemaEvolutionEntry);
     schemaEvol.setSchemaEvolutionEntryList(schEntryList);
     tableSchema.setSchemaEvalution(schemaEvol);
-
-    org.apache.carbondata.format.TableSchema thriftFactTable =
-        thriftWrapperSchemaConverter.fromWrapperToExternalTableSchema(tableSchema);
-    org.apache.carbondata.format.TableInfo tableInfo =
-        new org.apache.carbondata.format.TableInfo(thriftFactTable,
-            new ArrayList<org.apache.carbondata.format.TableSchema>());
-
-    tableInfo.setDataMapSchemas(null);
-    return tableInfo;
+    return tableSchema;
   }
 
-
   public static void dropDatabaseDirectory(String databasePath)
       throws IOException, InterruptedException {
     FileFactory.FileType fileType = FileFactory.getFileType(databasePath);

http://git-wip-us.apache.org/repos/asf/carbondata/blob/1dfbcfcc/hadoop/src/main/java/org/apache/carbondata/hadoop/api/CarbonTableInputFormat.java
----------------------------------------------------------------------
diff --git a/hadoop/src/main/java/org/apache/carbondata/hadoop/api/CarbonTableInputFormat.java b/hadoop/src/main/java/org/apache/carbondata/hadoop/api/CarbonTableInputFormat.java
index 990e9a7..a32e17a 100644
--- a/hadoop/src/main/java/org/apache/carbondata/hadoop/api/CarbonTableInputFormat.java
+++ b/hadoop/src/main/java/org/apache/carbondata/hadoop/api/CarbonTableInputFormat.java
@@ -36,11 +36,14 @@ import org.apache.carbondata.core.datastore.impl.FileFactory;
 import org.apache.carbondata.core.indexstore.ExtendedBlocklet;
 import org.apache.carbondata.core.indexstore.PartitionSpec;
 import org.apache.carbondata.core.metadata.AbsoluteTableIdentifier;
+import org.apache.carbondata.core.metadata.converter.SchemaConverter;
+import org.apache.carbondata.core.metadata.converter.ThriftWrapperSchemaConverterImpl;
 import org.apache.carbondata.core.metadata.schema.PartitionInfo;
 import org.apache.carbondata.core.metadata.schema.SchemaReader;
 import org.apache.carbondata.core.metadata.schema.partition.PartitionType;
 import org.apache.carbondata.core.metadata.schema.table.CarbonTable;
 import org.apache.carbondata.core.metadata.schema.table.TableInfo;
+import org.apache.carbondata.core.metadata.schema.table.column.ColumnSchema;
 import org.apache.carbondata.core.mutate.CarbonUpdateUtil;
 import org.apache.carbondata.core.mutate.SegmentUpdateDetails;
 import org.apache.carbondata.core.mutate.UpdateVO;
@@ -149,6 +152,35 @@ public class CarbonTableInputFormat<T> extends CarbonInputFormat<T> {
     SegmentStatusManager segmentStatusManager = new SegmentStatusManager(identifier);
     SegmentStatusManager.ValidAndInvalidSegmentsInfo segments = segmentStatusManager
         .getValidAndInvalidSegments(loadMetadataDetails, this.readCommittedScope);
+
+    // For NonTransactional table, compare the schema of all index files with inferred schema.
+    // If there is a mismatch throw exception. As all files must be of same schema.
+    if (!carbonTable.getTableInfo().isTransactionalTable()) {
+      SchemaConverter schemaConverter = new ThriftWrapperSchemaConverterImpl();
+      for (Segment segment : segments.getValidSegments()) {
+        Map<String, String> indexFiles = segment.getCommittedIndexFile();
+        for (Map.Entry<String, String> indexFileEntry : indexFiles.entrySet()) {
+          Path indexFile = new Path(indexFileEntry.getKey());
+          org.apache.carbondata.format.TableInfo tableInfo = CarbonUtil.inferSchemaFromIndexFile(
+              indexFile.toString(), carbonTable.getTableName());
+          TableInfo wrapperTableInfo = schemaConverter.fromExternalToWrapperTableInfo(
+              tableInfo, identifier.getDatabaseName(),
+              identifier.getTableName(),
+              identifier.getTablePath());
+          List<ColumnSchema> indexFileColumnList =
+              wrapperTableInfo.getFactTable().getListOfColumns();
+          List<ColumnSchema> tableColumnList =
+              carbonTable.getTableInfo().getFactTable().getListOfColumns();
+          if (!compareColumnSchemaList(indexFileColumnList, tableColumnList)) {
+            LOG.error("Schema of " + indexFile.getName()
+                + " doesn't match with the table's schema");
+            throw new IOException("All the files doesn't have same schema. "
+                + "Unsupported operation on nonTransactional table. Check logs.");
+          }
+        }
+      }
+    }
+
     // to check whether only streaming segments access is enabled or not,
     // if access streaming segment is true then data will be read from streaming segments
     boolean accessStreamingSegments = getAccessStreamingSegments(job.getConfiguration());
@@ -262,6 +294,17 @@ public class CarbonTableInputFormat<T> extends CarbonInputFormat<T> {
     return splits;
   }
 
+  private boolean compareColumnSchemaList(List<ColumnSchema> indexFileColumnList,
+      List<ColumnSchema> tableColumnList) {
+    if (indexFileColumnList.size() != tableColumnList.size()) {
+      return false;
+    }
+    for (int i = 0; i < tableColumnList.size(); i++) {
+      return indexFileColumnList.get(i).equalsWithStrictCheck(tableColumnList.get(i));
+    }
+    return false;
+  }
+
   /**
    * Below method will be used to get the filter segments when query is fired on pre Aggregate
    * and main table in case of streaming.

http://git-wip-us.apache.org/repos/asf/carbondata/blob/1dfbcfcc/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/createTable/TestCreateExternalTable.scala
----------------------------------------------------------------------
diff --git a/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/createTable/TestCreateExternalTable.scala b/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/createTable/TestCreateExternalTable.scala
index 67370eb..3b21d0a 100644
--- a/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/createTable/TestCreateExternalTable.scala
+++ b/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/createTable/TestCreateExternalTable.scala
@@ -53,6 +53,8 @@ class TestCreateExternalTable extends QueryTest with BeforeAndAfterAll {
        """.stripMargin)
     checkAnswer(sql("SELECT count(*) from source"), sql("SELECT count(*) from origin"))
 
+    checkExistence(sql("describe formatted source"), true, storeLocation+"/origin")
+
     val carbonTable = CarbonEnv.getCarbonTable(None, "source")(sqlContext.sparkSession)
     assert(carbonTable.isExternalTable)
     

http://git-wip-us.apache.org/repos/asf/carbondata/blob/1dfbcfcc/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/createTable/TestNonTransactionalCarbonTable.scala
----------------------------------------------------------------------
diff --git a/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/createTable/TestNonTransactionalCarbonTable.scala b/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/createTable/TestNonTransactionalCarbonTable.scala
index ca6ac3c..3c51efe 100644
--- a/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/createTable/TestNonTransactionalCarbonTable.scala
+++ b/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/createTable/TestNonTransactionalCarbonTable.scala
@@ -19,7 +19,6 @@ package org.apache.carbondata.spark.testsuite.createTable
 
 import java.sql.Timestamp
 import java.io.{File, FileFilter, IOException}
-import java.io.{File, FileFilter}
 import java.util
 
 import org.apache.commons.io.FileUtils
@@ -33,8 +32,7 @@ import org.apache.carbondata.core.constants.CarbonCommonConstants
 import org.apache.carbondata.core.datastore.filesystem.CarbonFile
 import org.apache.carbondata.core.datastore.impl.FileFactory
 import org.apache.carbondata.core.util.CarbonUtil
-import org.apache.carbondata.sdk.file.{CarbonWriter, CarbonWriterBuilder, Field, Schema}
-import org.apache.carbondata.sdk.file.{AvroCarbonWriter, CarbonWriter, Field, Schema}
+import org.apache.carbondata.sdk.file.AvroCarbonWriter
 import scala.collection.JavaConverters._
 import scala.collection.mutable
 
@@ -42,7 +40,7 @@ import org.apache.avro
 import org.apache.commons.lang.CharEncoding
 import tech.allegro.schema.json2avro.converter.JsonAvroConverter
 
-import org.apache.carbondata.core.metadata.datatype.{DataTypes, StructField}
+import org.apache.carbondata.core.metadata.datatype.DataTypes
 import org.apache.carbondata.sdk.file.{CarbonWriter, CarbonWriterBuilder, Field, Schema}
 
 
@@ -172,6 +170,35 @@ class TestNonTransactionalCarbonTable extends QueryTest with BeforeAndAfterAll {
     }
   }
 
+  // prepare sdk writer output with other schema
+  def buildTestDataOtherDataType(rows: Int, sortColumns: Array[String]): Any = {
+    val fields: Array[Field] = new Array[Field](3)
+    // same column name, but name as boolean type
+    fields(0) = new Field("name", DataTypes.BOOLEAN)
+    fields(1) = new Field("age", DataTypes.INT)
+    fields(2) = new Field("height", DataTypes.DOUBLE)
+
+    try {
+      val builder = CarbonWriter.builder()
+      val writer =
+        builder.withSchema(new Schema(fields)).outputPath(writerPath)
+          .isTransactionalTable(false)
+          .uniqueIdentifier(System.currentTimeMillis()).withBlockSize(2).sortBy(sortColumns)
+          .buildWriterForCSVInput()
+
+      var i = 0
+      while (i < rows) {
+        writer.write(Array[String]("true", String.valueOf(i), String.valueOf(i.toDouble / 2)))
+        i += 1
+      }
+      writer.close()
+    } catch {
+      case ex: Exception => throw new RuntimeException(ex)
+      case _ => None
+    }
+  }
+
+
   def cleanTestData() = {
     FileUtils.deleteDirectory(new File(writerPath))
   }
@@ -212,6 +239,8 @@ class TestNonTransactionalCarbonTable extends QueryTest with BeforeAndAfterAll {
 
     checkExistence(sql("describe formatted sdkOutputTable"), true, "age,name")
 
+    checkExistence(sql("describe formatted sdkOutputTable"), true, writerPath)
+
     sql("DROP TABLE sdkOutputTable")
     // drop table should not delete the files
     assert(new File(writerPath).exists())
@@ -719,6 +748,72 @@ class TestNonTransactionalCarbonTable extends QueryTest with BeforeAndAfterAll {
 
   }
 
+
+  test("Read sdk two writer output with same column name but different sort columns") {
+    FileUtils.deleteDirectory(new File(writerPath))
+    buildTestDataOtherDataType(3, Array[String]("name"))
+    assert(new File(writerPath).exists())
+
+    sql("DROP TABLE IF EXISTS sdkOutputTable")
+
+    sql(
+      s"""CREATE EXTERNAL TABLE sdkOutputTable STORED BY 'carbondata' LOCATION
+         |'$writerPath' """.stripMargin)
+
+    checkAnswer(sql("select * from sdkOutputTable"), Seq(Row(true, 0, 0.0),
+      Row(true, 1, 0.5),
+      Row(true, 2, 1.0)))
+
+
+    buildTestDataOtherDataType(3, Array[String]("age"))
+    // put other sdk writer output to same path,
+    // which has same column names but different sort column
+    val exception =
+    intercept[IOException] {
+      sql("select * from sdkOutputTable").show(false)
+    }
+    assert(exception.getMessage()
+      .contains("All the files doesn't have same schema"))
+
+    sql("DROP TABLE sdkOutputTable")
+    // drop table should not delete the files
+    assert(new File(writerPath).exists())
+    cleanTestData()
+  }
+
+
+  test("Read sdk two writer output with same column name but different data type ") {
+    buildTestDataSingleFile()
+    assert(new File(writerPath).exists())
+
+    sql("DROP TABLE IF EXISTS sdkOutputTable")
+
+    sql(
+      s"""CREATE EXTERNAL TABLE sdkOutputTable STORED BY 'carbondata' LOCATION
+         |'$writerPath' """.stripMargin)
+
+    checkAnswer(sql("select * from sdkOutputTable"), Seq(Row("robot0", 0, 0.0),
+      Row("robot1", 1, 0.5),
+      Row("robot2", 2, 1.0)))
+
+    // put other sdk writer output to same path,
+    // which has same column names but different data type
+    buildTestDataOtherDataType(3, null)
+
+    val exception =
+      intercept[IOException] {
+        sql("select * from sdkOutputTable").show(false)
+      }
+    assert(exception.getMessage()
+      .contains("All the files doesn't have same schema"))
+
+
+    sql("DROP TABLE sdkOutputTable")
+    // drop table should not delete the files
+    assert(new File(writerPath).exists())
+    cleanTestData()
+  }
+
   private def WriteFilesWithAvroWriter(rows: Int, mySchema: String, json: String): Unit = {
     // conversion to GenericData.Record
     val nn = new avro.Schema.Parser().parse(mySchema)

http://git-wip-us.apache.org/repos/asf/carbondata/blob/1dfbcfcc/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/table/CarbonDescribeFormattedCommand.scala
----------------------------------------------------------------------
diff --git a/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/table/CarbonDescribeFormattedCommand.scala b/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/table/CarbonDescribeFormattedCommand.scala
index 9b69373..375c413 100644
--- a/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/table/CarbonDescribeFormattedCommand.scala
+++ b/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/table/CarbonDescribeFormattedCommand.scala
@@ -86,10 +86,10 @@ private[sql] case class CarbonDescribeFormattedCommand(
     results ++= Seq(("Database Name", relation.carbonTable.getDatabaseName, "")
     )
     results ++= Seq(("Table Name", relation.carbonTable.getTableName, ""))
-    if (carbonTable.isTransactionalTable) {
+    if (!carbonTable.isExternalTable) {
       results ++= Seq(("CARBON Store Path ", CarbonProperties.getStorePath, ""))
     } else {
-      // for NonTransactional table should show files path.
+      // for external table should show files path.
       results ++= Seq(("CARBON Store Path ", carbonTable.getTablePath, ""))
     }
 


[44/50] [abbrv] carbondata git commit: [CARBONDATA-2468] addition of column to default sort_column is handled

Posted by ma...@apache.org.
[CARBONDATA-2468] addition of column to default sort_column is handled

issue : handling of the default sort_columns case (no SORT_COLUMNS specified) was missing
solution : added a condition so that, when no sort columns are specified, eligible dimension columns are marked as sort columns by default
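
As a hedged sketch of the rule (simplified helper, hypothetical names): with an explicit SORT_COLUMNS list only those columns are marked as sort columns; with no list, the simple dimension columns are used by default.

import java.util.ArrayList;
import java.util.List;

// Sketch: choose the effective sort columns. An explicit request wins;
// otherwise every simple dimension column becomes a sort column.
final class DefaultSortColumnsSketch {
  static List<String> resolve(List<String> requested, List<String> dimensionColumns) {
    if (!requested.isEmpty()) {
      return new ArrayList<>(requested);
    }
    return new ArrayList<>(dimensionColumns);
  }
}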

This closes #2293


Project: http://git-wip-us.apache.org/repos/asf/carbondata/repo
Commit: http://git-wip-us.apache.org/repos/asf/carbondata/commit/1c5b5265
Tree: http://git-wip-us.apache.org/repos/asf/carbondata/tree/1c5b5265
Diff: http://git-wip-us.apache.org/repos/asf/carbondata/diff/1c5b5265

Branch: refs/heads/spark-2.3
Commit: 1c5b5265921e89f20c5f5b69c135c9a1acc2d1df
Parents: f1a6c7c
Author: rahulforallp <ra...@knoldus.in>
Authored: Thu May 10 16:17:39 2018 +0530
Committer: kumarvishal09 <ku...@gmail.com>
Committed: Mon May 14 17:47:45 2018 +0530

----------------------------------------------------------------------
 .../TestNonTransactionalCarbonTable.scala       | 28 +++++++++++++++++---
 .../sdk/file/CarbonWriterBuilder.java           |  8 +++++-
 2 files changed, 32 insertions(+), 4 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/carbondata/blob/1c5b5265/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/createTable/TestNonTransactionalCarbonTable.scala
----------------------------------------------------------------------
diff --git a/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/createTable/TestNonTransactionalCarbonTable.scala b/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/createTable/TestNonTransactionalCarbonTable.scala
index 58ce5fa..fb9c862 100644
--- a/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/createTable/TestNonTransactionalCarbonTable.scala
+++ b/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/createTable/TestNonTransactionalCarbonTable.scala
@@ -98,9 +98,9 @@ class TestNonTransactionalCarbonTable extends QueryTest with BeforeAndAfterAll {
     buildTestData(3, false, options)
   }
 
-  def buildTestDataWithSortColumns(): Any = {
+  def buildTestDataWithSortColumns(sortColumns: List[String]): Any = {
     FileUtils.deleteDirectory(new File(writerPath))
-    buildTestData(3, false, null, List("age", "name"))
+    buildTestData(3, false, null, sortColumns)
   }
 
   def buildTestData(rows: Int, persistSchema: Boolean, options: util.Map[String, String]): Any = {
@@ -302,7 +302,7 @@ class TestNonTransactionalCarbonTable extends QueryTest with BeforeAndAfterAll {
   }
 
   test("test create external table with sort columns") {
-    buildTestDataWithSortColumns()
+    buildTestDataWithSortColumns(List("age","name"))
     assert(new File(writerPath).exists())
     sql("DROP TABLE IF EXISTS sdkOutputTable")
 
@@ -316,6 +316,28 @@ class TestNonTransactionalCarbonTable extends QueryTest with BeforeAndAfterAll {
 
     checkExistence(sql("describe formatted sdkOutputTable"), true, writerPath)
 
+    buildTestDataWithSortColumns(List("age"))
+    assert(new File(writerPath).exists())
+    sql("DROP TABLE IF EXISTS sdkOutputTable")
+    // with partition
+    sql(
+      s"""CREATE EXTERNAL TABLE sdkOutputTable(name string) PARTITIONED BY (age int) STORED BY
+         |'carbondata' LOCATION
+         |'$writerPath' """.stripMargin)
+
+    checkExistence(sql("describe formatted sdkOutputTable"), true, "age")
+
+    buildTestDataSingleFile()
+    assert(new File(writerPath).exists())
+    sql("DROP TABLE IF EXISTS sdkOutputTable")
+    // with partition
+    sql(
+      s"""CREATE EXTERNAL TABLE sdkOutputTable(name string) PARTITIONED BY (age int) STORED BY
+         |'carbondata' LOCATION
+         |'$writerPath' """.stripMargin)
+
+    checkExistence(sql("describe formatted sdkOutputTable"), true, "name")
+
     sql("DROP TABLE sdkOutputTable")
     // drop table should not delete the files
     assert(new File(writerPath).exists())

http://git-wip-us.apache.org/repos/asf/carbondata/blob/1c5b5265/store/sdk/src/main/java/org/apache/carbondata/sdk/file/CarbonWriterBuilder.java
----------------------------------------------------------------------
diff --git a/store/sdk/src/main/java/org/apache/carbondata/sdk/file/CarbonWriterBuilder.java b/store/sdk/src/main/java/org/apache/carbondata/sdk/file/CarbonWriterBuilder.java
index 00ba8a5..1816539 100644
--- a/store/sdk/src/main/java/org/apache/carbondata/sdk/file/CarbonWriterBuilder.java
+++ b/store/sdk/src/main/java/org/apache/carbondata/sdk/file/CarbonWriterBuilder.java
@@ -445,6 +445,7 @@ public class CarbonWriterBuilder {
             "column: " + sortColumn + " specified in sort columns does not exist in schema");
       }
     }
+    int i = 0;
     for (Field field : fields) {
       if (null != field) {
         int isSortColumn = sortColumnsList.indexOf(field.getFieldName());
@@ -481,9 +482,14 @@ public class CarbonWriterBuilder {
           ColumnSchema columnSchema = tableSchemaBuilder
               .addColumn(new StructField(field.getFieldName(), field.getDataType()),
                   valIndex, isSortColumn > -1);
-          columnSchema.setSortColumn(true);
           if (isSortColumn > -1) {
+            columnSchema.setSortColumn(true);
             sortColumnsSchemaList[isSortColumn] = columnSchema;
+          } else if (sortColumnsList.isEmpty() && columnSchema.isDimensionColumn()
+              && columnSchema.getNumberOfChild() < 1) {
+            columnSchema.setSortColumn(true);
+            sortColumnsSchemaList[i] = columnSchema;
+            i++;
           }
         }
       }


[13/50] [abbrv] carbondata git commit: [CARBONDATA-2352] Added SDV test cases for Partition with Pre-Aggregate table

Posted by ma...@apache.org.
[CARBONDATA-2352] Added SDV test cases for Partition with Pre-Aggregate table

Added SDV test cases for support of partitioning with Pre-Aggregate tables.

This closes #2175


Project: http://git-wip-us.apache.org/repos/asf/carbondata/repo
Commit: http://git-wip-us.apache.org/repos/asf/carbondata/commit/cc0cbbac
Tree: http://git-wip-us.apache.org/repos/asf/carbondata/tree/cc0cbbac
Diff: http://git-wip-us.apache.org/repos/asf/carbondata/diff/cc0cbbac

Branch: refs/heads/spark-2.3
Commit: cc0cbbac7dca8f14904f231df16a524c07c9af5f
Parents: e70b6b4
Author: praveenmeenakshi56 <pr...@gmail.com>
Authored: Mon Apr 16 15:14:20 2018 +0530
Committer: kunal642 <ku...@gmail.com>
Committed: Thu May 10 12:49:39 2018 +0530

----------------------------------------------------------------------
 .../PartitionWithPreAggregateTestCase.scala     | 275 +++++++++++++++++++
 .../cluster/sdv/suite/SDVSuites.scala           |   6 +-
 2 files changed, 279 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/carbondata/blob/cc0cbbac/integration/spark-common-cluster-test/src/test/scala/org/apache/carbondata/cluster/sdv/generated/PartitionWithPreAggregateTestCase.scala
----------------------------------------------------------------------
diff --git a/integration/spark-common-cluster-test/src/test/scala/org/apache/carbondata/cluster/sdv/generated/PartitionWithPreAggregateTestCase.scala b/integration/spark-common-cluster-test/src/test/scala/org/apache/carbondata/cluster/sdv/generated/PartitionWithPreAggregateTestCase.scala
new file mode 100644
index 0000000..87f5fda
--- /dev/null
+++ b/integration/spark-common-cluster-test/src/test/scala/org/apache/carbondata/cluster/sdv/generated/PartitionWithPreAggregateTestCase.scala
@@ -0,0 +1,275 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.carbondata.cluster.sdv.generated
+import org.apache.carbondata.core.constants.CarbonCommonConstants
+import org.apache.carbondata.core.util.CarbonProperties
+import org.apache.spark.sql.Row
+import org.apache.spark.sql.common.util._
+import org.scalatest.BeforeAndAfterAll
+/**
+  * Test Class for Support of Partition with PreAggregate table
+  */
+class PartitionWithPreAggregateTestCase extends QueryTest with BeforeAndAfterAll {
+  override def beforeAll = {
+    CarbonProperties.getInstance()
+      .addProperty(CarbonCommonConstants.CARBON_TIMESTAMP_FORMAT, "yyyy/MM/dd HH:mm:ss")
+      .addProperty(CarbonCommonConstants.CARBON_DATE_FORMAT, "yyyy/MM/dd")
+  }
+
+  //Loading data into partitioned table with SORT_SCOPE=LOCAL_SORT
+  test("Partition-With-PreAggregate_TC001", Include) {
+    sql("drop table if exists partition_table")
+    sql(
+      s"""CREATE TABLE partition_table(shortField SHORT, intField INT, bigintField LONG,
+         |doubleField DOUBLE, timestamp TIMESTAMP, decimalField DECIMAL(18,2),dateField DATE,
+         |charField CHAR(5), floatField FLOAT ) PARTITIONED BY (stringField STRING) STORED BY
+         |'carbondata' TBLPROPERTIES('SORT_SCOPE'='LOCAL_SORT')""".stripMargin)
+    sql(
+      s"""load data inpath '$resourcesPath/Data/partition/list_partition_table.csv' into table
+         |partition_table""".stripMargin)
+    sql(
+      "create datamap ag1 on table partition_table using 'preaggregate' as select shortField, sum" +
+      "(intField) from partition_table group by shortField")
+    checkAnswer(sql(
+      s"""select decimalfield from partition_table where charfield='e' and
+         |floatfield=307.301 group by decimalfield limit 1""".stripMargin),
+      Seq(Row(159.10)))
+  }
+
+  //Loading data into partitioned table with SORT_SCOPE=GLOBAL_SORT
+  test("Partition-With-PreAggregate_TC002", Include) {
+    sql("drop table if exists partition_table")
+    sql(
+      s"""CREATE TABLE partition_table(shortField SHORT, intField INT, bigintField LONG,
+         |doubleField DOUBLE, timestamp TIMESTAMP, decimalField DECIMAL(18,2),dateField DATE,
+         |charField CHAR(5), floatField FLOAT ) PARTITIONED BY (stringField STRING) STORED BY
+         |'carbondata' TBLPROPERTIES('SORT_SCOPE'='GLOBAL_SORT')""".stripMargin)
+    sql(
+      s"""load data inpath '$resourcesPath/Data/partition/list_partition_table.csv' into table
+         |partition_table""".stripMargin)
+    sql(
+      "create datamap ag1 on table partition_table using 'preaggregate' as select shortField, sum" +
+      "(intField) from partition_table group by shortField")
+    checkAnswer(sql(
+      s"""select decimalfield from partition_table where charfield='e' and
+         |floatfield=307.301 group by decimalfield limit 1""".stripMargin),
+      Seq(Row(159.10)))
+  }
+
+  //Loading data into partitioned table with SORT_SCOPE=BATCH_SORT
+  test("Partition-With-PreAggregate_TC003", Include) {
+    sql("drop table if exists partition_table")
+    sql(
+      s"""CREATE TABLE partition_table(shortField SHORT, intField INT, bigintField LONG,
+         |doubleField DOUBLE, timestamp TIMESTAMP, decimalField DECIMAL(18,2),dateField DATE,
+         |charField CHAR(5), floatField FLOAT ) PARTITIONED BY (stringField STRING) STORED BY
+         |'carbondata' TBLPROPERTIES('SORT_SCOPE'='BATCH_SORT')""".stripMargin)
+    sql(
+      s"""load data inpath '$resourcesPath/Data/partition/list_partition_table.csv' into table
+         |partition_table""".stripMargin)
+    sql(
+      "create datamap ag1 on table partition_table using 'preaggregate' as select shortField, sum" +
+      "(intField) from partition_table group by shortField")
+    checkAnswer(sql(
+      s"""select decimalfield from partition_table where charfield='e' and
+         |floatfield=307.301 group by decimalfield limit 1""".stripMargin),
+      Seq(Row(159.10)))
+  }
+
+  //Loading data into partitioned table with SORT_SCOPE=NO_SORT
+  test("Partition-With-PreAggregate_TC004", Include) {
+    sql("drop table if exists partition_table")
+    sql(
+      s"""CREATE TABLE partition_table(shortField SHORT, intField INT, bigintField LONG,
+         |doubleField DOUBLE, timestamp TIMESTAMP, decimalField DECIMAL(18,2),dateField DATE,
+         |charField CHAR(5), floatField FLOAT ) PARTITIONED BY (stringField STRING) STORED BY
+         |'carbondata' TBLPROPERTIES('SORT_SCOPE'='NO_SORT')""".stripMargin)
+    sql(
+      s"""load data inpath '$resourcesPath/Data/partition/list_partition_table.csv' into table
+         |partition_table""".stripMargin)
+    sql(
+      "create datamap ag1 on table partition_table using 'preaggregate' as select shortField, sum" +
+      "(intField) from partition_table group by shortField")
+    checkAnswer(sql(
+      s"""select decimalfield from partition_table where charfield='e' and
+         |floatfield=307.301 group by decimalfield limit 1""".stripMargin),
+      Seq(Row(159.10)))
+  }
+
+  //Verify Aggregation query on Loaded Partition table with Pre-Aggregate table
+  test("Partition-With-PreAggregate_TC005", Include) {
+    sql("drop table if exists partition_table")
+    sql(
+      s"""CREATE TABLE partition_table(shortField SHORT, intField INT, bigintField LONG,
+         |doubleField DOUBLE, timestamp TIMESTAMP, decimalField DECIMAL(18,2),dateField DATE,
+         |charField CHAR(5), floatField FLOAT ) PARTITIONED BY (stringField STRING) STORED BY
+         |'carbondata'""".stripMargin)
+    sql(
+      "create datamap ag1 on table partition_table using 'preaggregate' as select shortField, sum" +
+      "(intField) from partition_table group by shortField")
+    sql(
+      s"""load data inpath '$resourcesPath/Data/partition/list_partition_table.csv' into table
+         |partition_table""".stripMargin)
+    assert(sql("explain select shortField, sum(intField) from partition_table group by shortField")
+      .collect().head.get(0).toString.contains("partition_table_ag1"))
+  }
+
+  //Verify load with Pre_Aggregate table on Partition table after compaction
+  test("Partition-With-PreAggregate_TC006", Include) {
+    sql("drop table if exists uniqdata")
+    sql(
+      s"""CREATE TABLE uniqdata (CUST_NAME String,ACTIVE_EMUI_VERSION string, DOB timestamp,
+         |BIGINT_COLUMN1 bigint,BIGINT_COLUMN2 bigint,DECIMAL_COLUMN1 decimal(30,10),
+         |DECIMAL_COLUMN2 decimal(36,10),Double_COLUMN1 double, Double_COLUMN2 double,
+         |INTEGER_COLUMN1 int, DOJ timestamp) PARTITIONED BY (CUST_ID int) STORED BY
+         |'carbondata'""".stripMargin)
+    sql(
+      s"""LOAD DATA INPATH  '$resourcesPath/Data/partition/2000_UniqData_partition.csv' into table
+         | uniqdata partition(CUST_ID='1') OPTIONS('DELIMITER'=',','BAD_RECORDS_ACTION'='FORCE',
+         | 'QUOTECHAR'='"','FILEHEADER'='CUST_NAME,ACTIVE_EMUI_VERSION,DOB,BIGINT_COLUMN1,BIGINT_COLUMN2,DECIMAL_COLUMN1,DECIMAL_COLUMN2,Double_COLUMN1,Double_COLUMN2,INTEGER_COLUMN1,DOJ,CUST_ID')""".stripMargin)
+    sql(
+      s"""LOAD DATA INPATH  '$resourcesPath/Data/partition/2000_UniqData_partition.csv' into table
+         | uniqdata partition(CUST_ID='1') OPTIONS('DELIMITER'=',','BAD_RECORDS_ACTION'='FORCE',
+         | 'QUOTECHAR'='"','FILEHEADER'='CUST_NAME,ACTIVE_EMUI_VERSION,DOB,BIGINT_COLUMN1,BIGINT_COLUMN2,DECIMAL_COLUMN1,DECIMAL_COLUMN2,Double_COLUMN1,Double_COLUMN2,INTEGER_COLUMN1,DOJ,CUST_ID')""".stripMargin)
+    sql(
+      s"""LOAD DATA INPATH  '$resourcesPath/Data/partition/2000_UniqData_partition.csv' into table
+         | uniqdata partition(CUST_ID='1') OPTIONS('DELIMITER'=',','BAD_RECORDS_ACTION'='FORCE',
+         | 'QUOTECHAR'='"','FILEHEADER'='CUST_NAME,ACTIVE_EMUI_VERSION,DOB,BIGINT_COLUMN1,BIGINT_COLUMN2,DECIMAL_COLUMN1,DECIMAL_COLUMN2,Double_COLUMN1,Double_COLUMN2,INTEGER_COLUMN1,DOJ,CUST_ID')""".stripMargin)
+    sql(
+      s"""LOAD DATA INPATH  '$resourcesPath/Data/partition/2000_UniqData_partition.csv' into table
+         | uniqdata partition(CUST_ID='1') OPTIONS('DELIMITER'=',','BAD_RECORDS_ACTION'='FORCE',
+         | 'QUOTECHAR'='"','FILEHEADER'='CUST_NAME,ACTIVE_EMUI_VERSION,DOB,BIGINT_COLUMN1,BIGINT_COLUMN2,DECIMAL_COLUMN1,DECIMAL_COLUMN2,Double_COLUMN1,Double_COLUMN2,INTEGER_COLUMN1,DOJ,CUST_ID')""".stripMargin)
+    sql("create datamap ag1 on table uniqdata using 'preaggregate' as select cust_id, " +
+        "sum(Double_COLUMN1) from uniqdata group by cust_id")
+    sql(s"""alter table uniqdata compact 'minor'""")
+    assert(sql("show segments for table uniqdata").collect().tail(2).get(0).toString.equals("0.1"))
+  }
+
+  //Verify load with Pre_Aggregate table on Partition table after two-level compaction
+  test("Partition-With-PreAggregate_TC007", Include) {
+    sql("drop table if exists uniqdata")
+    sql(
+      s"""CREATE TABLE uniqdata (CUST_NAME String,ACTIVE_EMUI_VERSION string, DOB timestamp,
+         |BIGINT_COLUMN1 bigint,BIGINT_COLUMN2 bigint,DECIMAL_COLUMN1 decimal(30,10),
+         |DECIMAL_COLUMN2 decimal(36,10),Double_COLUMN1 double, Double_COLUMN2 double,
+         |INTEGER_COLUMN1 int, DOJ timestamp) PARTITIONED BY (CUST_ID int) STORED BY
+         |'carbondata'""".stripMargin)
+    sql(
+      s"""LOAD DATA INPATH  '$resourcesPath/Data/partition/2000_UniqData_partition.csv' into table
+         | uniqdata partition(CUST_ID='1') OPTIONS('DELIMITER'=',','BAD_RECORDS_ACTION'='FORCE',
+         | 'QUOTECHAR'='"','FILEHEADER'='CUST_NAME,ACTIVE_EMUI_VERSION,DOB,BIGINT_COLUMN1,BIGINT_COLUMN2,DECIMAL_COLUMN1,DECIMAL_COLUMN2,Double_COLUMN1,Double_COLUMN2,INTEGER_COLUMN1,DOJ,CUST_ID')""".stripMargin)
+    sql(
+      s"""LOAD DATA INPATH  '$resourcesPath/Data/partition/2000_UniqData_partition.csv' into table
+         | uniqdata partition(CUST_ID='1') OPTIONS('DELIMITER'=',','BAD_RECORDS_ACTION'='FORCE',
+         | 'QUOTECHAR'='"','FILEHEADER'='CUST_NAME,ACTIVE_EMUI_VERSION,DOB,BIGINT_COLUMN1,BIGINT_COLUMN2,DECIMAL_COLUMN1,DECIMAL_COLUMN2,Double_COLUMN1,Double_COLUMN2,INTEGER_COLUMN1,DOJ,CUST_ID')""".stripMargin)
+    sql(
+      s"""LOAD DATA INPATH  '$resourcesPath/Data/partition/2000_UniqData_partition.csv' into table
+         | uniqdata partition(CUST_ID='1') OPTIONS('DELIMITER'=',','BAD_RECORDS_ACTION'='FORCE',
+         | 'QUOTECHAR'='"','FILEHEADER'='CUST_NAME,ACTIVE_EMUI_VERSION,DOB,BIGINT_COLUMN1,BIGINT_COLUMN2,DECIMAL_COLUMN1,DECIMAL_COLUMN2,Double_COLUMN1,Double_COLUMN2,INTEGER_COLUMN1,DOJ,CUST_ID')""".stripMargin)
+    sql(
+      s"""LOAD DATA INPATH  '$resourcesPath/Data/partition/2000_UniqData_partition.csv' into table
+         | uniqdata partition(CUST_ID='1') OPTIONS('DELIMITER'=',','BAD_RECORDS_ACTION'='FORCE',
+         | 'QUOTECHAR'='"','FILEHEADER'='CUST_NAME,ACTIVE_EMUI_VERSION,DOB,BIGINT_COLUMN1,BIGINT_COLUMN2,DECIMAL_COLUMN1,DECIMAL_COLUMN2,Double_COLUMN1,Double_COLUMN2,INTEGER_COLUMN1,DOJ,CUST_ID')""".stripMargin)
+    sql(
+      s"""LOAD DATA INPATH  '$resourcesPath/Data/partition/2000_UniqData_partition.csv' into table
+         | uniqdata partition(CUST_ID='1') OPTIONS('DELIMITER'=',','BAD_RECORDS_ACTION'='FORCE',
+         | 'QUOTECHAR'='"','FILEHEADER'='CUST_NAME,ACTIVE_EMUI_VERSION,DOB,BIGINT_COLUMN1,BIGINT_COLUMN2,DECIMAL_COLUMN1,DECIMAL_COLUMN2,Double_COLUMN1,Double_COLUMN2,INTEGER_COLUMN1,DOJ,CUST_ID')""".stripMargin)
+    sql(
+      s"""LOAD DATA INPATH  '$resourcesPath/Data/partition/2000_UniqData_partition.csv' into table
+         | uniqdata partition(CUST_ID='1') OPTIONS('DELIMITER'=',','BAD_RECORDS_ACTION'='FORCE',
+         | 'QUOTECHAR'='"','FILEHEADER'='CUST_NAME,ACTIVE_EMUI_VERSION,DOB,BIGINT_COLUMN1,BIGINT_COLUMN2,DECIMAL_COLUMN1,DECIMAL_COLUMN2,Double_COLUMN1,Double_COLUMN2,INTEGER_COLUMN1,DOJ,CUST_ID')""".stripMargin)
+    sql(
+      s"""LOAD DATA INPATH  '$resourcesPath/Data/partition/2000_UniqData_partition.csv' into table
+         | uniqdata partition(CUST_ID='1') OPTIONS('DELIMITER'=',','BAD_RECORDS_ACTION'='FORCE',
+         | 'QUOTECHAR'='"','FILEHEADER'='CUST_NAME,ACTIVE_EMUI_VERSION,DOB,BIGINT_COLUMN1,BIGINT_COLUMN2,DECIMAL_COLUMN1,DECIMAL_COLUMN2,Double_COLUMN1,Double_COLUMN2,INTEGER_COLUMN1,DOJ,CUST_ID')""".stripMargin)
+    sql(
+      s"""LOAD DATA INPATH  '$resourcesPath/Data/partition/2000_UniqData_partition.csv' into table
+         | uniqdata partition(CUST_ID='1') OPTIONS('DELIMITER'=',','BAD_RECORDS_ACTION'='FORCE',
+         | 'QUOTECHAR'='"','FILEHEADER'='CUST_NAME,ACTIVE_EMUI_VERSION,DOB,BIGINT_COLUMN1,BIGINT_COLUMN2,DECIMAL_COLUMN1,DECIMAL_COLUMN2,Double_COLUMN1,Double_COLUMN2,INTEGER_COLUMN1,DOJ,CUST_ID')""".stripMargin)
+    sql("create datamap ag1 on table uniqdata using 'preaggregate' as select cust_id, " +
+        "sum(Double_COLUMN1) from uniqdata group by cust_id")
+    sql(s"""alter table uniqdata compact 'minor'""")
+    sql(s"""alter table uniqdata compact 'major'""")
+    assert(sql("show segments for table uniqdata").collect().tail(7).get(0).toString.equals("0.2"))
+  }
+
+  //Verify join operation on Partition with Pre-Aggregate table
+  test("Partition-With-PreAggregate_TC008", Include) {
+    sql("drop table if exists uniqdata")
+    sql(s"""drop table if exists uniqdata1""")
+    sql(
+      s"""CREATE TABLE uniqdata (CUST_NAME String,ACTIVE_EMUI_VERSION string, DOB timestamp,
+         |BIGINT_COLUMN1 bigint,BIGINT_COLUMN2 bigint,DECIMAL_COLUMN1 decimal(30,10),
+         |DECIMAL_COLUMN2 decimal(36,10),Double_COLUMN1 double, Double_COLUMN2 double,
+         |INTEGER_COLUMN1 int, DOJ timestamp) PARTITIONED BY (CUST_ID int) STORED BY 'carbondata'"""
+        .stripMargin)
+    sql(
+      s"""CREATE TABLE uniqdata1 (CUST_NAME String,ACTIVE_EMUI_VERSION string, DOB timestamp,
+         |BIGINT_COLUMN1 bigint,BIGINT_COLUMN2 bigint,DECIMAL_COLUMN1 decimal(30,10),
+         |DECIMAL_COLUMN2 decimal(36,10),Double_COLUMN1 double, Double_COLUMN2 double,
+         |INTEGER_COLUMN1 int, DOJ timestamp) PARTITIONED BY (CUST_ID int) STORED BY 'carbondata'"""
+        .stripMargin)
+    sql(
+      s"""LOAD DATA INPATH  '$resourcesPath/Data/partition/2000_UniqData_partition.csv' into table
+         | uniqdata partition(CUST_ID='1') OPTIONS('DELIMITER'=',','BAD_RECORDS_ACTION'='FORCE',
+         | 'QUOTECHAR'='"','FILEHEADER'='CUST_NAME,ACTIVE_EMUI_VERSION,DOB,BIGINT_COLUMN1,BIGINT_COLUMN2,DECIMAL_COLUMN1,DECIMAL_COLUMN2,Double_COLUMN1,Double_COLUMN2,INTEGER_COLUMN1,DOJ,CUST_ID')""".stripMargin)
+    sql(
+      s"""LOAD DATA INPATH  '$resourcesPath/Data/partition/2000_UniqData_partition.csv' into table
+         | uniqdata1 partition(CUST_ID='1') OPTIONS('DELIMITER'=',','BAD_RECORDS_ACTION'='FORCE',
+         | 'QUOTECHAR'='"','FILEHEADER'='CUST_NAME,ACTIVE_EMUI_VERSION,DOB,BIGINT_COLUMN1,BIGINT_COLUMN2,DECIMAL_COLUMN1,DECIMAL_COLUMN2,Double_COLUMN1,Double_COLUMN2,INTEGER_COLUMN1,DOJ,CUST_ID')""".stripMargin)
+    sql("create datamap ag1 on table uniqdata using 'preaggregate' as select cust_id, " +
+        "sum(Double_COLUMN1) from uniqdata group by cust_id")
+    sql("create datamap ag1 on table uniqdata1 using 'preaggregate' as select cust_id, " +
+        "sum(Double_COLUMN1) from uniqdata1 group by cust_id")
+    checkAnswer(sql(
+      s""" select a.cust_id, b.cust_id from uniqdata a, uniqdata1 b where
+         |a.cust_id >= b.cust_id limit 1""".stripMargin),Seq(Row(1,1)))
+    sql(s"""drop table if exists uniqdata1""")
+  }
+
+  //Verify date with > filter condition on Partition with Pre-Aggregate table
+  test("Partition-With-PreAggregate_TC009", Include) {
+    sql("drop table if exists uniqdata")
+    sql(
+      s"""CREATE TABLE uniqdata (CUST_NAME String,ACTIVE_EMUI_VERSION string, DOB timestamp,
+         |BIGINT_COLUMN1 bigint,BIGINT_COLUMN2 bigint,DECIMAL_COLUMN1 decimal(30,10),
+         |DECIMAL_COLUMN2 decimal(36,10),Double_COLUMN1 double, Double_COLUMN2 double,
+         |INTEGER_COLUMN1 int, DOJ timestamp) PARTITIONED BY (CUST_ID int) STORED BY 'carbondata'"""
+        .stripMargin)
+    sql("create datamap ag1 on table uniqdata using 'preaggregate' as select cust_id, " +
+        "sum(Double_COLUMN1) from uniqdata group by cust_id")
+    sql(
+      s"""LOAD DATA INPATH  '$resourcesPath/Data/partition/2000_UniqData_partition.csv' into table
+         | uniqdata partition(CUST_ID='4') OPTIONS('DELIMITER'=',' , 'BAD_RECORDS_ACTION'='FORCE',
+         | 'QUOTECHAR'='"','FILEHEADER'='CUST_NAME,ACTIVE_EMUI_VERSION,DOB,BIGINT_COLUMN1,BIGINT_COLUMN2,DECIMAL_COLUMN1,DECIMAL_COLUMN2,Double_COLUMN1,Double_COLUMN2,INTEGER_COLUMN1,DOJ,CUST_ID')""".stripMargin)
+    checkAnswer(sql(
+      s"""select cust_name from uniqdata where active_emui_version =
+         |'ACTIVE_EMUI_VERSION_00000'""".stripMargin),Seq(Row("CUST_NAME_00000")))
+  }
+
+  override def afterAll {
+    CarbonProperties.getInstance()
+      .addProperty(CarbonCommonConstants.CARBON_TIMESTAMP_FORMAT,
+        CarbonCommonConstants.CARBON_TIMESTAMP_DEFAULT_FORMAT)
+      .addProperty(CarbonCommonConstants.CARBON_DATE_FORMAT,
+        CarbonCommonConstants.CARBON_DATE_DEFAULT_FORMAT)
+    sql("drop table if exists uniqdata")
+    sql("drop table if exists partition_table")
+    sql(s"""drop table if exists uniqdata1""")
+
+  }
+}
\ No newline at end of file

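The pattern these tests exercise reduces, outside the suite, to the sketch below (table, datamap and column names are illustrative, and csvPath stands for any suitable test data file); the child table name follows the <parent>_<datamap> convention that TC005 checks in the explain output:

  sql("CREATE TABLE sales(id INT, amount DOUBLE) PARTITIONED BY (country STRING) STORED BY 'carbondata'")
  sql("create datamap agg_sales on table sales using 'preaggregate' as select country, sum(amount) from sales group by country")
  sql(s"load data inpath '$csvPath' into table sales")
  // a matching aggregate query should be rewritten to the pre-aggregate child table
  assert(sql("explain select country, sum(amount) from sales group by country")
    .collect().head.get(0).toString.contains("sales_agg_sales"))
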
http://git-wip-us.apache.org/repos/asf/carbondata/blob/cc0cbbac/integration/spark-common-cluster-test/src/test/scala/org/apache/carbondata/cluster/sdv/suite/SDVSuites.scala
----------------------------------------------------------------------
diff --git a/integration/spark-common-cluster-test/src/test/scala/org/apache/carbondata/cluster/sdv/suite/SDVSuites.scala b/integration/spark-common-cluster-test/src/test/scala/org/apache/carbondata/cluster/sdv/suite/SDVSuites.scala
index dd71f38..2f7d98b 100644
--- a/integration/spark-common-cluster-test/src/test/scala/org/apache/carbondata/cluster/sdv/suite/SDVSuites.scala
+++ b/integration/spark-common-cluster-test/src/test/scala/org/apache/carbondata/cluster/sdv/suite/SDVSuites.scala
@@ -61,7 +61,8 @@ class SDVSuites extends Suites with BeforeAndAfterAll {
                               new Vector2TestCase ::
                               new PreAggregateTestCase ::
                               new TimeSeriesPreAggregateTestCase ::
-                              new TestPartitionWithGlobalSort :: Nil
+                              new TestPartitionWithGlobalSort ::
+                              new PartitionWithPreAggregateTestCase :: Nil
 
   override val nestedSuites = suites.toIndexedSeq
 
@@ -149,7 +150,8 @@ class SDVSuites3 extends Suites with BeforeAndAfterAll {
                     new LuceneTestCase ::
                     new TimeSeriesPreAggregateTestCase :: 
                     new TestPartitionWithGlobalSort ::
-                    new SetParameterTestCase :: Nil
+                    new SetParameterTestCase ::
+                    new PartitionWithPreAggregateTestCase :: Nil
 
   override val nestedSuites = suites.toIndexedSeq
 


[43/50] [abbrv] carbondata git commit: [CARBONDATA-2431] Fixed issue where incremental data added after external table creation was not reflected in select queries.

Posted by ma...@apache.org.
[CARBONDATA-2431] Fixed issue where incremental data added after external table creation was not reflected in select queries.

This closes #2262


Project: http://git-wip-us.apache.org/repos/asf/carbondata/repo
Commit: http://git-wip-us.apache.org/repos/asf/carbondata/commit/f1a6c7cf
Tree: http://git-wip-us.apache.org/repos/asf/carbondata/tree/f1a6c7cf
Diff: http://git-wip-us.apache.org/repos/asf/carbondata/diff/f1a6c7cf

Branch: refs/heads/spark-2.3
Commit: f1a6c7cf548cd33ef26bd99f26c7fcf7e367c9c7
Parents: 2881c6b
Author: rahulforallp <ra...@knoldus.in>
Authored: Thu May 3 14:11:12 2018 +0530
Committer: ravipesala <ra...@gmail.com>
Committed: Mon May 14 11:27:33 2018 +0530

----------------------------------------------------------------------
 .../core/datamap/DataMapStoreManager.java       |  19 +--
 .../apache/carbondata/core/datamap/Segment.java |   7 +
 .../LatestFilesReadCommittedScope.java          |  32 ++++-
 .../ReadCommittedIndexFileSnapShot.java         |  10 +-
 .../core/readcommitter/ReadCommittedScope.java  |   5 +
 .../TableStatusReadCommittedScope.java          |  13 ++
 .../core/statusmanager/SegmentRefreshInfo.java  |  65 +++++++++
 .../hadoop/api/CarbonTableInputFormat.java      |  10 +-
 .../TestNonTransactionalCarbonTable.scala       | 136 +++++++++++++++++++
 9 files changed, 282 insertions(+), 15 deletions(-)
----------------------------------------------------------------------
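
The scenario behind this fix, also covered by the new tests in TestNonTransactionalCarbonTable below, is an external table whose underlying files are rewritten after the table has been created; a later select must pick up the new files. A minimal sketch of that flow, assuming the SDK writer helpers and writerPath defined in that test class (FileUtils and File come from its existing imports):

  buildTestDataSingleFile()                          // initial SDK writer output under writerPath
  sql(s"CREATE EXTERNAL TABLE sdkOutputTable STORED BY 'carbondata' LOCATION '$writerPath'")
  sql("select count(*) from sdkOutputTable").show()  // sees the initially written rows
  FileUtils.deleteDirectory(new File(writerPath))    // replace the files after table creation
  buildTestData(4, false, null)
  sql("select count(*) from sdkOutputTable").show()  // must now reflect the newly written files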


http://git-wip-us.apache.org/repos/asf/carbondata/blob/f1a6c7cf/core/src/main/java/org/apache/carbondata/core/datamap/DataMapStoreManager.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/datamap/DataMapStoreManager.java b/core/src/main/java/org/apache/carbondata/core/datamap/DataMapStoreManager.java
index a3be26a..072b86e 100644
--- a/core/src/main/java/org/apache/carbondata/core/datamap/DataMapStoreManager.java
+++ b/core/src/main/java/org/apache/carbondata/core/datamap/DataMapStoreManager.java
@@ -43,6 +43,7 @@ import org.apache.carbondata.core.metadata.schema.table.DiskBasedDMSchemaStorage
 import org.apache.carbondata.core.metadata.schema.table.RelationIdentifier;
 import org.apache.carbondata.core.mutate.SegmentUpdateDetails;
 import org.apache.carbondata.core.mutate.UpdateVO;
+import org.apache.carbondata.core.statusmanager.SegmentRefreshInfo;
 import org.apache.carbondata.core.statusmanager.SegmentUpdateStatusManager;
 import org.apache.carbondata.core.util.CarbonProperties;
 import org.apache.carbondata.core.util.CarbonSessionInfo;
@@ -454,7 +455,7 @@ public final class DataMapStoreManager {
 
     // This map stores the latest segment refresh time.So in case of update/delete we check the
     // time against this map.
-    private Map<String, Long> segmentRefreshTime = new HashMap<>();
+    private Map<String, SegmentRefreshInfo> segmentRefreshTime = new HashMap<>();
 
     // This map keeps the manual refresh entries from users. It is mainly used for partition
     // altering.
@@ -465,23 +466,25 @@ public final class DataMapStoreManager {
       SegmentUpdateDetails[] updateStatusDetails = statusManager.getUpdateStatusDetails();
       for (SegmentUpdateDetails updateDetails : updateStatusDetails) {
         UpdateVO updateVO = statusManager.getInvalidTimestampRange(updateDetails.getSegmentName());
-        segmentRefreshTime.put(updateVO.getSegmentId(), updateVO.getCreatedOrUpdatedTimeStamp());
+        segmentRefreshTime.put(updateVO.getSegmentId(),
+            new SegmentRefreshInfo(updateVO.getCreatedOrUpdatedTimeStamp(), 0));
       }
     }
 
-    public boolean isRefreshNeeded(String segmentId, SegmentUpdateStatusManager statusManager) {
-      UpdateVO updateVO = statusManager.getInvalidTimestampRange(segmentId);
+    public boolean isRefreshNeeded(Segment seg, UpdateVO updateVo) throws IOException {
+      SegmentRefreshInfo segmentRefreshInfo =
+          seg.getSegmentRefreshInfo(updateVo);
+      String segmentId = seg.getSegmentNo();
       if (segmentRefreshTime.get(segmentId) == null) {
-        segmentRefreshTime.put(segmentId, updateVO.getCreatedOrUpdatedTimeStamp());
+        segmentRefreshTime.put(segmentId, segmentRefreshInfo);
         return true;
       }
       if (manualSegmentRefresh.get(segmentId) != null && manualSegmentRefresh.get(segmentId)) {
         manualSegmentRefresh.put(segmentId, false);
         return true;
       }
-      Long updateTimestamp = updateVO.getLatestUpdateTimestamp();
-      boolean isRefresh =
-          updateTimestamp != null && (updateTimestamp > segmentRefreshTime.get(segmentId));
+
+      boolean isRefresh = segmentRefreshInfo.compare(segmentRefreshTime.get(segmentId));
       if (isRefresh) {
         segmentRefreshTime.remove(segmentId);
       }

http://git-wip-us.apache.org/repos/asf/carbondata/blob/f1a6c7cf/core/src/main/java/org/apache/carbondata/core/datamap/Segment.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/datamap/Segment.java b/core/src/main/java/org/apache/carbondata/core/datamap/Segment.java
index 476f9da..85c7176 100644
--- a/core/src/main/java/org/apache/carbondata/core/datamap/Segment.java
+++ b/core/src/main/java/org/apache/carbondata/core/datamap/Segment.java
@@ -25,8 +25,10 @@ import java.util.Map;
 import java.util.Objects;
 import java.util.Set;
 
+import org.apache.carbondata.core.mutate.UpdateVO;
 import org.apache.carbondata.core.readcommitter.ReadCommittedScope;
 import org.apache.carbondata.core.statusmanager.LoadMetadataDetails;
+import org.apache.carbondata.core.statusmanager.SegmentRefreshInfo;
 import org.apache.carbondata.core.statusmanager.SegmentStatusManager;
 import org.apache.carbondata.core.util.path.CarbonTablePath;
 
@@ -111,6 +113,11 @@ public class Segment implements Serializable {
     return readCommittedScope.getCommittedIndexFile(this);
   }
 
+  public SegmentRefreshInfo getSegmentRefreshInfo(UpdateVO updateVo)
+      throws IOException {
+    return readCommittedScope.getCommitedSegmentRefreshInfo(this, updateVo);
+  }
+
   public String getSegmentNo() {
     return segmentNo;
   }

http://git-wip-us.apache.org/repos/asf/carbondata/blob/f1a6c7cf/core/src/main/java/org/apache/carbondata/core/readcommitter/LatestFilesReadCommittedScope.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/readcommitter/LatestFilesReadCommittedScope.java b/core/src/main/java/org/apache/carbondata/core/readcommitter/LatestFilesReadCommittedScope.java
index de7f8a9..2306330 100644
--- a/core/src/main/java/org/apache/carbondata/core/readcommitter/LatestFilesReadCommittedScope.java
+++ b/core/src/main/java/org/apache/carbondata/core/readcommitter/LatestFilesReadCommittedScope.java
@@ -28,7 +28,9 @@ import org.apache.carbondata.core.datamap.Segment;
 import org.apache.carbondata.core.datastore.filesystem.CarbonFile;
 import org.apache.carbondata.core.datastore.impl.FileFactory;
 import org.apache.carbondata.core.indexstore.blockletindex.SegmentIndexFileStore;
+import org.apache.carbondata.core.mutate.UpdateVO;
 import org.apache.carbondata.core.statusmanager.LoadMetadataDetails;
+import org.apache.carbondata.core.statusmanager.SegmentRefreshInfo;
 import org.apache.carbondata.core.statusmanager.SegmentStatus;
 import org.apache.carbondata.core.util.path.CarbonTablePath;
 
@@ -43,7 +45,7 @@ public class LatestFilesReadCommittedScope implements ReadCommittedScope {
   private ReadCommittedIndexFileSnapShot readCommittedIndexFileSnapShot;
   private LoadMetadataDetails[] loadMetadataDetails;
 
-  public LatestFilesReadCommittedScope(String path) {
+  public LatestFilesReadCommittedScope(String path)  {
     this.carbonFilePath = path;
     try {
       takeCarbonIndexFileSnapShot();
@@ -104,6 +106,20 @@ public class LatestFilesReadCommittedScope implements ReadCommittedScope {
     return indexFileStore;
   }
 
+  @Override public SegmentRefreshInfo getCommitedSegmentRefreshInfo(
+      Segment segment, UpdateVO updateVo) throws IOException {
+    Map<String, SegmentRefreshInfo> snapShot =
+        readCommittedIndexFileSnapShot.getSegmentTimestampUpdaterMap();
+    String segName;
+    if (segment.getSegmentNo() != null) {
+      segName = segment.getSegmentNo();
+    } else {
+      segName = segment.getSegmentFileName();
+    }
+    SegmentRefreshInfo segmentRefreshInfo = snapShot.get(segName);
+    return segmentRefreshInfo;
+  }
+
   private String getSegmentID(String carbonIndexFileName, String indexFilePath) {
     if (indexFilePath.contains("/Fact/Part0/Segment_")) {
       // This is CarbonFile case where the Index files are present inside the Segment Folder
@@ -128,6 +144,7 @@ public class LatestFilesReadCommittedScope implements ReadCommittedScope {
       throw new IOException("No files are present in the table location :" + carbonFilePath);
     }
     Map<String, List<String>> indexFileStore = new HashMap<>();
+    Map<String, SegmentRefreshInfo> segmentTimestampUpdaterMap = new HashMap<>();
     if (file.isDirectory()) {
       CarbonFile[] carbonIndexFiles = SegmentIndexFileStore.getCarbonIndexFiles(carbonFilePath);
       for (int i = 0; i < carbonIndexFiles.length; i++) {
@@ -139,18 +156,29 @@ public class LatestFilesReadCommittedScope implements ReadCommittedScope {
               getSegmentID(carbonIndexFiles[i].getName(), carbonIndexFiles[i].getAbsolutePath());
           // TODO. During Partition table handling, place Segment File Name.
           List<String> indexList;
+          SegmentRefreshInfo segmentRefreshInfo;
           if (indexFileStore.get(segId) == null) {
             indexList = new ArrayList<>(1);
+            segmentRefreshInfo =
+                new SegmentRefreshInfo(carbonIndexFiles[i].getLastModifiedTime(), 0);
+            segmentTimestampUpdaterMap.put(segId, segmentRefreshInfo);
           } else {
             // Entry is already present.
             indexList = indexFileStore.get(segId);
+            segmentRefreshInfo = segmentTimestampUpdaterMap.get(segId);
           }
           indexList.add(carbonIndexFiles[i].getAbsolutePath());
+          if (segmentRefreshInfo.getSegmentUpdatedTimestamp() < carbonIndexFiles[i]
+              .getLastModifiedTime()) {
+            segmentRefreshInfo
+                .setSegmentUpdatedTimestamp(carbonIndexFiles[i].getLastModifiedTime());
+          }
           indexFileStore.put(segId, indexList);
+          segmentRefreshInfo.setCountOfFileInSegment(indexList.size());
         }
       }
       ReadCommittedIndexFileSnapShot readCommittedIndexFileSnapShot =
-          new ReadCommittedIndexFileSnapShot(indexFileStore);
+          new ReadCommittedIndexFileSnapShot(indexFileStore, segmentTimestampUpdaterMap);
       this.readCommittedIndexFileSnapShot = readCommittedIndexFileSnapShot;
       prepareLoadMetadata();
     } else {

http://git-wip-us.apache.org/repos/asf/carbondata/blob/f1a6c7cf/core/src/main/java/org/apache/carbondata/core/readcommitter/ReadCommittedIndexFileSnapShot.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/readcommitter/ReadCommittedIndexFileSnapShot.java b/core/src/main/java/org/apache/carbondata/core/readcommitter/ReadCommittedIndexFileSnapShot.java
index 3e8e04f..70ca6ec 100644
--- a/core/src/main/java/org/apache/carbondata/core/readcommitter/ReadCommittedIndexFileSnapShot.java
+++ b/core/src/main/java/org/apache/carbondata/core/readcommitter/ReadCommittedIndexFileSnapShot.java
@@ -23,6 +23,7 @@ import java.util.Map;
 
 import org.apache.carbondata.common.annotations.InterfaceAudience;
 import org.apache.carbondata.common.annotations.InterfaceStability;
+import org.apache.carbondata.core.statusmanager.SegmentRefreshInfo;
 
 /**
  * This class is going to save the Index files of which a snapshot is taken
@@ -36,12 +37,19 @@ public class ReadCommittedIndexFileSnapShot implements Serializable {
    * Segment Numbers are mapped with list of Index Files.
    */
   private Map<String, List<String>> segmentIndexFileMap;
+  private Map<String, SegmentRefreshInfo> segmentTimestampUpdaterMap;
 
-  public ReadCommittedIndexFileSnapShot(Map<String, List<String>> segmentIndexFileMap) {
+  public ReadCommittedIndexFileSnapShot(Map<String, List<String>> segmentIndexFileMap,
+      Map<String, SegmentRefreshInfo> segmentTimestampUpdaterMap) {
     this.segmentIndexFileMap = segmentIndexFileMap;
+    this.segmentTimestampUpdaterMap = segmentTimestampUpdaterMap;
   }
 
   public Map<String, List<String>> getSegmentIndexFileMap() {
     return segmentIndexFileMap;
   }
+
+  public Map<String, SegmentRefreshInfo> getSegmentTimestampUpdaterMap() {
+    return segmentTimestampUpdaterMap;
+  }
 }

http://git-wip-us.apache.org/repos/asf/carbondata/blob/f1a6c7cf/core/src/main/java/org/apache/carbondata/core/readcommitter/ReadCommittedScope.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/readcommitter/ReadCommittedScope.java b/core/src/main/java/org/apache/carbondata/core/readcommitter/ReadCommittedScope.java
index 9ae462b..6ff4b89 100644
--- a/core/src/main/java/org/apache/carbondata/core/readcommitter/ReadCommittedScope.java
+++ b/core/src/main/java/org/apache/carbondata/core/readcommitter/ReadCommittedScope.java
@@ -23,7 +23,9 @@ import java.util.Map;
 import org.apache.carbondata.common.annotations.InterfaceAudience;
 import org.apache.carbondata.common.annotations.InterfaceStability;
 import org.apache.carbondata.core.datamap.Segment;
+import org.apache.carbondata.core.mutate.UpdateVO;
 import org.apache.carbondata.core.statusmanager.LoadMetadataDetails;
+import org.apache.carbondata.core.statusmanager.SegmentRefreshInfo;
 
 /**
  * ReadCommitted interface that defines a read scope.
@@ -43,5 +45,8 @@ public interface ReadCommittedScope extends Serializable {
    */
   public Map<String, String> getCommittedIndexFile(Segment segment) throws IOException ;
 
+  public SegmentRefreshInfo getCommitedSegmentRefreshInfo(
+      Segment segment, UpdateVO updateVo) throws IOException;
+
   public void takeCarbonIndexFileSnapShot() throws IOException;
 }

http://git-wip-us.apache.org/repos/asf/carbondata/blob/f1a6c7cf/core/src/main/java/org/apache/carbondata/core/readcommitter/TableStatusReadCommittedScope.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/readcommitter/TableStatusReadCommittedScope.java b/core/src/main/java/org/apache/carbondata/core/readcommitter/TableStatusReadCommittedScope.java
index 41ce31c..91ebd41 100644
--- a/core/src/main/java/org/apache/carbondata/core/readcommitter/TableStatusReadCommittedScope.java
+++ b/core/src/main/java/org/apache/carbondata/core/readcommitter/TableStatusReadCommittedScope.java
@@ -25,7 +25,9 @@ import org.apache.carbondata.core.datamap.Segment;
 import org.apache.carbondata.core.indexstore.blockletindex.SegmentIndexFileStore;
 import org.apache.carbondata.core.metadata.AbsoluteTableIdentifier;
 import org.apache.carbondata.core.metadata.SegmentFileStore;
+import org.apache.carbondata.core.mutate.UpdateVO;
 import org.apache.carbondata.core.statusmanager.LoadMetadataDetails;
+import org.apache.carbondata.core.statusmanager.SegmentRefreshInfo;
 import org.apache.carbondata.core.statusmanager.SegmentStatusManager;
 import org.apache.carbondata.core.util.path.CarbonTablePath;
 
@@ -77,6 +79,17 @@ public class TableStatusReadCommittedScope implements ReadCommittedScope {
     return indexFiles;
   }
 
+  public SegmentRefreshInfo getCommitedSegmentRefreshInfo(Segment segment, UpdateVO updateVo)
+      throws IOException {
+    SegmentRefreshInfo segmentRefreshInfo;
+    if (updateVo != null) {
+      segmentRefreshInfo = new SegmentRefreshInfo(updateVo.getCreatedOrUpdatedTimeStamp(), 0);
+    } else {
+      segmentRefreshInfo = new SegmentRefreshInfo(0L, 0);
+    }
+    return segmentRefreshInfo;
+  }
+
   @Override public void takeCarbonIndexFileSnapShot() throws IOException {
     // Only Segment Information is updated.
    // File information will be fetched on the fly according to the fetched segment info.

http://git-wip-us.apache.org/repos/asf/carbondata/blob/f1a6c7cf/core/src/main/java/org/apache/carbondata/core/statusmanager/SegmentRefreshInfo.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/statusmanager/SegmentRefreshInfo.java b/core/src/main/java/org/apache/carbondata/core/statusmanager/SegmentRefreshInfo.java
new file mode 100644
index 0000000..11fb73f
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/core/statusmanager/SegmentRefreshInfo.java
@@ -0,0 +1,65 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.carbondata.core.statusmanager;
+
+import java.io.Serializable;
+
+public class SegmentRefreshInfo implements Serializable {
+
+  private Long segmentUpdatedTimestamp;
+  private Integer countOfFileInSegment;
+
+  public SegmentRefreshInfo(Long segmentUpdatedTimestamp, Integer countOfFileInSegment) {
+    this.segmentUpdatedTimestamp = segmentUpdatedTimestamp;
+    this.countOfFileInSegment = countOfFileInSegment;
+  }
+
+  public Long getSegmentUpdatedTimestamp() {
+    return segmentUpdatedTimestamp;
+  }
+
+  public void setSegmentUpdatedTimestamp(Long segmentUpdatedTimestamp) {
+    this.segmentUpdatedTimestamp = segmentUpdatedTimestamp;
+  }
+
+  public Integer getCountOfFileInSegment() {
+    return countOfFileInSegment;
+  }
+
+  public void setCountOfFileInSegment(Integer countOfFileInSegment) {
+    this.countOfFileInSegment = countOfFileInSegment;
+  }
+
+  public boolean compare(Object o) {
+    if (!(o instanceof SegmentRefreshInfo)) return false;
+
+    SegmentRefreshInfo that = (SegmentRefreshInfo) o;
+
+    if (segmentUpdatedTimestamp > that.segmentUpdatedTimestamp || !countOfFileInSegment
+        .equals(that.countOfFileInSegment)) {
+      return true;
+    }
+    return false;
+  }
+
+  @Override public int hashCode() {
+    int result = segmentUpdatedTimestamp.hashCode();
+    result = 31 * result + countOfFileInSegment.hashCode();
+    return result;
+  }
+}

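With this class, the refresh check in TableSegmentRefresher reduces to SegmentRefreshInfo.compare: a segment is treated as stale when its newest index-file timestamp moves forward or when its index-file count changes. A minimal sketch of that contract, with illustrative values:

  val cached  = new SegmentRefreshInfo(1000L, 2)  // snapshot stored at the previous query
  val current = new SegmentRefreshInfo(1000L, 3)  // one more index file has appeared since then
  // compare returns true when a refresh is needed, so the segment's datamap entries get cleared
  assert(current.compare(cached))
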
http://git-wip-us.apache.org/repos/asf/carbondata/blob/f1a6c7cf/hadoop/src/main/java/org/apache/carbondata/hadoop/api/CarbonTableInputFormat.java
----------------------------------------------------------------------
diff --git a/hadoop/src/main/java/org/apache/carbondata/hadoop/api/CarbonTableInputFormat.java b/hadoop/src/main/java/org/apache/carbondata/hadoop/api/CarbonTableInputFormat.java
index a32e17a..1db3138 100644
--- a/hadoop/src/main/java/org/apache/carbondata/hadoop/api/CarbonTableInputFormat.java
+++ b/hadoop/src/main/java/org/apache/carbondata/hadoop/api/CarbonTableInputFormat.java
@@ -230,13 +230,13 @@ public class CarbonTableInputFormat<T> extends CarbonInputFormat<T> {
             readCommittedScope);
     // Clean the updated segments from memory if the update happens on segments
     List<Segment> toBeCleanedSegments = new ArrayList<>();
-    for (SegmentUpdateDetails segmentUpdateDetail : updateStatusManager
-        .getUpdateStatusDetails()) {
+    for (Segment filteredSegment : filteredSegmentToAccess) {
       boolean refreshNeeded =
           DataMapStoreManager.getInstance().getTableSegmentRefresher(carbonTable)
-              .isRefreshNeeded(segmentUpdateDetail.getSegmentName(), updateStatusManager);
+              .isRefreshNeeded(filteredSegment,
+                  updateStatusManager.getInvalidTimestampRange(filteredSegment.getSegmentNo()));
       if (refreshNeeded) {
-        toBeCleanedSegments.add(new Segment(segmentUpdateDetail.getSegmentName(), null));
+        toBeCleanedSegments.add(filteredSegment);
       }
     }
     // Clean segments if refresh is needed
@@ -246,6 +246,8 @@ public class CarbonTableInputFormat<T> extends CarbonInputFormat<T> {
         toBeCleanedSegments.add(segment);
       }
     }
+
+
     if (toBeCleanedSegments.size() > 0) {
       DataMapStoreManager.getInstance()
           .clearInvalidSegments(getOrCreateCarbonTable(job.getConfiguration()),

http://git-wip-us.apache.org/repos/asf/carbondata/blob/f1a6c7cf/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/createTable/TestNonTransactionalCarbonTable.scala
----------------------------------------------------------------------
diff --git a/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/createTable/TestNonTransactionalCarbonTable.scala b/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/createTable/TestNonTransactionalCarbonTable.scala
index 86fda21..58ce5fa 100644
--- a/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/createTable/TestNonTransactionalCarbonTable.scala
+++ b/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/createTable/TestNonTransactionalCarbonTable.scala
@@ -198,6 +198,40 @@ class TestNonTransactionalCarbonTable extends QueryTest with BeforeAndAfterAll {
     }
   }
 
+  // prepare sdk writer output
+  def buildTestDataWithSameUUID(rows: Int,
+      persistSchema: Boolean,
+      options: util.Map[String, String],
+      sortColumns: List[String]): Any = {
+    val schema = new StringBuilder()
+      .append("[ \n")
+      .append("   {\"name\":\"string\"},\n")
+      .append("   {\"age\":\"int\"},\n")
+      .append("   {\"height\":\"double\"}\n")
+      .append("]")
+      .toString()
+
+    try {
+      val builder = CarbonWriter.builder()
+      val writer =
+        builder.withSchema(Schema.parseJson(schema)).outputPath(writerPath)
+          .isTransactionalTable(false)
+          .sortBy(sortColumns.toArray)
+          .uniqueIdentifier(
+            123).withBlockSize(2)
+          .buildWriterForCSVInput()
+      var i = 0
+      while (i < rows) {
+        writer.write(Array[String]("robot" + i, String.valueOf(i), String.valueOf(i.toDouble / 2)))
+        i += 1
+      }
+      writer.close()
+    } catch {
+      case ex: Exception => throw new RuntimeException(ex)
+
+      case _ => None
+    }
+  }
 
   def cleanTestData() = {
     FileUtils.deleteDirectory(new File(writerPath))
@@ -229,6 +263,44 @@ class TestNonTransactionalCarbonTable extends QueryTest with BeforeAndAfterAll {
     sql("DROP TABLE IF EXISTS sdkOutputTable")
   }
 
+  test(
+    "Read two sdk writer outputs before and after deleting the existing files and creating new " +
+    "files with same schema and UUID") {
+    FileUtils.deleteDirectory(new File(writerPath))
+    buildTestDataWithSameUUID(3, false, null, List("name"))
+    assert(new File(writerPath).exists())
+
+    sql("DROP TABLE IF EXISTS sdkOutputTable")
+
+    sql(
+      s"""CREATE EXTERNAL TABLE sdkOutputTable STORED BY 'carbondata' LOCATION
+         |'$writerPath' """.stripMargin)
+
+
+    checkAnswer(sql("select * from sdkOutputTable"), Seq(
+      Row("robot0", 0, 0.0),
+      Row("robot1", 1, 0.5),
+      Row("robot2", 2, 1.0)))
+    new File(writerPath).listFiles().map(x => LOGGER.audit(x.getName +" : "+x.lastModified()))
+    FileUtils.deleteDirectory(new File(writerPath))
+    // Thread.sleep is required because deletion and creation of the new file
+    // can happen at the same timestamp.
+    Thread.sleep(1000)
+    assert(!new File(writerPath).exists())
+    buildTestDataWithSameUUID(4, false, null, List("name"))
+    new File(writerPath).listFiles().map(x => LOGGER.audit(x.getName +" : "+x.lastModified()))
+    checkAnswer(sql("select * from sdkOutputTable"), Seq(
+      Row("robot0", 0, 0.0),
+      Row("robot1", 1, 0.5),
+      Row("robot2", 2, 1.0),
+      Row("robot3", 3, 1.5)))
+
+    sql("DROP TABLE sdkOutputTable")
+    // drop table should not delete the files
+    assert(new File(writerPath).exists())
+    cleanTestData()
+  }
+
   test("test create external table with sort columns") {
     buildTestDataWithSortColumns()
     assert(new File(writerPath).exists())
@@ -638,9 +710,40 @@ class TestNonTransactionalCarbonTable extends QueryTest with BeforeAndAfterAll {
       Row("robot1", 1, 0.5),
       Row("robot2", 2, 1.0)))
 
+    buildTestDataWithSameUUID(3, false, null, List("name"))
+
+    checkAnswer(sql("select * from sdkOutputTable"), Seq(
+      Row("robot0", 0, 0.0),
+      Row("robot1", 1, 0.5),
+      Row("robot2", 2, 1.0),
+      Row("robot0", 0, 0.0),
+      Row("robot1", 1, 0.5),
+      Row("robot2", 2, 1.0),
+      Row("robot0", 0, 0.0),
+      Row("robot1", 1, 0.5),
+      Row("robot2", 2, 1.0)))
+
+    buildTestDataWithSameUUID(3, false, null, List("name"))
+
+    checkAnswer(sql("select * from sdkOutputTable"), Seq(
+      Row("robot0", 0, 0.0),
+      Row("robot1", 1, 0.5),
+      Row("robot2", 2, 1.0),
+      Row("robot0", 0, 0.0),
+      Row("robot1", 1, 0.5),
+      Row("robot2", 2, 1.0),
+      Row("robot0", 0, 0.0),
+      Row("robot1", 1, 0.5),
+      Row("robot2", 2, 1.0),
+      Row("robot0", 0, 0.0),
+      Row("robot1", 1, 0.5),
+      Row("robot2", 2, 1.0)))
+
     //test filter query
     checkAnswer(sql("select * from sdkOutputTable where age = 1"), Seq(
       Row("robot1", 1, 0.5),
+      Row("robot1", 1, 0.5),
+      Row("robot1", 1, 0.5),
       Row("robot1", 1, 0.5)))
 
     // test the default sort column behavior in Nontransactional table
@@ -653,6 +756,39 @@ class TestNonTransactionalCarbonTable extends QueryTest with BeforeAndAfterAll {
     cleanTestData()
   }
 
+  test(
+    "Read two sdk writer outputs before and after deleting the existing files and creating new " +
+    "files with same schema") {
+    buildTestDataSingleFile()
+    assert(new File(writerPath).exists())
+
+    sql("DROP TABLE IF EXISTS sdkOutputTable")
+
+    sql(
+      s"""CREATE EXTERNAL TABLE sdkOutputTable STORED BY 'carbondata' LOCATION
+         |'$writerPath' """.stripMargin)
+
+
+    checkAnswer(sql("select * from sdkOutputTable"), Seq(
+      Row("robot0", 0, 0.0),
+      Row("robot1", 1, 0.5),
+      Row("robot2", 2, 1.0)))
+
+    FileUtils.deleteDirectory(new File(writerPath))
+    buildTestData(4, false, null)
+
+    checkAnswer(sql("select * from sdkOutputTable"), Seq(
+      Row("robot0", 0, 0.0),
+      Row("robot1", 1, 0.5),
+      Row("robot2", 2, 1.0),
+      Row("robot3", 3, 1.5)))
+
+    sql("DROP TABLE sdkOutputTable")
+    // drop table should not delete the files
+    assert(new File(writerPath).exists())
+    cleanTestData()
+  }
+
   test("test bad records form sdk writer") {
 
     //1. Action = FORCE


[27/50] [abbrv] carbondata git commit: [CARBONDATA-2474] Support Modular Plan for Materialized View DataMap

Posted by ma...@apache.org.
http://git-wip-us.apache.org/repos/asf/carbondata/blob/ffddba70/datamap/mv/plan/src/main/scala/org/apache/carbondata/mv/testutil/Tpcds_1_4_QueryBatch.scala
----------------------------------------------------------------------
diff --git a/datamap/mv/plan/src/main/scala/org/apache/carbondata/mv/testutil/Tpcds_1_4_QueryBatch.scala b/datamap/mv/plan/src/main/scala/org/apache/carbondata/mv/testutil/Tpcds_1_4_QueryBatch.scala
new file mode 100644
index 0000000..8262dfa
--- /dev/null
+++ b/datamap/mv/plan/src/main/scala/org/apache/carbondata/mv/testutil/Tpcds_1_4_QueryBatch.scala
@@ -0,0 +1,4293 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.carbondata.mv.testutil
+
+object Tpcds_1_4_QueryBatch {
+
+  // should be randomly generated based on the scale
+  // RC=ulist(random(1, rowcount("store_sales")/5,uniform),5);
+  val rc = Array(1000000, 1000000, 1000000, 1000000, 1000000)
+
+  // The TPCDS 1.4 queries using the qualification values in the templates.
+
+  val tpcds1_4Queries = Seq(
+    ("q1",
+      """
+        | WITH customer_total_return AS
+        |   (SELECT sr_customer_sk AS ctr_customer_sk, sr_store_sk AS ctr_store_sk,
+        |           sum(sr_return_amt) AS ctr_total_return
+        |    FROM store_returns, date_dim
+        |    WHERE sr_returned_date_sk = d_date_sk AND d_year = 2000
+        |    GROUP BY sr_customer_sk, sr_store_sk)
+        | SELECT c_customer_id
+        |   FROM customer_total_return ctr1, store, customer
+        |   WHERE ctr1.ctr_total_return >
+        |    (SELECT avg(ctr_total_return)*1.2
+        |      FROM customer_total_return ctr2
+        |       WHERE ctr1.ctr_store_sk = ctr2.ctr_store_sk)
+        |   AND s_store_sk = ctr1.ctr_store_sk
+        |   AND s_state = 'TN'
+        |   AND ctr1.ctr_customer_sk = c_customer_sk
+        |   ORDER BY c_customer_id LIMIT 100
+      """.stripMargin),
+    ("q2",
+      """
+        | WITH wscs as
+        | (SELECT sold_date_sk, sales_price
+        |  FROM (SELECT ws_sold_date_sk sold_date_sk, ws_ext_sales_price sales_price
+        |        FROM web_sales) x
+        |        UNION ALL
+        |       (SELECT cs_sold_date_sk sold_date_sk, cs_ext_sales_price sales_price
+        |        FROM catalog_sales)),
+        | wswscs AS
+        | (SELECT d_week_seq,
+        |        sum(case when (d_day_name='Sunday') then sales_price else null end) sun_sales,
+        |        sum(case when (d_day_name='Monday') then sales_price else null end) mon_sales,
+        |        sum(case when (d_day_name='Tuesday') then sales_price else  null end) tue_sales,
+        |        sum(case when (d_day_name='Wednesday') then sales_price else null end) wed_sales,
+        |        sum(case when (d_day_name='Thursday') then sales_price else null end) thu_sales,
+        |        sum(case when (d_day_name='Friday') then sales_price else null end) fri_sales,
+        |        sum(case when (d_day_name='Saturday') then sales_price else null end) sat_sales
+        | FROM wscs, date_dim
+        | WHERE d_date_sk = sold_date_sk
+        | GROUP BY d_week_seq)
+        | SELECT d_week_seq1
+        |       ,round(sun_sales1/sun_sales2,2)
+        |       ,round(mon_sales1/mon_sales2,2)
+        |       ,round(tue_sales1/tue_sales2,2)
+        |       ,round(wed_sales1/wed_sales2,2)
+        |       ,round(thu_sales1/thu_sales2,2)
+        |       ,round(fri_sales1/fri_sales2,2)
+        |       ,round(sat_sales1/sat_sales2,2)
+        | FROM
+        | (SELECT wswscs.d_week_seq d_week_seq1
+        |        ,sun_sales sun_sales1
+        |        ,mon_sales mon_sales1
+        |        ,tue_sales tue_sales1
+        |        ,wed_sales wed_sales1
+        |        ,thu_sales thu_sales1
+        |        ,fri_sales fri_sales1
+        |        ,sat_sales sat_sales1
+        |  FROM wswscs,date_dim
+        |  WHERE date_dim.d_week_seq = wswscs.d_week_seq AND d_year = 2001) y,
+        | (SELECT wswscs.d_week_seq d_week_seq2
+        |        ,sun_sales sun_sales2
+        |        ,mon_sales mon_sales2
+        |        ,tue_sales tue_sales2
+        |        ,wed_sales wed_sales2
+        |        ,thu_sales thu_sales2
+        |        ,fri_sales fri_sales2
+        |        ,sat_sales sat_sales2
+        |  FROM wswscs, date_dim
+        |  WHERE date_dim.d_week_seq = wswscs.d_week_seq AND d_year = 2001 + 1) z
+        | WHERE d_week_seq1=d_week_seq2-53
+        | ORDER BY d_week_seq1
+      """.stripMargin),
+    ("q3",
+      """
+        | SELECT dt.d_year, item.i_brand_id brand_id, item.i_brand brand,SUM(ss_ext_sales_price)
+        | sum_agg
+        | FROM  date_dim dt, store_sales, item
+        | WHERE dt.d_date_sk = store_sales.ss_sold_date_sk
+        |   AND store_sales.ss_item_sk = item.i_item_sk
+        |   AND item.i_manufact_id = 128
+        |   AND dt.d_moy=11
+        | GROUP BY dt.d_year, item.i_brand, item.i_brand_id
+        | ORDER BY dt.d_year, sum_agg desc, brand_id
+        | LIMIT 100
+      """.stripMargin),
+    ("q4",
+      """
+        |WITH year_total AS (
+        | SELECT c_customer_id customer_id,
+        |        c_first_name customer_first_name,
+        |        c_last_name customer_last_name,
+        |        c_preferred_cust_flag customer_preferred_cust_flag,
+        |        c_birth_country customer_birth_country,
+        |        c_login customer_login,
+        |        c_email_address customer_email_address,
+        |        d_year dyear,
+        |        sum(((ss_ext_list_price-ss_ext_wholesale_cost-ss_ext_discount_amt)
+        |        +ss_ext_sales_price)/2) year_total,
+        |        's' sale_type
+        | FROM customer, store_sales, date_dim
+        | WHERE c_customer_sk = ss_customer_sk AND ss_sold_date_sk = d_date_sk
+        | GROUP BY c_customer_id,
+        |          c_first_name,
+        |          c_last_name,
+        |          c_preferred_cust_flag,
+        |          c_birth_country,
+        |          c_login,
+        |          c_email_address,
+        |          d_year
+        | UNION ALL
+        | SELECT c_customer_id customer_id,
+        |        c_first_name customer_first_name,
+        |        c_last_name customer_last_name,
+        |        c_preferred_cust_flag customer_preferred_cust_flag,
+        |        c_birth_country customer_birth_country,
+        |        c_login customer_login,
+        |        c_email_address customer_email_address,
+        |        d_year dyear,
+        |        sum((((cs_ext_list_price-cs_ext_wholesale_cost-cs_ext_discount_amt)
+        |        +cs_ext_sales_price)/2) ) year_total,
+        |        'c' sale_type
+        | FROM customer, catalog_sales, date_dim
+        | WHERE c_customer_sk = cs_bill_customer_sk AND cs_sold_date_sk = d_date_sk
+        | GROUP BY c_customer_id,
+        |          c_first_name,
+        |          c_last_name,
+        |          c_preferred_cust_flag,
+        |          c_birth_country,
+        |          c_login,
+        |          c_email_address,
+        |          d_year
+        | UNION ALL
+        | SELECT c_customer_id customer_id
+        |       ,c_first_name customer_first_name
+        |       ,c_last_name customer_last_name
+        |       ,c_preferred_cust_flag customer_preferred_cust_flag
+        |       ,c_birth_country customer_birth_country
+        |       ,c_login customer_login
+        |       ,c_email_address customer_email_address
+        |       ,d_year dyear
+        |       ,sum((((ws_ext_list_price-ws_ext_wholesale_cost-ws_ext_discount_amt)
+        |       +ws_ext_sales_price)/2) ) year_total
+        |       ,'w' sale_type
+        | FROM customer, web_sales, date_dim
+        | WHERE c_customer_sk = ws_bill_customer_sk AND ws_sold_date_sk = d_date_sk
+        | GROUP BY c_customer_id,
+        |          c_first_name,
+        |          c_last_name,
+        |          c_preferred_cust_flag,
+        |          c_birth_country,
+        |          c_login,
+        |          c_email_address,
+        |          d_year)
+        | SELECT
+        |   t_s_secyear.customer_id,
+        |   t_s_secyear.customer_first_name,
+        |   t_s_secyear.customer_last_name,
+        |   t_s_secyear.customer_preferred_cust_flag,
+        |   t_s_secyear.customer_birth_country,
+        |   t_s_secyear.customer_login,
+        |   t_s_secyear.customer_email_address
+        | FROM year_total t_s_firstyear, year_total t_s_secyear, year_total t_c_firstyear,
+        |      year_total t_c_secyear, year_total t_w_firstyear, year_total t_w_secyear
+        | WHERE t_s_secyear.customer_id = t_s_firstyear.customer_id
+        |   and t_s_firstyear.customer_id = t_c_secyear.customer_id
+        |   and t_s_firstyear.customer_id = t_c_firstyear.customer_id
+        |   and t_s_firstyear.customer_id = t_w_firstyear.customer_id
+        |   and t_s_firstyear.customer_id = t_w_secyear.customer_id
+        |   and t_s_firstyear.sale_type = 's'
+        |   and t_c_firstyear.sale_type = 'c'
+        |   and t_w_firstyear.sale_type = 'w'
+        |   and t_s_secyear.sale_type = 's'
+        |   and t_c_secyear.sale_type = 'c'
+        |   and t_w_secyear.sale_type = 'w'
+        |   and t_s_firstyear.dyear = 2001
+        |   and t_s_secyear.dyear = 2001+1
+        |   and t_c_firstyear.dyear = 2001
+        |   and t_c_secyear.dyear = 2001+1
+        |   and t_w_firstyear.dyear = 2001
+        |   and t_w_secyear.dyear = 2001+1
+        |   and t_s_firstyear.year_total > 0
+        |   and t_c_firstyear.year_total > 0
+        |   and t_w_firstyear.year_total > 0
+        |   and case when t_c_firstyear.year_total > 0 then t_c_secyear.year_total /
+        |   t_c_firstyear.year_total else null end
+        |           > case when t_s_firstyear.year_total > 0 then t_s_secyear.year_total /
+        |           t_s_firstyear.year_total else null end
+        |   and case when t_c_firstyear.year_total > 0 then t_c_secyear.year_total /
+        |   t_c_firstyear.year_total else null end
+        |           > case when t_w_firstyear.year_total > 0 then t_w_secyear.year_total /
+        |           t_w_firstyear.year_total else null end
+        | ORDER BY
+        |   t_s_secyear.customer_id,
+        |   t_s_secyear.customer_first_name,
+        |   t_s_secyear.customer_last_name,
+        |   t_s_secyear.customer_preferred_cust_flag,
+        |   t_s_secyear.customer_birth_country,
+        |   t_s_secyear.customer_login,
+        |   t_s_secyear.customer_email_address
+        | LIMIT 100
+      """.stripMargin),
+    // Modifications: "+ days" -> date_add
+    // Modifications: "||" -> concat
+    ("q5",
+      """
+        | WITH ssr AS
+        |  (SELECT s_store_id,
+        |          sum(sales_price) as sales,
+        |          sum(profit) as profit,
+        |          sum(return_amt) as returns,
+        |          sum(net_loss) as profit_loss
+        |  FROM
+        |    (SELECT ss_store_sk as store_sk,
+        |            ss_sold_date_sk  as date_sk,
+        |            ss_ext_sales_price as sales_price,
+        |            ss_net_profit as profit,
+        |            cast(0 as decimal(7,2)) as return_amt,
+        |            cast(0 as decimal(7,2)) as net_loss
+        |    FROM store_sales
+        |    UNION ALL
+        |    SELECT sr_store_sk as store_sk,
+        |           sr_returned_date_sk as date_sk,
+        |           cast(0 as decimal(7,2)) as sales_price,
+        |           cast(0 as decimal(7,2)) as profit,
+        |           sr_return_amt as return_amt,
+        |           sr_net_loss as net_loss
+        |    FROM store_returns)
+        |    salesreturns, date_dim, store
+        |  WHERE date_sk = d_date_sk
+        |       and d_date between cast('2000-08-23' as date)
+        |                  and ((cast('2000-08-23' as date) + interval 14 days))
+        |       and store_sk = s_store_sk
+        | GROUP BY s_store_id),
+        | csr AS
+        | (SELECT cp_catalog_page_id,
+        |         sum(sales_price) as sales,
+        |         sum(profit) as profit,
+        |         sum(return_amt) as returns,
+        |         sum(net_loss) as profit_loss
+        | FROM
+        |   (SELECT cs_catalog_page_sk as page_sk,
+        |           cs_sold_date_sk  as date_sk,
+        |           cs_ext_sales_price as sales_price,
+        |           cs_net_profit as profit,
+        |           cast(0 as decimal(7,2)) as return_amt,
+        |           cast(0 as decimal(7,2)) as net_loss
+        |    FROM catalog_sales
+        |    UNION ALL
+        |    SELECT cr_catalog_page_sk as page_sk,
+        |           cr_returned_date_sk as date_sk,
+        |           cast(0 as decimal(7,2)) as sales_price,
+        |           cast(0 as decimal(7,2)) as profit,
+        |           cr_return_amount as return_amt,
+        |           cr_net_loss as net_loss
+        |    from catalog_returns
+        |   ) salesreturns, date_dim, catalog_page
+        | WHERE date_sk = d_date_sk
+        |       and d_date between cast('2000-08-23' as date)
+        |                  and ((cast('2000-08-23' as date) + interval 14 days))
+        |       and page_sk = cp_catalog_page_sk
+        | GROUP BY cp_catalog_page_id)
+        | ,
+        | wsr AS
+        | (SELECT web_site_id,
+        |         sum(sales_price) as sales,
+        |         sum(profit) as profit,
+        |         sum(return_amt) as returns,
+        |         sum(net_loss) as profit_loss
+        | from
+        |  (select  ws_web_site_sk as wsr_web_site_sk,
+        |            ws_sold_date_sk  as date_sk,
+        |            ws_ext_sales_price as sales_price,
+        |            ws_net_profit as profit,
+        |            cast(0 as decimal(7,2)) as return_amt,
+        |            cast(0 as decimal(7,2)) as net_loss
+        |    from web_sales
+        |    union all
+        |    select ws_web_site_sk as wsr_web_site_sk,
+        |           wr_returned_date_sk as date_sk,
+        |           cast(0 as decimal(7,2)) as sales_price,
+        |           cast(0 as decimal(7,2)) as profit,
+        |           wr_return_amt as return_amt,
+        |           wr_net_loss as net_loss
+        |    FROM web_returns LEFT  OUTER JOIN web_sales on
+        |         ( wr_item_sk = ws_item_sk
+        |           and wr_order_number = ws_order_number)
+        |   ) salesreturns, date_dim, web_site
+        | WHERE date_sk = d_date_sk
+        |       and d_date between cast('2000-08-23' as date)
+        |                  and ((cast('2000-08-23' as date) + interval 14 days))
+        |       and wsr_web_site_sk = web_site_sk
+        | GROUP BY web_site_id)
+        | SELECT channel,
+        |        id,
+        |        sum(sales) as sales,
+        |        sum(returns) as returns,
+        |        sum(profit) as profit
+        | from
+        | (select 'store channel' as channel,
+        |         concat('store', s_store_id) as id,
+        |         sales,
+        |         returns,
+        |        (profit - profit_loss) as profit
+        | FROM ssr
+        | UNION ALL
+        | select 'catalog channel' as channel,
+        |        concat('catalog_page', cp_catalog_page_id) as id,
+        |        sales,
+        |        returns,
+        |        (profit - profit_loss) as profit
+        | FROM  csr
+        | UNION ALL
+        | SELECT 'web channel' as channel,
+        |        concat('web_site', web_site_id) as id,
+        |        sales,
+        |        returns,
+        |        (profit - profit_loss) as profit
+        | FROM wsr
+        | ) x
+        | GROUP BY ROLLUP (channel, id)
+        | ORDER BY channel, id
+        | LIMIT 100
+      """.stripMargin),
+    ("q6",
+      """
+        | SELECT a.ca_state state, count(*) cnt
+        | FROM
+        |    customer_address a, customer c, store_sales s, date_dim d, item i
+        | WHERE a.ca_address_sk = c.c_current_addr_sk
+        |   AND c.c_customer_sk = s.ss_customer_sk
+        |   AND s.ss_sold_date_sk = d.d_date_sk
+        |   AND s.ss_item_sk = i.i_item_sk
+        |   AND d.d_month_seq =
+        |        (SELECT distinct (d_month_seq) FROM date_dim
+        |        WHERE d_year = 2000 AND d_moy = 1)
+        |   AND i.i_current_price > 1.2 *
+        |             (SELECT avg(j.i_current_price) FROM item j
+        |               WHERE j.i_category = i.i_category)
+        | GROUP BY a.ca_state
+        | HAVING count(*) >= 10
+        | ORDER BY cnt LIMIT 100
+      """.stripMargin),
+    ("q7",
+      """
+        | SELECT i_item_id,
+        |        avg(ss_quantity) agg1,
+        |        avg(ss_list_price) agg2,
+        |        avg(ss_coupon_amt) agg3,
+        |        avg(ss_sales_price) agg4
+        | FROM store_sales, customer_demographics, date_dim, item, promotion
+        | WHERE ss_sold_date_sk = d_date_sk AND
+        |       ss_item_sk = i_item_sk AND
+        |       ss_cdemo_sk = cd_demo_sk AND
+        |       ss_promo_sk = p_promo_sk AND
+        |       cd_gender = 'M' AND
+        |       cd_marital_status = 'S' AND
+        |       cd_education_status = 'College' AND
+        |       (p_channel_email = 'N' or p_channel_event = 'N') AND
+        |       d_year = 2000
+        | GROUP BY i_item_id
+        | ORDER BY i_item_id LIMIT 100
+      """.stripMargin),
+    ("q8",
+      """
+        | select s_store_name, sum(ss_net_profit)
+        | from store_sales, date_dim, store,
+        |     (SELECT ca_zip
+        |       from (
+        |       (SELECT substr(ca_zip,1,5) ca_zip FROM customer_address
+        |          WHERE substr(ca_zip,1,5) IN (
+        |               '24128','76232','65084','87816','83926','77556','20548',
+        |               '26231','43848','15126','91137','61265','98294','25782',
+        |               '17920','18426','98235','40081','84093','28577','55565',
+        |               '17183','54601','67897','22752','86284','18376','38607',
+        |               '45200','21756','29741','96765','23932','89360','29839',
+        |               '25989','28898','91068','72550','10390','18845','47770',
+        |               '82636','41367','76638','86198','81312','37126','39192',
+        |               '88424','72175','81426','53672','10445','42666','66864',
+        |               '66708','41248','48583','82276','18842','78890','49448',
+        |               '14089','38122','34425','79077','19849','43285','39861',
+        |               '66162','77610','13695','99543','83444','83041','12305',
+        |               '57665','68341','25003','57834','62878','49130','81096',
+        |               '18840','27700','23470','50412','21195','16021','76107',
+        |               '71954','68309','18119','98359','64544','10336','86379',
+        |               '27068','39736','98569','28915','24206','56529','57647',
+        |               '54917','42961','91110','63981','14922','36420','23006',
+        |               '67467','32754','30903','20260','31671','51798','72325',
+        |               '85816','68621','13955','36446','41766','68806','16725',
+        |               '15146','22744','35850','88086','51649','18270','52867',
+        |               '39972','96976','63792','11376','94898','13595','10516',
+        |               '90225','58943','39371','94945','28587','96576','57855',
+        |               '28488','26105','83933','25858','34322','44438','73171',
+        |               '30122','34102','22685','71256','78451','54364','13354',
+        |               '45375','40558','56458','28286','45266','47305','69399',
+        |               '83921','26233','11101','15371','69913','35942','15882',
+        |               '25631','24610','44165','99076','33786','70738','26653',
+        |               '14328','72305','62496','22152','10144','64147','48425',
+        |               '14663','21076','18799','30450','63089','81019','68893',
+        |               '24996','51200','51211','45692','92712','70466','79994',
+        |               '22437','25280','38935','71791','73134','56571','14060',
+        |               '19505','72425','56575','74351','68786','51650','20004',
+        |               '18383','76614','11634','18906','15765','41368','73241',
+        |               '76698','78567','97189','28545','76231','75691','22246',
+        |               '51061','90578','56691','68014','51103','94167','57047',
+        |               '14867','73520','15734','63435','25733','35474','24676',
+        |               '94627','53535','17879','15559','53268','59166','11928',
+        |               '59402','33282','45721','43933','68101','33515','36634',
+        |               '71286','19736','58058','55253','67473','41918','19515',
+        |               '36495','19430','22351','77191','91393','49156','50298',
+        |               '87501','18652','53179','18767','63193','23968','65164',
+        |               '68880','21286','72823','58470','67301','13394','31016',
+        |               '70372','67030','40604','24317','45748','39127','26065',
+        |               '77721','31029','31880','60576','24671','45549','13376',
+        |               '50016','33123','19769','22927','97789','46081','72151',
+        |               '15723','46136','51949','68100','96888','64528','14171',
+        |               '79777','28709','11489','25103','32213','78668','22245',
+        |               '15798','27156','37930','62971','21337','51622','67853',
+        |               '10567','38415','15455','58263','42029','60279','37125',
+        |               '56240','88190','50308','26859','64457','89091','82136',
+        |               '62377','36233','63837','58078','17043','30010','60099',
+        |               '28810','98025','29178','87343','73273','30469','64034',
+        |               '39516','86057','21309','90257','67875','40162','11356',
+        |               '73650','61810','72013','30431','22461','19512','13375',
+        |               '55307','30625','83849','68908','26689','96451','38193',
+        |               '46820','88885','84935','69035','83144','47537','56616',
+        |               '94983','48033','69952','25486','61547','27385','61860',
+        |               '58048','56910','16807','17871','35258','31387','35458',
+        |               '35576'))
+        |       INTERSECT
+        |       (select ca_zip
+        |          FROM
+        |            (SELECT substr(ca_zip,1,5) ca_zip,count(*) cnt
+        |              FROM customer_address, customer
+        |              WHERE ca_address_sk = c_current_addr_sk and
+        |                    c_preferred_cust_flag='Y'
+        |              group by ca_zip
+        |              having count(*) > 10) A1)
+        |         ) A2
+        |      ) V1
+        | where ss_store_sk = s_store_sk
+        |  and ss_sold_date_sk = d_date_sk
+        |  and d_qoy = 2 and d_year = 1998
+        |  and (substr(s_zip,1,2) = substr(V1.ca_zip,1,2))
+        | group by s_store_name
+        | order by s_store_name LIMIT 100
+      """.stripMargin),
+    ("q9",
+      s"""
+         |select case when (select count(*) from store_sales
+         |                  where ss_quantity between 1 and 20) > ${ rc(0) }
+         |            then (select avg(ss_ext_discount_amt) from store_sales
+         |                  where ss_quantity between 1 and 20)
+         |            else (select avg(ss_net_paid) from store_sales
+         |                  where ss_quantity between 1 and 20) end bucket1 ,
+         |       case when (select count(*) from store_sales
+         |                  where ss_quantity between 21 and 40) > ${ rc(1) }
+         |            then (select avg(ss_ext_discount_amt) from store_sales
+         |                  where ss_quantity between 21 and 40)
+         |            else (select avg(ss_net_paid) from store_sales
+         |                  where ss_quantity between 21 and 40) end bucket2,
+         |       case when (select count(*) from store_sales
+         |                  where ss_quantity between 41 and 60) > ${ rc(2) }
+         |            then (select avg(ss_ext_discount_amt) from store_sales
+         |                  where ss_quantity between 41 and 60)
+         |            else (select avg(ss_net_paid) from store_sales
+         |                  where ss_quantity between 41 and 60) end bucket3,
+         |       case when (select count(*) from store_sales
+         |                  where ss_quantity between 61 and 80) > ${ rc(3) }
+         |            then (select avg(ss_ext_discount_amt) from store_sales
+         |                  where ss_quantity between 61 and 80)
+         |            else (select avg(ss_net_paid) from store_sales
+         |                  where ss_quantity between 61 and 80) end bucket4,
+         |       case when (select count(*) from store_sales
+         |                  where ss_quantity between 81 and 100) > ${ rc(4) }
+         |            then (select avg(ss_ext_discount_amt) from store_sales
+         |                  where ss_quantity between 81 and 100)
+         |            else (select avg(ss_net_paid) from store_sales
+         |                  where ss_quantity between 81 and 100) end bucket5
+         |from reason
+         |where r_reason_sk = 1
+            """.stripMargin),
+    ("q10",
+      """
+        | select
+        |  cd_gender, cd_marital_status, cd_education_status, count(*) cnt1,
+        |  cd_purchase_estimate, count(*) cnt2, cd_credit_rating, count(*) cnt3,
+        |  cd_dep_count, count(*) cnt4, cd_dep_employed_count,  count(*) cnt5,
+        |  cd_dep_college_count, count(*) cnt6
+        | from
+        |  customer c, customer_address ca, customer_demographics
+        | where
+        |  c.c_current_addr_sk = ca.ca_address_sk and
+        |  ca_county in ('Rush County','Toole County','Jefferson County',
+        |                'Dona Ana County','La Porte County') and
+        |  cd_demo_sk = c.c_current_cdemo_sk AND
+        |  exists (select * from store_sales, date_dim
+        |          where c.c_customer_sk = ss_customer_sk AND
+        |                ss_sold_date_sk = d_date_sk AND
+        |                d_year = 2002 AND
+        |                d_moy between 1 AND 1+3) AND
+        |   (exists (select * from web_sales, date_dim
+        |            where c.c_customer_sk = ws_bill_customer_sk AND
+        |                  ws_sold_date_sk = d_date_sk AND
+        |                  d_year = 2002 AND
+        |                  d_moy between 1 AND 1+3) or
+        |    exists (select * from catalog_sales, date_dim
+        |            where c.c_customer_sk = cs_ship_customer_sk AND
+        |                  cs_sold_date_sk = d_date_sk AND
+        |                  d_year = 2002 AND
+        |                  d_moy between 1 AND 1+3))
+        | group by cd_gender,
+        |          cd_marital_status,
+        |          cd_education_status,
+        |          cd_purchase_estimate,
+        |          cd_credit_rating,
+        |          cd_dep_count,
+        |          cd_dep_employed_count,
+        |          cd_dep_college_count
+        | order by cd_gender,
+        |          cd_marital_status,
+        |          cd_education_status,
+        |          cd_purchase_estimate,
+        |          cd_credit_rating,
+        |          cd_dep_count,
+        |          cd_dep_employed_count,
+        |          cd_dep_college_count
+        |LIMIT 100
+      """.stripMargin),
+    ("q11",
+      """
+        | with year_total as (
+        | select c_customer_id customer_id
+        |       ,c_first_name customer_first_name
+        |       ,c_last_name customer_last_name
+        |       ,c_preferred_cust_flag customer_preferred_cust_flag
+        |       ,c_birth_country customer_birth_country
+        |       ,c_login customer_login
+        |       ,c_email_address customer_email_address
+        |       ,d_year dyear
+        |       ,sum(ss_ext_list_price-ss_ext_discount_amt) year_total
+        |       ,'s' sale_type
+        | from customer, store_sales, date_dim
+        | where c_customer_sk = ss_customer_sk
+        |   and ss_sold_date_sk = d_date_sk
+        | group by c_customer_id
+        |         ,c_first_name
+        |         ,c_last_name
+        |         ,d_year
+        |         ,c_preferred_cust_flag
+        |         ,c_birth_country
+        |         ,c_login
+        |         ,c_email_address
+        |         ,d_year
+        | union all
+        | select c_customer_id customer_id
+        |       ,c_first_name customer_first_name
+        |       ,c_last_name customer_last_name
+        |       ,c_preferred_cust_flag customer_preferred_cust_flag
+        |       ,c_birth_country customer_birth_country
+        |       ,c_login customer_login
+        |       ,c_email_address customer_email_address
+        |       ,d_year dyear
+        |       ,sum(ws_ext_list_price-ws_ext_discount_amt) year_total
+        |       ,'w' sale_type
+        | from customer, web_sales, date_dim
+        | where c_customer_sk = ws_bill_customer_sk
+        |   and ws_sold_date_sk = d_date_sk
+        | group by
+        |    c_customer_id, c_first_name, c_last_name, c_preferred_cust_flag, c_birth_country,
+        |    c_login, c_email_address, d_year)
+        | select
+        |    t_s_secyear.customer_preferred_cust_flag
+        | from year_total t_s_firstyear
+        |     ,year_total t_s_secyear
+        |     ,year_total t_w_firstyear
+        |     ,year_total t_w_secyear
+        | where t_s_secyear.customer_id = t_s_firstyear.customer_id
+        |         and t_s_firstyear.customer_id = t_w_secyear.customer_id
+        |         and t_s_firstyear.customer_id = t_w_firstyear.customer_id
+        |         and t_s_firstyear.sale_type = 's'
+        |         and t_w_firstyear.sale_type = 'w'
+        |         and t_s_secyear.sale_type = 's'
+        |         and t_w_secyear.sale_type = 'w'
+        |         and t_s_firstyear.dyear = 2001
+        |         and t_s_secyear.dyear = 2001+1
+        |         and t_w_firstyear.dyear = 2001
+        |         and t_w_secyear.dyear = 2001+1
+        |         and t_s_firstyear.year_total > 0
+        |         and t_w_firstyear.year_total > 0
+        |         and case when t_w_firstyear.year_total > 0 then t_w_secyear.year_total /
+        |         t_w_firstyear.year_total else null end
+        |             > case when t_s_firstyear.year_total > 0 then t_s_secyear.year_total /
+        |             t_s_firstyear.year_total else null end
+        | order by t_s_secyear.customer_preferred_cust_flag
+        | LIMIT 100
+      """.stripMargin),
+    // Modifications: "+ days" -> "+ interval days"
+    ("q12",
+      """
+        | select
+        |  i_item_desc, i_category, i_class, i_current_price,
+        |  sum(ws_ext_sales_price) as itemrevenue,
+        |  sum(ws_ext_sales_price)*100/sum(sum(ws_ext_sales_price)) over
+        |          (partition by i_class) as revenueratio
+        | from
+        |   web_sales, item, date_dim
+        | where
+        |   ws_item_sk = i_item_sk
+        |   and i_category in ('Sports', 'Books', 'Home')
+        |   and ws_sold_date_sk = d_date_sk
+        |   and d_date between cast('1999-02-22' as date)
+        |                           and (cast('1999-02-22' as date) + interval 30 days)
+        | group by
+        |   i_item_id, i_item_desc, i_category, i_class, i_current_price
+        | order by
+        |   i_category, i_class, i_item_id, i_item_desc, revenueratio
+        | LIMIT 100
+      """.stripMargin),
+    ("q13",
+      """
+        | select avg(ss_quantity)
+        |       ,avg(ss_ext_sales_price)
+        |       ,avg(ss_ext_wholesale_cost)
+        |       ,sum(ss_ext_wholesale_cost)
+        | from store_sales
+        |     ,store
+        |     ,customer_demographics
+        |     ,household_demographics
+        |     ,customer_address
+        |     ,date_dim
+        | where s_store_sk = ss_store_sk
+        | and  ss_sold_date_sk = d_date_sk and d_year = 2001
+        | and((ss_hdemo_sk=hd_demo_sk
+        |  and cd_demo_sk = ss_cdemo_sk
+        |  and cd_marital_status = 'M'
+        |  and cd_education_status = 'Advanced Degree'
+        |  and ss_sales_price between 100.00 and 150.00
+        |  and hd_dep_count = 3
+        |     )or
+        |     (ss_hdemo_sk=hd_demo_sk
+        |  and cd_demo_sk = ss_cdemo_sk
+        |  and cd_marital_status = 'S'
+        |  and cd_education_status = 'College'
+        |  and ss_sales_price between 50.00 and 100.00
+        |  and hd_dep_count = 1
+        |     ) or
+        |     (ss_hdemo_sk=hd_demo_sk
+        |  and cd_demo_sk = ss_cdemo_sk
+        |  and cd_marital_status = 'W'
+        |  and cd_education_status = '2 yr Degree'
+        |  and ss_sales_price between 150.00 and 200.00
+        |  and hd_dep_count = 1
+        |     ))
+        | and((ss_addr_sk = ca_address_sk
+        |  and ca_country = 'United States'
+        |  and ca_state in ('TX', 'OH', 'TX')
+        |  and ss_net_profit between 100 and 200
+        |     ) or
+        |     (ss_addr_sk = ca_address_sk
+        |  and ca_country = 'United States'
+        |  and ca_state in ('OR', 'NM', 'KY')
+        |  and ss_net_profit between 150 and 300
+        |     ) or
+        |     (ss_addr_sk = ca_address_sk
+        |  and ca_country = 'United States'
+        |  and ca_state in ('VA', 'TX', 'MS')
+        |  and ss_net_profit between 50 and 250
+        |     ))
+      """.stripMargin),
+    ("q14a",
+      """
+            |with cross_items as
+            | (select i_item_sk ss_item_sk
+            | from item,
+            |    (select iss.i_brand_id brand_id, iss.i_class_id class_id, iss.i_category_id
+            |    category_id
+            |     from store_sales, item iss, date_dim d1
+            |     where ss_item_sk = iss.i_item_sk
+            |       and ss_sold_date_sk = d1.d_date_sk
+            |       and d1.d_year between 1999 AND 1999 + 2
+            |   intersect
+            |     select ics.i_brand_id, ics.i_class_id, ics.i_category_id
+            |     from catalog_sales, item ics, date_dim d2
+            |     where cs_item_sk = ics.i_item_sk
+            |       and cs_sold_date_sk = d2.d_date_sk
+            |       and d2.d_year between 1999 AND 1999 + 2
+            |   intersect
+            |     select iws.i_brand_id, iws.i_class_id, iws.i_category_id
+            |     from web_sales, item iws, date_dim d3
+            |     where ws_item_sk = iws.i_item_sk
+            |       and ws_sold_date_sk = d3.d_date_sk
+            |       and d3.d_year between 1999 AND 1999 + 2) x
+            | where i_brand_id = brand_id
+            |   and i_class_id = class_id
+            |   and i_category_id = category_id
+            |),
+            | avg_sales as
+            | (select avg(quantity*list_price) average_sales
+            |  from (
+            |     select ss_quantity quantity, ss_list_price list_price
+            |     from store_sales, date_dim
+            |     where ss_sold_date_sk = d_date_sk
+            |       and d_year between 1999 and 2001
+            |   union all
+            |     select cs_quantity quantity, cs_list_price list_price
+            |     from catalog_sales, date_dim
+            |     where cs_sold_date_sk = d_date_sk
+            |       and d_year between 1999 and 1999 + 2
+            |   union all
+            |     select ws_quantity quantity, ws_list_price list_price
+            |     from web_sales, date_dim
+            |     where ws_sold_date_sk = d_date_sk
+            |       and d_year between 1999 and 1999 + 2) x)
+            | select channel, i_brand_id,i_class_id,i_category_id,sum(sales), sum(number_sales)
+            | from(
+            |     select 'store' channel, i_brand_id,i_class_id
+            |             ,i_category_id,sum(ss_quantity*ss_list_price) sales
+            |             , count(*) number_sales
+            |     from store_sales, item, date_dim
+            |     where ss_item_sk in (select ss_item_sk from cross_items)
+            |       and ss_item_sk = i_item_sk
+            |       and ss_sold_date_sk = d_date_sk
+            |       and d_year = 1999+2
+            |       and d_moy = 11
+            |     group by i_brand_id,i_class_id,i_category_id
+            |     having sum(ss_quantity*ss_list_price) > (select average_sales from avg_sales)
+            |   union all
+            |     select 'catalog' channel, i_brand_id,i_class_id,i_category_id, sum
+            |     (cs_quantity*cs_list_price) sales, count(*) number_sales
+            |     from catalog_sales, item, date_dim
+            |     where cs_item_sk in (select ss_item_sk from cross_items)
+            |       and cs_item_sk = i_item_sk
+            |       and cs_sold_date_sk = d_date_sk
+            |       and d_year = 1999+2
+            |       and d_moy = 11
+            |     group by i_brand_id,i_class_id,i_category_id
+            |     having sum(cs_quantity*cs_list_price) > (select average_sales from avg_sales)
+            |   union all
+            |     select 'web' channel, i_brand_id,i_class_id,i_category_id, sum
+            |     (ws_quantity*ws_list_price) sales , count(*) number_sales
+            |     from web_sales, item, date_dim
+            |     where ws_item_sk in (select ss_item_sk from cross_items)
+            |       and ws_item_sk = i_item_sk
+            |       and ws_sold_date_sk = d_date_sk
+            |       and d_year = 1999+2
+            |       and d_moy = 11
+            |     group by i_brand_id,i_class_id,i_category_id
+            |     having sum(ws_quantity*ws_list_price) > (select average_sales from avg_sales)
+            | ) y
+            | group by rollup (channel, i_brand_id,i_class_id,i_category_id)
+            | order by channel,i_brand_id,i_class_id,i_category_id
+            | limit 100
+            """.stripMargin),
+    ("q14b",
+      """
+        | with  cross_items as
+        | (select i_item_sk ss_item_sk
+        |  from item,
+        |     (select iss.i_brand_id brand_id, iss.i_class_id class_id, iss.i_category_id
+        |     category_id
+        |      from store_sales, item iss, date_dim d1
+        |      where ss_item_sk = iss.i_item_sk
+        |         and ss_sold_date_sk = d1.d_date_sk
+        |         and d1.d_year between 1999 AND 1999 + 2
+        |     intersect
+        |       select ics.i_brand_id, ics.i_class_id, ics.i_category_id
+        |       from catalog_sales, item ics, date_dim d2
+        |       where cs_item_sk = ics.i_item_sk
+        |         and cs_sold_date_sk = d2.d_date_sk
+        |         and d2.d_year between 1999 AND 1999 + 2
+        |     intersect
+        |       select iws.i_brand_id, iws.i_class_id, iws.i_category_id
+        |       from web_sales, item iws, date_dim d3
+        |       where ws_item_sk = iws.i_item_sk
+        |         and ws_sold_date_sk = d3.d_date_sk
+        |         and d3.d_year between 1999 AND 1999 + 2) x
+        |  where i_brand_id = brand_id
+        |    and i_class_id = class_id
+        |    and i_category_id = category_id
+        | ),
+        | avg_sales as
+        | (select avg(quantity*list_price) average_sales
+        |  from (select ss_quantity quantity, ss_list_price list_price
+        |         from store_sales, date_dim
+        |         where ss_sold_date_sk = d_date_sk and d_year between 1999 and 1999 + 2
+        |       union all
+        |         select cs_quantity quantity, cs_list_price list_price
+        |         from catalog_sales, date_dim
+        |         where cs_sold_date_sk = d_date_sk and d_year between 1999 and 1999 + 2
+        |       union all
+        |         select ws_quantity quantity, ws_list_price list_price
+        |         from web_sales, date_dim
+        |         where ws_sold_date_sk = d_date_sk and d_year between 1999 and 1999 + 2) x)
+        | select * from
+        | (select 'store' channel, i_brand_id,i_class_id,i_category_id
+        |        ,sum(ss_quantity*ss_list_price) sales, count(*) number_sales
+        |  from store_sales, item, date_dim
+        |  where ss_item_sk in (select ss_item_sk from cross_items)
+        |    and ss_item_sk = i_item_sk
+        |    and ss_sold_date_sk = d_date_sk
+        |    and d_week_seq = (select d_week_seq from date_dim
+        |                     where d_year = 1999 + 1 and d_moy = 12 and d_dom = 11)
+        |  group by i_brand_id,i_class_id,i_category_id
+        |  having sum(ss_quantity*ss_list_price) > (select average_sales from avg_sales)) this_year,
+        | (select 'store' channel, i_brand_id,i_class_id
+        |        ,i_category_id, sum(ss_quantity*ss_list_price) sales, count(*) number_sales
+        | from store_sales, item, date_dim
+        | where ss_item_sk in (select ss_item_sk from cross_items)
+        |   and ss_item_sk = i_item_sk
+        |   and ss_sold_date_sk = d_date_sk
+        |   and d_week_seq = (select d_week_seq from date_dim
+        |                     where d_year = 1999 and d_moy = 12 and d_dom = 11)
+        | group by i_brand_id,i_class_id,i_category_id
+        | having sum(ss_quantity*ss_list_price) > (select average_sales from avg_sales)) last_year
+        | where this_year.i_brand_id= last_year.i_brand_id
+        |   and this_year.i_class_id = last_year.i_class_id
+        |   and this_year.i_category_id = last_year.i_category_id
+        | order by this_year.channel, this_year.i_brand_id, this_year.i_class_id, this_year
+        | .i_category_id
+        | limit 100
+      """.stripMargin),
+    ("q15",
+      """
+        | select ca_zip, sum(cs_sales_price)
+        | from catalog_sales, customer, customer_address, date_dim
+        | where cs_bill_customer_sk = c_customer_sk
+        |   and c_current_addr_sk = ca_address_sk
+        |   and ( substr(ca_zip,1,5) in ('85669', '86197','88274','83405','86475',
+        |                                   '85392', '85460', '80348', '81792')
+        |         or ca_state in ('CA','WA','GA')
+        |         or cs_sales_price > 500)
+        |   and cs_sold_date_sk = d_date_sk
+        |   and d_qoy = 2 and d_year = 2001
+        | group by ca_zip
+        | order by ca_zip
+        | limit 100
+      """.stripMargin),
+    // Modifications: " -> `
+    ("q16",
+      """
+        | select
+        |   count(distinct cs_order_number) as `order count`,
+        |   sum(cs_ext_ship_cost) as `total shipping cost`,
+        |   sum(cs_net_profit) as `total net profit`
+        | from
+        |   catalog_sales cs1, date_dim, customer_address, call_center
+        | where
+        |   d_date between '2002-02-01' and (cast('2002-02-01' as date) + interval 60 days)
+        | and cs1.cs_ship_date_sk = d_date_sk
+        | and cs1.cs_ship_addr_sk = ca_address_sk
+        | and ca_state = 'GA'
+        | and cs1.cs_call_center_sk = cc_call_center_sk
+        | and cc_county in ('Williamson County','Williamson County','Williamson County',
+        | 'Williamson County', 'Williamson County')
+        | and exists (select *
+        |            from catalog_sales cs2
+        |            where cs1.cs_order_number = cs2.cs_order_number
+        |              and cs1.cs_warehouse_sk <> cs2.cs_warehouse_sk)
+        | and not exists(select *
+        |               from catalog_returns cr1
+        |               where cs1.cs_order_number = cr1.cr_order_number)
+        | order by count(distinct cs_order_number)
+        | limit 100
+      """.stripMargin),
+    ("q17",
+      """
+        | select i_item_id
+        |       ,i_item_desc
+        |       ,s_state
+        |       ,count(ss_quantity) as store_sales_quantitycount
+        |       ,avg(ss_quantity) as store_sales_quantityave
+        |       ,stddev_samp(ss_quantity) as store_sales_quantitystdev
+        |       ,stddev_samp(ss_quantity)/avg(ss_quantity) as store_sales_quantitycov
+        |       ,count(sr_return_quantity) as_store_returns_quantitycount
+        |       ,avg(sr_return_quantity) as_store_returns_quantityave
+        |       ,stddev_samp(sr_return_quantity) as_store_returns_quantitystdev
+        |       ,stddev_samp(sr_return_quantity)/avg(sr_return_quantity) as
+        |       store_returns_quantitycov
+        |       ,count(cs_quantity) as catalog_sales_quantitycount ,avg(cs_quantity) as
+        |       catalog_sales_quantityave
+        |       ,stddev_samp(cs_quantity)/avg(cs_quantity) as catalog_sales_quantitystdev
+        |       ,stddev_samp(cs_quantity)/avg(cs_quantity) as catalog_sales_quantitycov
+        | from store_sales, store_returns, catalog_sales, date_dim d1, date_dim d2, date_dim d3,
+        | store, item
+        | where d1.d_quarter_name = '2001Q1'
+        |   and d1.d_date_sk = ss_sold_date_sk
+        |   and i_item_sk = ss_item_sk
+        |   and s_store_sk = ss_store_sk
+        |   and ss_customer_sk = sr_customer_sk
+        |   and ss_item_sk = sr_item_sk
+        |   and ss_ticket_number = sr_ticket_number
+        |   and sr_returned_date_sk = d2.d_date_sk
+        |   and d2.d_quarter_name in ('2001Q1','2001Q2','2001Q3')
+        |   and sr_customer_sk = cs_bill_customer_sk
+        |   and sr_item_sk = cs_item_sk
+        |   and cs_sold_date_sk = d3.d_date_sk
+        |   and d3.d_quarter_name in ('2001Q1','2001Q2','2001Q3')
+        | group by i_item_id, i_item_desc, s_state
+        | order by i_item_id, i_item_desc, s_state
+        | limit 100
+      """.stripMargin),
+    // Modifications: "numeric" -> "decimal"
+    ("q18",
+      """
+        | select i_item_id,
+        |        ca_country,
+        |        ca_state,
+        |        ca_county,
+        |        avg( cast(cs_quantity as decimal(12,2))) agg1,
+        |        avg( cast(cs_list_price as decimal(12,2))) agg2,
+        |        avg( cast(cs_coupon_amt as decimal(12,2))) agg3,
+        |        avg( cast(cs_sales_price as decimal(12,2))) agg4,
+        |        avg( cast(cs_net_profit as decimal(12,2))) agg5,
+        |        avg( cast(c_birth_year as decimal(12,2))) agg6,
+        |        avg( cast(cd1.cd_dep_count as decimal(12,2))) agg7
+        | from catalog_sales, customer_demographics cd1,
+        |      customer_demographics cd2, customer, customer_address, date_dim, item
+        | where cs_sold_date_sk = d_date_sk and
+        |       cs_item_sk = i_item_sk and
+        |       cs_bill_cdemo_sk = cd1.cd_demo_sk and
+        |       cs_bill_customer_sk = c_customer_sk and
+        |       cd1.cd_gender = 'F' and
+        |       cd1.cd_education_status = 'Unknown' and
+        |       c_current_cdemo_sk = cd2.cd_demo_sk and
+        |       c_current_addr_sk = ca_address_sk and
+        |       c_birth_month in (1,6,8,9,12,2) and
+        |       d_year = 1998 and
+        |       ca_state  in ('MS','IN','ND','OK','NM','VA','MS')
+        | group by rollup (i_item_id, ca_country, ca_state, ca_county)
+        | order by ca_country, ca_state, ca_county, i_item_id
+        | LIMIT 100
+      """.stripMargin),
+    ("q19",
+      """
+        | select i_brand_id brand_id, i_brand brand, i_manufact_id, i_manufact,
+        |   sum(ss_ext_sales_price) ext_price
+        | from date_dim, store_sales, item,customer,customer_address,store
+        | where d_date_sk = ss_sold_date_sk
+        |   and ss_item_sk = i_item_sk
+        |   and i_manager_id = 8
+        |   and d_moy = 11
+        |   and d_year = 1998
+        |   and ss_customer_sk = c_customer_sk
+        |   and c_current_addr_sk = ca_address_sk
+        |   and substr(ca_zip,1,5) <> substr(s_zip,1,5)
+        |   and ss_store_sk = s_store_sk
+        | group by i_brand, i_brand_id, i_manufact_id, i_manufact
+        | order by ext_price desc, brand, brand_id, i_manufact_id, i_manufact
+        | limit 100
+      """.stripMargin),
+    ("q20",
+      """
+        |select i_item_desc
+        |       ,i_category
+        |       ,i_class
+        |       ,i_current_price
+        |       ,sum(cs_ext_sales_price) as itemrevenue
+        |       ,sum(cs_ext_sales_price)*100/sum(sum(cs_ext_sales_price)) over
+        |           (partition by i_class) as revenueratio
+        | from catalog_sales, item, date_dim
+        | where cs_item_sk = i_item_sk
+        |   and i_category in ('Sports', 'Books', 'Home')
+        |   and cs_sold_date_sk = d_date_sk
+        | and d_date between cast('1999-02-22' as date)
+        |                           and (cast('1999-02-22' as date) + interval 30 days)
+        | group by i_item_id, i_item_desc, i_category, i_class, i_current_price
+        | order by i_category, i_class, i_item_id, i_item_desc, revenueratio
+        | limit 100
+      """.stripMargin),
+    // Modifications: "+ days" -> "+ interval days"
+    ("q21",
+      """
+        | select * from(
+        |   select w_warehouse_name, i_item_id,
+        |          sum(case when (cast(d_date as date) < cast ('2000-03-11' as date))
+        |                     then inv_quantity_on_hand
+        |                   else 0 end) as inv_before,
+        |          sum(case when (cast(d_date as date) >= cast ('2000-03-11' as date))
+        |                   then inv_quantity_on_hand
+        |                   else 0 end) as inv_after
+        |   from inventory, warehouse, item, date_dim
+        |   where i_current_price between 0.99 and 1.49
+        |     and i_item_sk          = inv_item_sk
+        |     and inv_warehouse_sk   = w_warehouse_sk
+        |     and inv_date_sk        = d_date_sk
+        |     and d_date between (cast('2000-03-11' as date) - interval 30 days)
+        |                    and (cast('2000-03-11' as date) + interval 30 days)
+        |   group by w_warehouse_name, i_item_id) x
+        | where (case when inv_before > 0
+        |             then inv_after / inv_before
+        |             else null
+        |             end) between 2.0/3.0 and 3.0/2.0
+        | order by w_warehouse_name, i_item_id
+        | limit 100
+      """.stripMargin),
+    ("q22",
+      """
+        | select i_product_name, i_brand, i_class, i_category, avg(inv_quantity_on_hand) qoh
+        |       from inventory, date_dim, item, warehouse
+        |       where inv_date_sk=d_date_sk
+        |              and inv_item_sk=i_item_sk
+        |              and inv_warehouse_sk = w_warehouse_sk
+        |              and d_month_seq between 1200 and 1200 + 11
+        |       group by rollup(i_product_name, i_brand, i_class, i_category)
+        | order by qoh, i_product_name, i_brand, i_class, i_category
+        | limit 100
+      """.stripMargin),
+    ("q23a",
+      """
+        | with frequent_ss_items as
+        | (select substr(i_item_desc,1,30) itemdesc,i_item_sk item_sk,d_date solddate,count(*) cnt
+        |  from store_sales, date_dim, item
+        |  where ss_sold_date_sk = d_date_sk
+        |    and ss_item_sk = i_item_sk
+        |    and d_year in (2000, 2000+1, 2000+2,2000+3)
+        |  group by substr(i_item_desc,1,30),i_item_sk,d_date
+        |  having count(*) >4),
+        | max_store_sales as
+        | (select max(csales) tpcds_cmax
+        |  from (select c_customer_sk,sum(ss_quantity*ss_sales_price) csales
+        |        from store_sales, customer, date_dim
+        |        where ss_customer_sk = c_customer_sk
+        |         and ss_sold_date_sk = d_date_sk
+        |         and d_year in (2000, 2000+1, 2000+2,2000+3)
+        |        group by c_customer_sk) x),
+        | best_ss_customer as
+        | (select c_customer_sk,sum(ss_quantity*ss_sales_price) ssales
+        |  from store_sales, customer
+        |  where ss_customer_sk = c_customer_sk
+        |  group by c_customer_sk
+        |  having sum(ss_quantity*ss_sales_price) > (50/100.0) *
+        |    (select * from max_store_sales))
+        | select sum(sales)
+        | from ((select cs_quantity*cs_list_price sales
+        |       from catalog_sales, date_dim
+        |       where d_year = 2000
+        |         and d_moy = 2
+        |         and cs_sold_date_sk = d_date_sk
+        |         and cs_item_sk in (select item_sk from frequent_ss_items)
+        |         and cs_bill_customer_sk in (select c_customer_sk from best_ss_customer))
+        |      union all
+        |      (select ws_quantity*ws_list_price sales
+        |       from web_sales, date_dim
+        |       where d_year = 2000
+        |         and d_moy = 2
+        |         and ws_sold_date_sk = d_date_sk
+        |         and ws_item_sk in (select item_sk from frequent_ss_items)
+        |         and ws_bill_customer_sk in (select c_customer_sk from best_ss_customer))) y
+        | limit 100
+      """.stripMargin),
+    ("q23b",
+      """
+        |
+        | with frequent_ss_items as
+        | (select substr(i_item_desc,1,30) itemdesc,i_item_sk item_sk,d_date solddate,count(*) cnt
+        |  from store_sales, date_dim, item
+        |  where ss_sold_date_sk = d_date_sk
+        |    and ss_item_sk = i_item_sk
+        |    and d_year in (2000, 2000+1, 2000+2,2000+3)
+        |  group by substr(i_item_desc,1,30),i_item_sk,d_date
+        |  having count(*) > 4),
+        | max_store_sales as
+        | (select max(csales) tpcds_cmax
+        |  from (select c_customer_sk,sum(ss_quantity*ss_sales_price) csales
+        |        from store_sales, customer, date_dim
+        |        where ss_customer_sk = c_customer_sk
+        |         and ss_sold_date_sk = d_date_sk
+        |         and d_year in (2000, 2000+1, 2000+2,2000+3)
+        |        group by c_customer_sk) x),
+        | best_ss_customer as
+        | (select c_customer_sk,sum(ss_quantity*ss_sales_price) ssales
+        |  from store_sales
+        |      ,customer
+        |  where ss_customer_sk = c_customer_sk
+        |  group by c_customer_sk
+        |  having sum(ss_quantity*ss_sales_price) > (50/100.0) *
+        |    (select * from max_store_sales))
+        | select c_last_name,c_first_name,sales
+        | from ((select c_last_name,c_first_name,sum(cs_quantity*cs_list_price) sales
+        |        from catalog_sales, customer, date_dim
+        |        where d_year = 2000
+        |         and d_moy = 2
+        |         and cs_sold_date_sk = d_date_sk
+        |         and cs_item_sk in (select item_sk from frequent_ss_items)
+        |         and cs_bill_customer_sk in (select c_customer_sk from best_ss_customer)
+        |         and cs_bill_customer_sk = c_customer_sk
+        |       group by c_last_name,c_first_name)
+        |      union all
+        |      (select c_last_name,c_first_name,sum(ws_quantity*ws_list_price) sales
+        |       from web_sales, customer, date_dim
+        |       where d_year = 2000
+        |         and d_moy = 2
+        |         and ws_sold_date_sk = d_date_sk
+        |         and ws_item_sk in (select item_sk from frequent_ss_items)
+        |         and ws_bill_customer_sk in (select c_customer_sk from best_ss_customer)
+        |         and ws_bill_customer_sk = c_customer_sk
+        |       group by c_last_name,c_first_name)) y
+        |     order by c_last_name,c_first_name,sales
+        | limit 100
+      """.stripMargin),
+    ("q24a",
+      """
+        | with ssales as
+        | (select c_last_name, c_first_name, s_store_name, ca_state, s_state, i_color,
+        |        i_current_price, i_manager_id, i_units, i_size, sum(ss_net_paid) netpaid
+        | from store_sales, store_returns, store, item, customer, customer_address
+        | where ss_ticket_number = sr_ticket_number
+        |   and ss_item_sk = sr_item_sk
+        |   and ss_customer_sk = c_customer_sk
+        |   and ss_item_sk = i_item_sk
+        |   and ss_store_sk = s_store_sk
+        |   and c_birth_country = upper(ca_country)
+        |   and s_zip = ca_zip
+        | and s_market_id = 8
+        | group by c_last_name, c_first_name, s_store_name, ca_state, s_state, i_color,
+        |          i_current_price, i_manager_id, i_units, i_size)
+        | select c_last_name, c_first_name, s_store_name, sum(netpaid) paid
+        | from ssales
+        | where i_color = 'pale'
+        | group by c_last_name, c_first_name, s_store_name
+        | having sum(netpaid) > (select 0.05*avg(netpaid) from ssales)
+      """.stripMargin),
+    ("q24b",
+      """
+        | with ssales as
+        | (select c_last_name, c_first_name, s_store_name, ca_state, s_state, i_color,
+        |         i_current_price, i_manager_id, i_units, i_size, sum(ss_net_paid) netpaid
+        | from store_sales, store_returns, store, item, customer, customer_address
+        | where ss_ticket_number = sr_ticket_number
+        |   and ss_item_sk = sr_item_sk
+        |   and ss_customer_sk = c_customer_sk
+        |   and ss_item_sk = i_item_sk
+        |   and ss_store_sk = s_store_sk
+        |   and c_birth_country = upper(ca_country)
+        |   and s_zip = ca_zip
+        |   and s_market_id = 8
+        | group by c_last_name, c_first_name, s_store_name, ca_state, s_state,
+        |          i_color, i_current_price, i_manager_id, i_units, i_size)
+        | select c_last_name, c_first_name, s_store_name, sum(netpaid) paid
+        | from ssales
+        | where i_color = 'chiffon'
+        | group by c_last_name, c_first_name, s_store_name
+        | having sum(netpaid) > (select 0.05*avg(netpaid) from ssales)
+      """.stripMargin),
+    ("q25",
+      """
+        | select i_item_id, i_item_desc, s_store_id, s_store_name,
+        |    sum(ss_net_profit) as store_sales_profit,
+        |    sum(sr_net_loss) as store_returns_loss,
+        |    sum(cs_net_profit) as catalog_sales_profit
+        | from
+        |    store_sales, store_returns, catalog_sales, date_dim d1, date_dim d2, date_dim d3,
+        |    store, item
+        | where
+        |    d1.d_moy = 4
+        |    and d1.d_year = 2001
+        |    and d1.d_date_sk = ss_sold_date_sk
+        |    and i_item_sk = ss_item_sk
+        |    and s_store_sk = ss_store_sk
+        |    and ss_customer_sk = sr_customer_sk
+        |    and ss_item_sk = sr_item_sk
+        |    and ss_ticket_number = sr_ticket_number
+        |    and sr_returned_date_sk = d2.d_date_sk
+        |    and d2.d_moy between 4 and 10
+        |    and d2.d_year = 2001
+        |    and sr_customer_sk = cs_bill_customer_sk
+        |    and sr_item_sk = cs_item_sk
+        |    and cs_sold_date_sk = d3.d_date_sk
+        |    and d3.d_moy between 4 and 10
+        |    and d3.d_year = 2001
+        | group by
+        |    i_item_id, i_item_desc, s_store_id, s_store_name
+        | order by
+        |    i_item_id, i_item_desc, s_store_id, s_store_name
+        | limit 100
+      """.stripMargin),
+    ("q26",
+      """
+        | select i_item_id,
+        |        avg(cs_quantity) agg1,
+        |        avg(cs_list_price) agg2,
+        |        avg(cs_coupon_amt) agg3,
+        |        avg(cs_sales_price) agg4
+        | from catalog_sales, customer_demographics, date_dim, item, promotion
+        | where cs_sold_date_sk = d_date_sk and
+        |       cs_item_sk = i_item_sk and
+        |       cs_bill_cdemo_sk = cd_demo_sk and
+        |       cs_promo_sk = p_promo_sk and
+        |       cd_gender = 'M' and
+        |       cd_marital_status = 'S' and
+        |       cd_education_status = 'College' and
+        |       (p_channel_email = 'N' or p_channel_event = 'N') and
+        |       d_year = 2000
+        | group by i_item_id
+        | order by i_item_id
+        | limit 100
+      """.stripMargin),
+    ("q27",
+      """
+        | select i_item_id,
+        |        s_state, grouping(s_state) g_state,
+        |        avg(ss_quantity) agg1,
+        |        avg(ss_list_price) agg2,
+        |        avg(ss_coupon_amt) agg3,
+        |        avg(ss_sales_price) agg4
+        | from store_sales, customer_demographics, date_dim, store, item
+        | where ss_sold_date_sk = d_date_sk and
+        |       ss_item_sk = i_item_sk and
+        |       ss_store_sk = s_store_sk and
+        |       ss_cdemo_sk = cd_demo_sk and
+        |       cd_gender = 'M' and
+        |       cd_marital_status = 'S' and
+        |       cd_education_status = 'College' and
+        |       d_year = 2002 and
+        |       s_state in ('TN','TN', 'TN', 'TN', 'TN', 'TN')
+        | group by rollup (i_item_id, s_state)
+        | order by i_item_id, s_state
+        | limit 100
+      """.stripMargin),
+    ("q28",
+      """
+        | select *
+        | from (select avg(ss_list_price) B1_LP
+        |            ,count(ss_list_price) B1_CNT
+        |            ,count(distinct ss_list_price) B1_CNTD
+        |      from store_sales
+        |      where ss_quantity between 0 and 5
+        |        and (ss_list_price between 8 and 8+10
+        |             or ss_coupon_amt between 459 and 459+1000
+        |             or ss_wholesale_cost between 57 and 57+20)) B1,
+        |     (select avg(ss_list_price) B2_LP
+        |            ,count(ss_list_price) B2_CNT
+        |            ,count(distinct ss_list_price) B2_CNTD
+        |      from store_sales
+        |      where ss_quantity between 6 and 10
+        |        and (ss_list_price between 90 and 90+10
+        |             or ss_coupon_amt between 2323 and 2323+1000
+        |             or ss_wholesale_cost between 31 and 31+20)) B2,
+        |     (select avg(ss_list_price) B3_LP
+        |            ,count(ss_list_price) B3_CNT
+        |            ,count(distinct ss_list_price) B3_CNTD
+        |      from store_sales
+        |      where ss_quantity between 11 and 15
+        |        and (ss_list_price between 142 and 142+10
+        |             or ss_coupon_amt between 12214 and 12214+1000
+        |             or ss_wholesale_cost between 79 and 79+20)) B3,
+        |     (select avg(ss_list_price) B4_LP
+        |            ,count(ss_list_price) B4_CNT
+        |            ,count(distinct ss_list_price) B4_CNTD
+        |      from store_sales
+        |      where ss_quantity between 16 and 20
+        |        and (ss_list_price between 135 and 135+10
+        |             or ss_coupon_amt between 6071 and 6071+1000
+        |             or ss_wholesale_cost between 38 and 38+20)) B4,
+        |     (select avg(ss_list_price) B5_LP
+        |            ,count(ss_list_price) B5_CNT
+        |            ,count(distinct ss_list_price) B5_CNTD
+        |      from store_sales
+        |      where ss_quantity between 21 and 25
+        |        and (ss_list_price between 122 and 122+10
+        |             or ss_coupon_amt between 836 and 836+1000
+        |             or ss_wholesale_cost between 17 and 17+20)) B5,
+        |     (select avg(ss_list_price) B6_LP
+        |            ,count(ss_list_price) B6_CNT
+        |            ,count(distinct ss_list_price) B6_CNTD
+        |      from store_sales
+        |      where ss_quantity between 26 and 30
+        |        and (ss_list_price between 154 and 154+10
+        |             or ss_coupon_amt between 7326 and 7326+1000
+        |             or ss_wholesale_cost between 7 and 7+20)) B6
+        | limit 100
+      """.stripMargin),
+    ("q29",
+      """
+        | select
+        |     i_item_id
+        |    ,i_item_desc
+        |    ,s_store_id
+        |    ,s_store_name
+        |    ,sum(ss_quantity)        as store_sales_quantity
+        |    ,sum(sr_return_quantity) as store_returns_quantity
+        |    ,sum(cs_quantity)        as catalog_sales_quantity
+        | from
+        |    store_sales, store_returns, catalog_sales, date_dim d1, date_dim d2,
+        |    date_dim d3, store, item
+        | where
+        |     d1.d_moy               = 9
+        | and d1.d_year              = 1999
+        | and d1.d_date_sk           = ss_sold_date_sk
+        | and i_item_sk              = ss_item_sk
+        | and s_store_sk             = ss_store_sk
+        | and ss_customer_sk         = sr_customer_sk
+        | and ss_item_sk             = sr_item_sk
+        | and ss_ticket_number       = sr_ticket_number
+        | and sr_returned_date_sk    = d2.d_date_sk
+        | and d2.d_moy               between 9 and  9 + 3
+        | and d2.d_year              = 1999
+        | and sr_customer_sk         = cs_bill_customer_sk
+        | and sr_item_sk             = cs_item_sk
+        | and cs_sold_date_sk        = d3.d_date_sk
+        | and d3.d_year              in (1999,1999+1,1999+2)
+        | group by
+        |    i_item_id, i_item_desc, s_store_id, s_store_name
+        | order by
+        |    i_item_id, i_item_desc, s_store_id, s_store_name
+        | limit 100
+      """.stripMargin),
+    ("q30",
+      """
+        | with customer_total_return as
+        | (select wr_returning_customer_sk as ctr_customer_sk
+        |        ,ca_state as ctr_state,
+        |   sum(wr_return_amt) as ctr_total_return
+        | from web_returns, date_dim, customer_address
+        | where wr_returned_date_sk = d_date_sk
+        |   and d_year = 2002
+        |   and wr_returning_addr_sk = ca_address_sk
+        | group by wr_returning_customer_sk,ca_state)
+        | select c_customer_id,c_salutation,c_first_name,c_last_name,c_preferred_cust_flag
+        |       ,c_birth_day,c_birth_month,c_birth_year,c_birth_country,c_login,c_email_address
+        |       ,c_last_review_date,ctr_total_return
+        | from customer_total_return ctr1, customer_address, customer
+        | where ctr1.ctr_total_return > (select avg(ctr_total_return)*1.2
+        |                     from customer_total_return ctr2
+        |                     where ctr1.ctr_state = ctr2.ctr_state)
+        |       and ca_address_sk = c_current_addr_sk
+        |       and ca_state = 'GA'
+        |       and ctr1.ctr_customer_sk = c_customer_sk
+        | order by c_customer_id,c_salutation,c_first_name,c_last_name,c_preferred_cust_flag
+        |                  ,c_birth_day,c_birth_month,c_birth_year,c_birth_country,c_login,
+        |                  c_email_address
+        |                  ,c_last_review_date,ctr_total_return
+        | limit 100
+      """.stripMargin),
+    ("q31",
+      """
+        | with ss as
+        | (select ca_county,d_qoy, d_year,sum(ss_ext_sales_price) as store_sales
+        | from store_sales,date_dim,customer_address
+        | where ss_sold_date_sk = d_date_sk
+        |  and ss_addr_sk=ca_address_sk
+        | group by ca_county,d_qoy, d_year),
+        | ws as
+        | (select ca_county,d_qoy, d_year,sum(ws_ext_sales_price) as web_sales
+        | from web_sales,date_dim,customer_address
+        | where ws_sold_date_sk = d_date_sk
+        |  and ws_bill_addr_sk=ca_address_sk
+        | group by ca_county,d_qoy, d_year)
+        | select
+        |        ss1.ca_county
+        |       ,ss1.d_year
+        |       ,ws2.web_sales/ws1.web_sales web_q1_q2_increase
+        |       ,ss2.store_sales/ss1.store_sales store_q1_q2_increase
+        |       ,ws3.web_sales/ws2.web_sales web_q2_q3_increase
+        |       ,ss3.store_sales/ss2.store_sales store_q2_q3_increase
+        | from
+        |        ss ss1, ss ss2, ss ss3, ws ws1, ws ws2, ws ws3
+        | where
+        |    ss1.d_qoy = 1
+        |    and ss1.d_year = 2000
+        |    and ss1.ca_county = ss2.ca_county
+        |    and ss2.d_qoy = 2
+        |    and ss2.d_year = 2000
+        | and ss2.ca_county = ss3.ca_county
+        |    and ss3.d_qoy = 3
+        |    and ss3.d_year = 2000
+        |    and ss1.ca_county = ws1.ca_county
+        |    and ws1.d_qoy = 1
+        |    and ws1.d_year = 2000
+        |    and ws1.ca_county = ws2.ca_county
+        |    and ws2.d_qoy = 2
+        |    and ws2.d_year = 2000
+        |    and ws1.ca_county = ws3.ca_county
+        |    and ws3.d_qoy = 3
+        |    and ws3.d_year = 2000
+        |    and case when ws1.web_sales > 0 then ws2.web_sales/ws1.web_sales else null end
+        |       > case when ss1.store_sales > 0 then ss2.store_sales/ss1.store_sales else null end
+        |    and case when ws2.web_sales > 0 then ws3.web_sales/ws2.web_sales else null end
+        |       > case when ss2.store_sales > 0 then ss3.store_sales/ss2.store_sales else null end
+        | order by ss1.ca_county
+      """.stripMargin),
+    // Modifications: " -> `
+    ("q32",
+      """
+        | select sum(cs_ext_discount_amt) as `excess discount amount`
+        | from
+        |    catalog_sales, item, date_dim
+        | where
+        |   i_manufact_id = 977
+        |   and i_item_sk = cs_item_sk
+        |   and d_date between '2000-01-27' and (cast('2000-01-27' as date) + interval 90 days)
+        |   and d_date_sk = cs_sold_date_sk
+        |   and cs_ext_discount_amt > (
+        |          select 1.3 * avg(cs_ext_discount_amt)
+        |          from catalog_sales, date_dim
+        |          where cs_item_sk = i_item_sk
+        |           and d_date between '2000-01-27' and (cast('2000-01-27' as date) + interval
+        |           90 days)
+        |           and d_date_sk = cs_sold_date_sk)
+        |limit 100
+      """.stripMargin),
+    ("q33",
+      """
+        | with ss as (
+        |    select
+        |        i_manufact_id,sum(ss_ext_sales_price) total_sales
+        |    from
+        |         store_sales, date_dim, customer_address, item
+        |    where
+        |        i_manufact_id in (select i_manufact_id
+        |                          from item
+        |                          where i_category in ('Electronics'))
+        |                            and ss_item_sk = i_item_sk
+        |                            and ss_sold_date_sk = d_date_sk
+        |                            and d_year = 1998
+        |                            and d_moy = 5
+        |                            and ss_addr_sk = ca_address_sk
+        |                            and ca_gmt_offset = -5
+        |                          group by i_manufact_id), cs as
+        |         (select i_manufact_id, sum(cs_ext_sales_price) total_sales
+        |          from catalog_sales, date_dim, customer_address, item
+        |          where
+        |            i_manufact_id in (
+        |                select i_manufact_id from item
+        |                where
+        |                    i_category in ('Electronics'))
+        |                    and cs_item_sk = i_item_sk
+        |                    and cs_sold_date_sk = d_date_sk
+        |                    and d_year = 1998
+        |                    and d_moy = 5
+        |                    and cs_bill_addr_sk = ca_address_sk
+        |                    and ca_gmt_offset = -5
+        |                group by i_manufact_id),
+        | ws as (
+        | select i_manufact_id,sum(ws_ext_sales_price) total_sales
+        | from
+        |     web_sales, date_dim, customer_address, item
+        | where
+        |    i_manufact_id in (select i_manufact_id from item
+        |                      where i_category in ('Electronics'))
+        |                          and ws_item_sk = i_item_sk
+        |                          and ws_sold_date_sk = d_date_sk
+        |                          and d_year = 1998
+        |                          and d_moy = 5
+        |                          and ws_bill_addr_sk = ca_address_sk
+        |                          and ca_gmt_offset = -5
+        |                      group by i_manufact_id)
+        | select i_manufact_id ,sum(total_sales) total_sales
+        | from  (select * from ss
+        |        union all
+        |        select * from cs
+        |        union all
+        |        select * from ws) tmp1
+        | group by i_manufact_id
+        | order by total_sales
+        |limit 100
+      """.stripMargin),
+    ("q34",
+      """
+        | select c_last_name, c_first_name, c_salutation, c_preferred_cust_flag, ss_ticket_number,
+        |        cnt
+        | FROM
+        |   (select ss_ticket_number, ss_customer_sk, count(*) cnt
+        |    from store_sales,date_dim,store,household_demographics
+        |    where store_sales.ss_sold_date_sk = date_dim.d_date_sk
+        |    and store_sales.ss_store_sk = store.s_store_sk
+        |    and store_sales.ss_hdemo_sk = household_demographics.hd_demo_sk
+        |    and (date_dim.d_dom between 1 and 3 or date_dim.d_dom between 25 and 28)
+        |    and (household_demographics.hd_buy_potential = '>10000' or
+        |         household_demographics.hd_buy_potential = 'unknown')
+        |    and household_demographics.hd_vehicle_count > 0
+        |    and (case when household_demographics.hd_vehicle_count > 0
+        |   then household_demographics.hd_dep_count/ household_demographics.hd_vehicle_count
+        |   else null
+        |   end)  > 1.2
+        |    and date_dim.d_year in (1999, 1999+1, 1999+2)
+        |    and store.s_county in ('Williamson County','Williamson County','Williamson County',
+        |    'Williamson County',
+        |                           'Williamson County','Williamson County','Williamson County',
+        |                           'Williamson County')
+        |    group by ss_ticket_number,ss_customer_sk) dn,customer
+        |    where ss_customer_sk = c_customer_sk
+        |      and cnt between 15 and 20
+        |    order by c_last_name,c_first_name,c_salutation,c_preferred_cust_flag desc
+      """.stripMargin),
+    ("q35",
+      """
+        | select
+        |  ca_state,
+        |  cd_gender,
+        |  cd_marital_status,
+        |  count(*) cnt1,
+        |  min(cd_dep_count),
+        |  max(cd_dep_count),
+        |  avg(cd_dep_count),
+        |  cd_dep_employed_count,
+        |  count(*) cnt2,
+        |  min(cd_dep_employed_count),
+        |  max(cd_dep_employed_count),
+        |  avg(cd_dep_employed_count),
+        |  cd_dep_college_count,
+        |  count(*) cnt3,
+        |  min(cd_dep_college_count),
+        |  max(cd_dep_college_count),
+        |  avg(cd_dep_college_count)
+        | from
+        |  customer c,customer_address ca,customer_demographics
+        | where
+        |  c.c_current_addr_sk = ca.ca_address_sk and
+        |  cd_demo_sk = c.c_current_cdemo_sk and
+        |  exists (select * from store_sales, date_dim
+        |          where c.c_customer_sk = ss_customer_sk and
+        |                ss_sold_date_sk = d_date_sk and
+        |                d_year = 2002 and
+        |                d_qoy < 4) and
+        |   (exists (select * from web_sales, date_dim
+        |            where c.c_customer_sk = ws_bill_customer_sk and
+        |                  ws_sold_date_sk = d_date_sk and
+        |                  d_year = 2002 and
+        |                  d_qoy < 4) or
+        |    exists (select * from catalog_sales, date_dim
+        |            where c.c_customer_sk = cs_ship_customer_sk and
+        |                  cs_sold_date_sk = d_date_sk and
+        |                  d_year = 2002 and
+        |                  d_qoy < 4))
+        | group by ca_state, cd_gender, cd_marital_status, cd_dep_count,
+        |          cd_dep_employed_count, cd_dep_college_count
+        | order by ca_state, cd_gender, cd_marital_status, cd_dep_count,
+        |          cd_dep_employed_count, cd_dep_college_count
+        | limit 100
+      """.stripMargin),
+    ("q36",
+      """
+        | select
+        |    sum(ss_net_profit)/sum(ss_ext_sales_price) as gross_margin
+        |   ,i_category
+        |   ,i_class
+        |   ,grouping(i_category)+grouping(i_class) as lochierarchy
+        |   ,rank() over (
+        |   partition by grouping(i_category)+grouping(i_class),
+        |   case when grouping(i_class) = 0 then i_category end
+        |   order by sum(ss_net_profit)/sum(ss_ext_sales_price) asc) as rank_within_parent
+        | from
+        |    store_sales, date_dim d1, item, store
+        | where
+        |    d1.d_year = 2001
+        |    and d1.d_date_sk = ss_sold_date_sk
+        |    and i_item_sk  = ss_item_sk
+        |    and s_store_sk  = ss_store_sk
+        |    and s_state in ('TN','TN','TN','TN','TN','TN','TN','TN')
+        | group by rollup(i_category,i_class)
+        | order by
+        |   lochierarchy desc
+        |  ,case when lochierarchy = 0 then i_category end
+        |  ,rank_within_parent
+        | limit 100
+      """.stripMargin),
+    // Modifications: "+ days" -> date_add
+    ("q37",
+      """
+        | select i_item_id, i_item_desc, i_current_price
+        | from item, inventory, date_dim, catalog_sales
+        | where i_current_price between 68 and 68 + 30
+        |   and inv_item_sk = i_item_sk
+        |   and d_date_sk=inv_date_sk
+        |   and d_date between cast('2000-02-01' as date) and (cast('2000-02-01' as date) +
+        |   interval 60 days)
+        |   and i_manufact_id in (677,940,694,808)
+        |   and inv_quantity_on_hand between 100 and 500
+        |   and cs_item_sk = i_item_sk
+        | group by i_item_id,i_item_desc,i_current_price
+        | order by i_item_id
+        | limit 100
+      """.stripMargin),
+    ("q38",
+      """
+        | select count(*) from (
+        |    select distinct c_last_name, c_first_name, d_date
+        |    from store_sales, date_dim, customer
+        |          where store_sales.ss_sold_date_sk = date_dim.d_date_sk
+        |      and store_sales.ss_customer_sk = customer.c_customer_sk
+        |      and d_month_seq between 1200 and  1200 + 11
+        |  intersect
+        |    select distinct c_last_name, c_first_name, d_date
+        |    from catalog_sales, date_dim, customer
+        |          where catalog_sales.cs_sold_date_sk = date_dim.d_date_sk
+        |      and catalog_sales.cs_bill_customer_sk = customer.c_customer_sk
+        |      and d_month_seq between  1200 and  1200 + 11
+        |  intersect
+        |    select distinct c_last_name, c_first_name, d_date
+        |    from web_sales, date_dim, customer
+        |          where web_sales.ws_sold_date_sk = date_dim.d_date_sk
+        |      and web_sales.ws_bill_customer_sk = customer.c_customer_sk
+        |      and d_month_seq between  1200 and  1200 + 11
+        | ) hot_cust
+        | limit 100
+      """.stripMargin),
+    ("q39a",
+      """
+        | with inv as
+        | (select w_warehouse_name,w_warehouse_sk,i_item_sk,d_moy
+        |        ,stdev,mean, case mean when 0 then null else stdev/mean end cov
+        |  from(select w_warehouse_name,w_warehouse_sk,i_item_sk,d_moy
+        |             ,stddev_samp(inv_quantity_on_hand) stdev,avg(inv_quantity_on_hand) mean
+        |       from inventory, item, warehouse, date_dim
+        |       where inv_item_sk = i_item_sk
+        |         and inv_warehouse_sk = w_warehouse_sk
+        |         and inv_date_sk = d_date_sk
+        |         and d_year = 2001
+        |       group by w_warehouse_name,w_warehouse_sk,i_item_sk,d_moy) foo
+        |  where case mean when 0 then 0 else stdev/mean end > 1)
+        | select inv1.w_warehouse_sk,inv1.i_item_sk,inv1.d_moy,inv1.mean, inv1.cov
+        |         ,inv2.w_warehouse_sk,inv2.i_item_sk,inv2.d_moy,inv2.mean, inv2.cov
+        | from inv inv1,inv inv2
+        | where inv1.i_item_sk = inv2.i_item_sk
+        |   and inv1.w_warehouse_sk =  inv2.w_warehouse_sk
+        |   and inv1.d_moy=1
+        |   and inv2.d_moy=1+1
+        | order by inv1.w_warehouse_sk,inv1.i_item_sk,inv1.d_moy,inv1.mean,inv1.cov
+        |         ,inv2.d_moy,inv2.mean, inv2.cov
+      """.stripMargin),
+    ("q39b",
+      """
+        | with inv as
+        | (select w_warehouse_name,w_warehouse_sk,i_item_sk,d_moy
+        |        ,stdev,mean, case mean when 0 then null else stdev/mean end cov
+        |  from(select w_warehouse_name,w_warehouse_sk,i_item_sk,d_moy
+        |             ,stddev_samp(inv_quantity_on_hand) stdev,avg(inv_quantity_on_hand) mean
+        |       from inventory, item, warehouse, date_dim
+        |       where inv_item_sk = i_item_sk
+        |         and inv_warehouse_sk = w_warehouse_sk
+        |         and inv_date_sk = d_date_sk
+        |         and d_year = 2001
+        |       group by w_warehouse_name,w_warehouse_sk,i_item_sk,d_moy) foo
+        |  where case mean when 0 then 0 else stdev/mean end > 1)
+        | select inv1.w_warehouse_sk,inv1.i_item_sk,inv1.d_moy,inv1.mean, inv1.cov
+        |         ,inv2.w_warehouse_sk,inv2.i_item_sk,inv2.d_moy,inv2.mean, inv2.cov
+        | from inv inv1,inv inv2
+        | where inv1.i_item_sk = inv2.i_item_sk
+        |   and inv1.w_warehouse_sk =  inv2.w_warehouse_sk
+        |   and inv1.d_moy=1
+        |   and inv2.d_moy=1+1
+        |   and inv1.cov > 1.5
+        | order by inv1.w_warehouse_sk,inv1.i_item_sk,inv1.d_moy,inv1.mean,inv1.cov
+        |         ,inv2.d_moy,inv2.mean, inv2.cov
+      """.stripMargin),
+    // Modifications: "+ days" -> date_add
+    ("q40",
+      """
+        | select
+        |   w_state
+        |  ,i_item_id
+        |  ,sum(case when (cast(d_date as date) < cast('2000-03-11' as date))
+        |           then cs_sales_price - coalesce(cr_refunded_cash,0) else 0 end) as sales_before
+        |  ,sum(case when (cast(d_date as date) >= cast('2000-03-11' as date))
+        |           then cs_sales_price - coalesce(cr_refunded_cash,0) else 0 end) as sales_after
+        | from
+        |   catalog_sales left outer join catalog_returns on
+        |       (cs_order_number = cr_order_number
+        |        and cs_item_sk = cr_item_sk)
+        |  ,warehouse, item, date_dim
+        | where
+        |     i_current_price between 0.99 and 1.49
+        | and i_item_sk          = cs_item_sk
+        | and cs_warehouse_sk    = w_warehouse_sk
+        | and cs_sold_date_sk    = d_date_sk
+        | and d_date between (cast('2000-03-11' as date) - interval 30 days)
+        |                and (cast('2000-03-11' as date) + interval 30 days)
+        | group by w_state,i_item_id
+        | order by w_state,i_item_id
+        | limit 100
+      """.stripMargin),
+    ("q41",
+      """
+        | select distinct(i_product_name)
+        | from item i1
+        | where i_manufact_id between 738 and 738+40
+        |   and (select count(*) as item_cnt
+        |        from item
+        |        where (i_manufact = i1.i_manufact and
+        |        ((i_category = 'Women' and
+        |        (i_color = 'powder' or i_color = 'khaki') and
+        |        (i_units = 'Ounce' or i_units = 'Oz') and
+        |        (i_size = 'medium' or i_size = 'extra large')
+        |        ) or
+        |        (i_category = 'Women' and
+        |        (i_color = 'brown' or i_color = 'honeydew') and
+        |        (i_units = 'Bunch' or i_units = 'Ton') and
+        |        (i_size = 'N/A' or i_size = 'small')
+        |        ) or
+        |        (i_category = 'Men' and
+        |        (i_color = 'floral' or i_color = 'deep') and
+        |        (i_units = 'N/A' or i_units = 'Dozen') and
+        |        (i_size = 'petite' or i_size = 'large')
+        |        ) or
+        |        (i_category = 'Men' and
+        |        (i_color = 'light' or i_color = 'cornflower') and
+        |        (i_units = 'Box' or i_units = 'Pound') and
+        |        (i_size = 'medium' or i_size = 'extra large')
+        |        ))) or
+        |       (i_manufact = i1.i_manufact and
+        |        ((i_category = 'Women' and
+        |        (i_color = 'midnight' or i_color = 'snow') and
+        |        (i_units = 'Pallet' or i_units = 'Gross') and
+        |        (i_size = 'medium' or i_size = 'extra large')
+        |        ) or
+        |        (i_category = 'Women' and
+        |        (i_color = 'cyan' or i_color = 'papaya') and
+        |        (i_units = 'Cup' or i_units = 'Dram') and
+        |        (i_size = 'N/A' or i_size = 'small')
+        |        ) or
+        |        (i_category = 'Men' and
+        |        (i_color = 'orange' or i_color = 'frosted') and
+        |        (i_units = 'Each' or i_units = 'Tbl') and
+        |        (i_size = 'petite' or i_size = 'large')
+        |        ) or
+        |        (i_category = 'Men' and
+        |        (i_color = 'forest' or i_color = 'ghost') and
+        |        (i_units = 'Lb' or i_units = 'Bundle') and
+        |        (i_size = 'medium' or i_size = 'extra large')
+        |        )))) > 0
+        | order by i_product_name
+        | limit 100
+      """.stripMargin),
+    ("q42",
+      """
+        | select dt.d_year, item.i_category_id, item.i_category, sum(ss_ext_sales_price)
+        | from      date_dim dt, store_sales, item
+        | where dt.d_date_sk = store_sales.ss_sold_date_sk
+        |   and store_sales.ss_item_sk = item.i_item_sk
+        |   and item.i_manager_id = 1
+        |   and dt.d_moy=11
+        |   and dt.d_year=2000
+        | group by  dt.d_year
+        |           ,item.i_category_id
+        |           ,item.i_category
+        | order by       sum(ss_ext_sales_price) desc,dt.d_year
+        |           ,item.i_category_id
+        |           ,item.i_category
+        | limit 100
+      """.stripMargin),
+    ("q43",
+      """
+        | select s_store_name, s_store_id,
+        |        sum(case when (d_day_name='Sunday') then ss_sales_price else null end) sun_sales,
+        |        sum(case when (d_day_name='Monday') then ss_sales_price else null end) mon_sales,
+        |        sum(case when (d_day_name='Tuesday') then ss_sales_price else  null end) tue_sales,
+        |        sum(case when (d_day_name='Wednesday') then ss_sales_price else null end)
+        |        wed_sales,
+        |        sum(case when (d_day_name='Thursday') then ss_sales_price else null end) thu_sales,
+        |        sum(case when (d_day_name='Friday') then ss_sales_price else null end) fri_sales,
+        |        sum(case when (d_day_name='Saturday') then ss_sales_price else null end) sat_sales
+        | from date_dim, store_sales, store
+        | where d_date_sk = ss_sold_date_sk and
+        |       s_store_sk = ss_store_sk and
+        |       s_gmt_offset = -5 and
+        |       d_year = 2000
+        | group by s_store_name, s_store_id
+        | order by s_store_name, s_store_id,sun_sales,mon_sales,tue_sales,wed_sales,
+        |          thu_sales,fri_sales,sat_sales
+        | limit 100
+      """.stripMargin),
+    ("q44",
+      """
+        | select asceding.rnk, i1.i_product_name best_performing, i2.i_product_name worst_performing
+        | from(select *
+        |     from (select item_sk,rank() over (order by rank_col asc) rnk
+        |           from (select ss_item_sk item_sk,avg(ss_net_profit) rank_col
+        |                 from store_sales ss1
+        |                 where ss_store_sk = 4
+        |                 group by ss_item_sk
+        |                 having avg(ss_net_profit) > 0.9*(select avg(ss_net_profit) rank_col
+        |                                                  from store_sales
+        |                                                  where ss_store_sk = 4
+        |                                                    and ss_addr_sk is null
+        |                                                  group by ss_store_sk))V1)V11
+        |     where rnk  < 11) asceding,
+        |    (select *
+        |     from (select item_sk,rank() over (order by rank_col desc) rnk
+        |           from (select ss_item_sk item_sk,avg(ss_net_profit) rank_col
+        |                 from store_sales ss1
+        |                 where ss_store_sk = 4
+        |                 group by ss_item_sk
+        |                 having avg(ss_net_profit) > 0.9*(select avg(ss_net_profit) rank_col
+        |                                                  from store_sales
+        |                                                  where ss_store_sk = 4
+        |                                                    and ss_addr_sk is null
+        |                                                  group by ss_store_sk))V2)V21
+        |     where rnk  < 11) descending,
+        | item i1, item i2
+        | where asceding.rnk = descending.rnk
+        |   and i1.i_item_sk=asceding.item_sk
+        |   and i2.i_item_sk=descending.item_sk
+        |

<TRUNCATED>

[03/50] [abbrv] carbondata git commit: [CARBONDATA-2426] Added fix for data mismatch after compaction on Partition with Pre-Aggregate tables

Posted by ma...@apache.org.
[CARBONDATA-2426] Added fix for data mismatch after compaction on Partition with Pre-Aggregate tables

Problem: The partition directory is getting deleted when one of the segments is marked for delete and
another segment is loaded into the same partition.
In the case of aggregate tables we only have partition specs for the specified segments, so
when the deleted segment is scanned against the locationMap for stale partitions, one of the
valid partitions is treated as stale because we do not get partitionSpecs for all the segments.

Solution: Delete the index file instead of the partition directory and then delete the directory if it is empty.
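
A minimal sketch of that flow, using plain java.io in place of CarbonData's
FileFactory/CarbonFile API (the class, method and map names below are illustrative
assumptions, not the actual SegmentFileStore signatures):

import java.io.File;
import java.util.List;
import java.util.Map;

// Sketch only: delete the stale index/data files of the removed segment first,
// then drop the partition directory only when it has become empty.
public class StalePartitionCleanupSketch {

  // locationMap: partition directory path -> files recorded for the deleted segment
  static void deleteStaleFiles(Map<String, List<String>> locationMap,
      List<String> validPartitionPaths) {
    for (Map.Entry<String, List<String>> entry : locationMap.entrySet()) {
      String partitionPath = entry.getKey();
      if (validPartitionPaths.contains(partitionPath)) {
        continue; // the partition is still referenced by a valid segment, keep it
      }
      for (String staleFile : entry.getValue()) {
        new File(staleFile).delete(); // delete only the deleted segment's files
      }
      File partitionDir = new File(partitionPath);
      File[] remaining = partitionDir.listFiles();
      if (remaining != null && remaining.length == 0) {
        partitionDir.delete(); // directory is empty now, safe to remove
      }
    }
  }
}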

This closes #2259


Project: http://git-wip-us.apache.org/repos/asf/carbondata/repo
Commit: http://git-wip-us.apache.org/repos/asf/carbondata/commit/09feb9cc
Tree: http://git-wip-us.apache.org/repos/asf/carbondata/tree/09feb9cc
Diff: http://git-wip-us.apache.org/repos/asf/carbondata/diff/09feb9cc

Branch: refs/heads/spark-2.3
Commit: 09feb9cc2c80f648a5789923394dbdd41f2c8e67
Parents: 325eac2
Author: praveenmeenakshi56 <pr...@gmail.com>
Authored: Wed May 2 15:39:28 2018 +0530
Committer: kunal642 <ku...@gmail.com>
Committed: Tue May 8 17:38:07 2018 +0530

----------------------------------------------------------------------
 .../core/metadata/SegmentFileStore.java         | 17 +++++++-
 ...ndardPartitionWithPreaggregateTestCase.scala | 43 ++++++++++++++++++++
 2 files changed, 59 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/carbondata/blob/09feb9cc/core/src/main/java/org/apache/carbondata/core/metadata/SegmentFileStore.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/metadata/SegmentFileStore.java b/core/src/main/java/org/apache/carbondata/core/metadata/SegmentFileStore.java
index 9326b1d..d72ded3 100644
--- a/core/src/main/java/org/apache/carbondata/core/metadata/SegmentFileStore.java
+++ b/core/src/main/java/org/apache/carbondata/core/metadata/SegmentFileStore.java
@@ -700,16 +700,31 @@ public class SegmentFileStore {
     FileFactory.deleteFile(segmentFilePath, FileFactory.getFileType(segmentFilePath));
   }
 
+  /**
+   * If partition specs are available, then check the location map for any index file path which is
+   * not present in the partitionSpecs. If found then delete that index file.
+   * If the partition directory is empty, then delete the directory also.
+   * If partition specs are null, then directly delete parent directory in locationMap.
+   */
   private static void deletePhysicalPartition(List<PartitionSpec> partitionSpecs,
       Map<String, List<String>> locationMap) throws IOException {
     for (Map.Entry<String, List<String>> entry : locationMap.entrySet()) {
-      Path location = new Path(entry.getKey()).getParent();
       if (partitionSpecs != null) {
+        Path location = new Path(entry.getKey());
         boolean exists = pathExistsInPartitionSpec(partitionSpecs, location);
         if (!exists) {
           FileFactory.deleteAllCarbonFilesOfDir(FileFactory.getCarbonFile(location.toString()));
+          for (String carbonDataFile : entry.getValue()) {
+            FileFactory.deleteAllCarbonFilesOfDir(FileFactory.getCarbonFile(carbonDataFile));
+          }
+        }
+        CarbonFile path = FileFactory.getCarbonFile(location.getParent().toString());
+        if (path.listFiles().length == 0) {
+          FileFactory.deleteAllCarbonFilesOfDir(
+              FileFactory.getCarbonFile(location.getParent().toString()));
         }
       } else {
+        Path location = new Path(entry.getKey()).getParent();
         // delete the segment folder
         CarbonFile segmentPath = FileFactory.getCarbonFile(location.toString());
         if (null != segmentPath) {

http://git-wip-us.apache.org/repos/asf/carbondata/blob/09feb9cc/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/standardpartition/StandardPartitionWithPreaggregateTestCase.scala
----------------------------------------------------------------------
diff --git a/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/standardpartition/StandardPartitionWithPreaggregateTestCase.scala b/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/standardpartition/StandardPartitionWithPreaggregateTestCase.scala
index 8a3ae3f..84c07c4 100644
--- a/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/standardpartition/StandardPartitionWithPreaggregateTestCase.scala
+++ b/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/standardpartition/StandardPartitionWithPreaggregateTestCase.scala
@@ -525,6 +525,49 @@ class StandardPartitionWithPreaggregateTestCase extends QueryTest with BeforeAnd
     sql("drop table if exists updatetime_8")
   }
 
+  test("Test data updation after compaction on Partition with Pre-Aggregate tables") {
+    sql("drop table if exists partitionallcompaction")
+    sql(
+      "create table partitionallcompaction(empno int,empname String,designation String," +
+      "workgroupcategory int,workgroupcategoryname String,deptno int,projectjoindate timestamp," +
+      "projectenddate date,attendance int,utilization int,salary int) partitioned by (deptname " +
+      "String,doj timestamp,projectcode int) stored  by 'carbondata' tblproperties" +
+      "('sort_scope'='global_sort')")
+    sql(
+      "create datamap sensor_1 on table partitionallcompaction using 'preaggregate' as select " +
+      "sum(salary),doj, deptname,projectcode from partitionallcompaction group by doj," +
+      "deptname,projectcode")
+    sql(
+      s"""LOAD DATA local inpath '$resourcesPath/data.csv' OVERWRITE INTO TABLE
+         |partitionallcompaction OPTIONS('DELIMITER'= ',', 'QUOTECHAR'= '"')""".stripMargin)
+    sql(
+      s"""LOAD DATA local inpath '$resourcesPath/data.csv' OVERWRITE INTO TABLE
+         |partitionallcompaction OPTIONS('DELIMITER'= ',', 'QUOTECHAR'= '"')""".stripMargin)
+    sql(
+      s"""LOAD DATA local inpath '$resourcesPath/data.csv' OVERWRITE INTO TABLE
+         |partitionallcompaction PARTITION(deptname='Learning', doj, projectcode) OPTIONS
+         |('DELIMITER'= ',', 'QUOTECHAR'= '"') """.stripMargin)
+    sql(
+      s"""LOAD DATA local inpath '$resourcesPath/data.csv' OVERWRITE INTO TABLE
+         |partitionallcompaction PARTITION(deptname='configManagement', doj, projectcode) OPTIONS
+         |('DELIMITER'= ',', 'QUOTECHAR'= '"')""".stripMargin)
+    sql(
+      s"""LOAD DATA local inpath '$resourcesPath/data.csv' OVERWRITE INTO TABLE
+         |partitionallcompaction PARTITION(deptname='network', doj, projectcode) OPTIONS
+         |('DELIMITER'= ',', 'QUOTECHAR'= '"')""".stripMargin)
+    sql(
+      s"""LOAD DATA local inpath '$resourcesPath/data.csv' OVERWRITE INTO TABLE
+         |partitionallcompaction PARTITION(deptname='protocol', doj, projectcode) OPTIONS
+         |('DELIMITER'= ',', 'QUOTECHAR'= '"')""".stripMargin)
+    sql(
+      s"""LOAD DATA local inpath '$resourcesPath/data.csv' OVERWRITE INTO TABLE
+         |partitionallcompaction PARTITION(deptname='security', doj, projectcode) OPTIONS
+         |('DELIMITER'= ',', 'QUOTECHAR'= '"')""".stripMargin)
+    sql("ALTER TABLE partitionallcompaction COMPACT 'MINOR'").collect()
+    checkAnswer(sql("select count(empno) from partitionallcompaction where empno=14"),
+      Seq(Row(5)))
+  }
+
   test("Test data updation in Aggregate query after compaction on Partitioned table with Pre-Aggregate table") {
     sql("drop table if exists updatetime_8")
     sql("create table updatetime_8" +


[21/50] [abbrv] carbondata git commit: [CARBONDATA-2452] [CARBONDATA-2451] [CARBONDATA-2450] [CARBONDATA-2453] Fixed issues related to complex types

Posted by ma...@apache.org.
[CARBONDATA-2452] [CARBONDATA-2451] [CARBONDATA-2450] [CARBONDATA-2453] Fixed issues related to complex types

Issue 1: Dictionary encoding was being added to complex-type columns in the SDK
path, which led to data load failure.
Issue 2: Sort columns were not being validated against the table schema.
Issue 3: Bad record handling was missing for complex types.
Issue 4: The parent name was not being prepended to the child field name before
the duplicate check, which threw a duplicate column exception for valid schemas
(see the sketch after this list).
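
A minimal sketch of the Issue 4 fix, with hypothetical class and method names
(the real logic lives in TableSchemaBuilder.checkRepeatColumnName): the child
field is qualified with its parent name before the duplicate check, so a
top-level column and a complex child that share a short name no longer collide.

import java.util.HashSet;
import java.util.Set;

// Sketch only: "id" and "entries.id" must not be reported as duplicates once
// the child name is qualified with its parent.
public class DuplicateColumnCheckSketch {

  private final Set<String> seenColumns = new HashSet<>();

  void addColumn(String fieldName, String parentName) {
    // qualify complex children with the parent name before checking
    String qualifiedName = (parentName == null) ? fieldName : parentName + "." + fieldName;
    if (!seenColumns.add(qualifiedName.toLowerCase())) {
      throw new IllegalArgumentException("Duplicate column found: " + qualifiedName);
    }
  }

  public static void main(String[] args) {
    DuplicateColumnCheckSketch builder = new DuplicateColumnCheckSketch();
    builder.addColumn("id", null);      // top-level primitive column
    builder.addColumn("entries", null); // complex parent column
    builder.addColumn("id", "entries"); // child: recorded as "entries.id", no clash
  }
}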

This closes #2278


Project: http://git-wip-us.apache.org/repos/asf/carbondata/repo
Commit: http://git-wip-us.apache.org/repos/asf/carbondata/commit/6b70b7e4
Tree: http://git-wip-us.apache.org/repos/asf/carbondata/tree/6b70b7e4
Diff: http://git-wip-us.apache.org/repos/asf/carbondata/diff/6b70b7e4

Branch: refs/heads/spark-2.3
Commit: 6b70b7e47b05a612ccb5a5ad01ee2d5a05ffa600
Parents: 8e7fceb
Author: kunal642 <ku...@gmail.com>
Authored: Mon May 7 20:58:21 2018 +0530
Committer: kumarvishal09 <ku...@gmail.com>
Committed: Fri May 11 03:27:36 2018 +0530

----------------------------------------------------------------------
 .../schema/table/TableSchemaBuilder.java        |  21 +-
 .../complexType/TestComplexTypeQuery.scala      |   2 +
 .../TestNonTransactionalCarbonTable.scala       | 410 +++++++++++++++++--
 .../processing/datatypes/ArrayDataType.java     |  11 +-
 .../processing/datatypes/GenericDataType.java   |   3 +-
 .../processing/datatypes/PrimitiveDataType.java |  41 +-
 .../processing/datatypes/StructDataType.java    |  11 +-
 .../loading/DataLoadProcessBuilder.java         |   9 +
 .../impl/ComplexFieldConverterImpl.java         |   2 +-
 .../DirectDictionaryFieldConverterImpl.java     |   1 -
 .../loading/model/CarbonLoadModelBuilder.java   |  15 +-
 .../InputProcessorStepWithNoConverterImpl.java  |  32 +-
 .../sdk/file/CarbonWriterBuilder.java           |  24 +-
 13 files changed, 524 insertions(+), 58 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/carbondata/blob/6b70b7e4/core/src/main/java/org/apache/carbondata/core/metadata/schema/table/TableSchemaBuilder.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/metadata/schema/table/TableSchemaBuilder.java b/core/src/main/java/org/apache/carbondata/core/metadata/schema/table/TableSchemaBuilder.java
index b078400..03d03f8 100644
--- a/core/src/main/java/org/apache/carbondata/core/metadata/schema/table/TableSchemaBuilder.java
+++ b/core/src/main/java/org/apache/carbondata/core/metadata/schema/table/TableSchemaBuilder.java
@@ -122,7 +122,13 @@ public class TableSchemaBuilder {
   private ColumnSchema addColumn(StructField field, String parentName, AtomicInteger valIndex,
       boolean isSortColumn, boolean isComplexChild) {
     Objects.requireNonNull(field);
-    checkRepeatColumnName(field);
+    if (isComplexChild) {
+      // if field is complex then append parent name to the child field to check
+      // if any other field with same name exists
+      checkRepeatColumnName(field, parentName);
+    } else {
+      checkRepeatColumnName(field);
+    }
     ColumnSchema newColumn = new ColumnSchema();
     if (parentName != null) {
       newColumn.setColumnName(parentName + "." + field.getFieldName());
@@ -156,7 +162,7 @@ public class TableSchemaBuilder {
     // SO, this will not have any impact.
     newColumn.setColumnUniqueId(field.getFieldName());
     newColumn.setColumnReferenceId(newColumn.getColumnUniqueId());
-    newColumn.setEncodingList(createEncoding(field.getDataType(), isSortColumn));
+    newColumn.setEncodingList(createEncoding(field.getDataType(), isSortColumn, isComplexChild));
     if (field.getDataType().isComplexType()) {
       if (field.getDataType().getName().equalsIgnoreCase("ARRAY")) {
         newColumn.setNumberOfChild(1);
@@ -209,6 +215,12 @@ public class TableSchemaBuilder {
   /**
    * Throw exception if {@param field} name is repeated
    */
+  private void checkRepeatColumnName(StructField field, String parentName) {
+    checkRepeatColumnName(
+        new StructField(parentName + "." + field.getFieldName(), field.getDataType(),
+            field.getChildren()));
+  }
+
   private void checkRepeatColumnName(StructField field) {
     for (ColumnSchema column : sortColumns) {
       if (column.getColumnName().equalsIgnoreCase(field.getFieldName())) {
@@ -234,9 +246,10 @@ public class TableSchemaBuilder {
     }
   }
 
-  private List<Encoding> createEncoding(DataType dataType, boolean isSortColumn) {
+  private List<Encoding> createEncoding(DataType dataType, boolean isSortColumn,
+      boolean isComplexChild) {
     List<Encoding> encodings = new LinkedList<>();
-    if (dataType == DataTypes.TIMESTAMP || dataType == DataTypes.DATE) {
+    if (dataType == DataTypes.DATE && !isComplexChild) {
       encodings.add(Encoding.DIRECT_DICTIONARY);
       encodings.add(Encoding.DICTIONARY);
     }

http://git-wip-us.apache.org/repos/asf/carbondata/blob/6b70b7e4/integration/spark-common-test/src/test/scala/org/apache/carbondata/integration/spark/testsuite/complexType/TestComplexTypeQuery.scala
----------------------------------------------------------------------
diff --git a/integration/spark-common-test/src/test/scala/org/apache/carbondata/integration/spark/testsuite/complexType/TestComplexTypeQuery.scala b/integration/spark-common-test/src/test/scala/org/apache/carbondata/integration/spark/testsuite/complexType/TestComplexTypeQuery.scala
index bc44df0..6728cdf 100644
--- a/integration/spark-common-test/src/test/scala/org/apache/carbondata/integration/spark/testsuite/complexType/TestComplexTypeQuery.scala
+++ b/integration/spark-common-test/src/test/scala/org/apache/carbondata/integration/spark/testsuite/complexType/TestComplexTypeQuery.scala
@@ -38,6 +38,8 @@ class TestComplexTypeQuery extends QueryTest with BeforeAndAfterAll {
     CarbonProperties.getInstance()
       .addProperty(CarbonCommonConstants.CARBON_TIMESTAMP_FORMAT,
     CarbonCommonConstants.CARBON_TIMESTAMP_DEFAULT_FORMAT)
+      .addProperty(CarbonCommonConstants.CARBON_BAD_RECORDS_ACTION,
+        "force")
     sql("drop table if exists complexcarbontable")
     sql("drop table if exists complexhivetable")
     sql("drop table if exists complex_filter")

http://git-wip-us.apache.org/repos/asf/carbondata/blob/6b70b7e4/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/createTable/TestNonTransactionalCarbonTable.scala
----------------------------------------------------------------------
diff --git a/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/createTable/TestNonTransactionalCarbonTable.scala b/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/createTable/TestNonTransactionalCarbonTable.scala
index 3c51efe..376501b 100644
--- a/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/createTable/TestNonTransactionalCarbonTable.scala
+++ b/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/createTable/TestNonTransactionalCarbonTable.scala
@@ -19,6 +19,7 @@ package org.apache.carbondata.spark.testsuite.createTable
 
 import java.sql.Timestamp
 import java.io.{File, FileFilter, IOException}
+import java.io.{File, FileFilter}
 import java.util
 
 import org.apache.commons.io.FileUtils
@@ -33,6 +34,7 @@ import org.apache.carbondata.core.datastore.filesystem.CarbonFile
 import org.apache.carbondata.core.datastore.impl.FileFactory
 import org.apache.carbondata.core.util.CarbonUtil
 import org.apache.carbondata.sdk.file.AvroCarbonWriter
+import org.apache.carbondata.core.util.{CarbonProperties, CarbonUtil}
 import scala.collection.JavaConverters._
 import scala.collection.mutable
 
@@ -40,7 +42,7 @@ import org.apache.avro
 import org.apache.commons.lang.CharEncoding
 import tech.allegro.schema.json2avro.converter.JsonAvroConverter
 
-import org.apache.carbondata.core.metadata.datatype.DataTypes
+import org.apache.carbondata.core.metadata.datatype.{DataTypes, StructField}
 import org.apache.carbondata.sdk.file.{CarbonWriter, CarbonWriterBuilder, Field, Schema}
 
 
@@ -219,6 +221,9 @@ class TestNonTransactionalCarbonTable extends QueryTest with BeforeAndAfterAll {
   }
 
   override def beforeAll(): Unit = {
+    CarbonProperties.getInstance()
+      .addProperty(CarbonCommonConstants.CARBON_TIMESTAMP_FORMAT,
+        CarbonCommonConstants.CARBON_TIMESTAMP_DEFAULT_FORMAT)
     sql("DROP TABLE IF EXISTS sdkOutputTable")
   }
 
@@ -247,8 +252,7 @@ class TestNonTransactionalCarbonTable extends QueryTest with BeforeAndAfterAll {
     cleanTestData()
   }
 
-  test("test create External Table with Schema with partition, should ignore schema and partition")
-  {
+  test("test create External Table with Schema with partition, should ignore schema and partition") {
     buildTestDataSingleFile()
     assert(new File(writerPath).exists())
     sql("DROP TABLE IF EXISTS sdkOutputTable")
@@ -270,8 +274,7 @@ class TestNonTransactionalCarbonTable extends QueryTest with BeforeAndAfterAll {
   }
 
 
-  test("test create External Table with insert into feature")
-  {
+  test("test create External Table with insert into feature") {
     buildTestDataSingleFile()
     assert(new File(writerPath).exists())
     sql("DROP TABLE IF EXISTS sdkOutputTable")
@@ -302,8 +305,7 @@ class TestNonTransactionalCarbonTable extends QueryTest with BeforeAndAfterAll {
     cleanTestData()
   }
 
-  test("test create External Table with insert overwrite")
-  {
+  test("test create External Table with insert overwrite") {
     buildTestDataSingleFile()
     assert(new File(writerPath).exists())
     sql("DROP TABLE IF EXISTS sdkOutputTable")
@@ -341,8 +343,7 @@ class TestNonTransactionalCarbonTable extends QueryTest with BeforeAndAfterAll {
   }
 
 
-  test("test create External Table with Load")
-  {
+  test("test create External Table with Load") {
     buildTestDataSingleFile()
     assert(new File(writerPath).exists())
     sql("DROP TABLE IF EXISTS sdkOutputTable")
@@ -563,6 +564,7 @@ class TestNonTransactionalCarbonTable extends QueryTest with BeforeAndAfterAll {
     cleanTestData()
   }
 
+
   test("Read sdk writer output file without any file should fail") {
     buildTestDataSingleFile()
     deleteFile(writerPath, CarbonCommonConstants.FACT_FILE_EXT)
@@ -748,7 +750,6 @@ class TestNonTransactionalCarbonTable extends QueryTest with BeforeAndAfterAll {
 
   }
 
-
   test("Read sdk two writer output with same column name but different sort columns") {
     FileUtils.deleteDirectory(new File(writerPath))
     buildTestDataOtherDataType(3, Array[String]("name"))
@@ -814,7 +815,10 @@ class TestNonTransactionalCarbonTable extends QueryTest with BeforeAndAfterAll {
     cleanTestData()
   }
 
-  private def WriteFilesWithAvroWriter(rows: Int, mySchema: String, json: String): Unit = {
+  private def WriteFilesWithAvroWriter(rows: Int,
+      mySchema: String,
+      json: String,
+      fields: Array[Field]) = {
     // conversion to GenericData.Record
     val nn = new avro.Schema.Parser().parse(mySchema)
     val converter = new JsonAvroConverter
@@ -822,8 +826,7 @@ class TestNonTransactionalCarbonTable extends QueryTest with BeforeAndAfterAll {
       .convertToGenericDataRecord(json.getBytes(CharEncoding.UTF_8), nn)
 
     try {
-      val writer = CarbonWriter.builder
-        .withSchema(AvroCarbonWriter.getCarbonSchemaFromAvroSchema(mySchema))
+      val writer = CarbonWriter.builder.withSchema(new Schema(fields))
         .outputPath(writerPath).isTransactionalTable(false)
         .uniqueIdentifier(System.currentTimeMillis()).buildWriterForAvroInput
       var i = 0
@@ -860,7 +863,16 @@ class TestNonTransactionalCarbonTable extends QueryTest with BeforeAndAfterAll {
       """.stripMargin
 
     val json = """ {"name":"bob", "age":10, "address" : {"street":"abc", "city":"bang"}} """
-    WriteFilesWithAvroWriter(rows, mySchema, json)
+
+    val fields = new Array[Field](3)
+    fields(0) = new Field("name", DataTypes.STRING)
+    fields(1) = new Field("age", DataTypes.INT)
+    val fld = new util.ArrayList[StructField]
+    fld.add(new StructField("street", DataTypes.STRING))
+    fld.add(new StructField("city", DataTypes.STRING))
+    fields(2) = new Field("address", "struct", fld)
+
+    WriteFilesWithAvroWriter(rows, mySchema, json, fields)
   }
 
   def buildAvroTestDataStructType(): Any = {
@@ -899,7 +911,17 @@ class TestNonTransactionalCarbonTable extends QueryTest with BeforeAndAfterAll {
                    """.stripMargin
 
     val json: String = """ {"name": "bob","age": 10,"address": ["abc", "defg"]} """
-    WriteFilesWithAvroWriter(rows, mySchema, json)
+
+
+    val fields = new Array[Field](3)
+    fields(0) = new Field("name", DataTypes.STRING)
+    fields(1) = new Field("age", DataTypes.INT)
+    // fields[1] = new Field("age", DataTypes.INT);
+    val fld = new util.ArrayList[StructField]
+    fld.add(new StructField("street", DataTypes.STRING))
+    fields(2) = new Field("address", "array", fld)
+
+    WriteFilesWithAvroWriter(rows, mySchema, json, fields)
   }
 
   def buildAvroTestDataSingleFileArrayType(): Any = {
@@ -943,7 +965,18 @@ class TestNonTransactionalCarbonTable extends QueryTest with BeforeAndAfterAll {
       """ {"name":"bob", "age":10,
         |"address" : {"street":"abc", "city":"bang"},
         |"doorNum" : [1,2,3,4]}""".stripMargin
-    WriteFilesWithAvroWriter(rows, mySchema, json)
+
+    val fields = new Array[Field](4)
+    fields(0) = new Field("name", DataTypes.STRING)
+    fields(1) = new Field("age", DataTypes.INT)
+    val fld = new util.ArrayList[StructField]
+    fld.add(new StructField("street", DataTypes.STRING))
+    fld.add(new StructField("city", DataTypes.STRING))
+    fields(2) = new Field("address", "struct", fld)
+    val fld1 = new util.ArrayList[StructField]
+    fld1.add(new StructField("eachDoorNum", DataTypes.INT))
+    fields(3) = new Field("doorNum", "array", fld1)
+    WriteFilesWithAvroWriter(rows, mySchema, json, fields)
   }
 
   def buildAvroTestDataBothStructArrayType(): Any = {
@@ -951,6 +984,7 @@ class TestNonTransactionalCarbonTable extends QueryTest with BeforeAndAfterAll {
     buildAvroTestDataStructWithArrayType(3, null)
   }
 
+
   // ArrayOfStruct test
   def buildAvroTestDataArrayOfStruct(rows: Int, options: util.Map[String, String]): Any = {
     FileUtils.deleteDirectory(new File(writerPath))
@@ -995,7 +1029,20 @@ class TestNonTransactionalCarbonTable extends QueryTest with BeforeAndAfterAll {
         |{"street":"def","city":"city2"},
         |{"street":"ghi","city":"city3"},
         |{"street":"jkl","city":"city4"}]} """.stripMargin
-    WriteFilesWithAvroWriter(rows, mySchema, json)
+
+    val fields = new Array[Field](3)
+    fields(0) = new Field("name", DataTypes.STRING)
+    fields(1) = new Field("age", DataTypes.INT)
+
+    val fld = new util.ArrayList[StructField]
+    fld.add(new StructField("street", DataTypes.STRING))
+    fld.add(new StructField("city", DataTypes.STRING))
+
+    val fld2 = new util.ArrayList[StructField]
+    fld2.add(new StructField("my_address", DataTypes.createStructType(fld), fld))
+    fields(2) = new Field("doorNum", DataTypes.createArrayType(fld2.get(0).getDataType), fld2)
+
+    WriteFilesWithAvroWriter(rows, mySchema, json, fields)
   }
 
   def buildAvroTestDataArrayOfStructType(): Any = {
@@ -1003,6 +1050,7 @@ class TestNonTransactionalCarbonTable extends QueryTest with BeforeAndAfterAll {
     buildAvroTestDataArrayOfStruct(3, null)
   }
 
+
   // StructOfArray test
   def buildAvroTestDataStructOfArray(rows: Int, options: util.Map[String, String]): Any = {
     FileUtils.deleteDirectory(new File(writerPath))
@@ -1064,7 +1112,21 @@ class TestNonTransactionalCarbonTable extends QueryTest with BeforeAndAfterAll {
                  |		]
                  |	}
                  |} """.stripMargin
-    WriteFilesWithAvroWriter(rows, mySchema, json)
+
+    val fields = new Array[Field](3)
+    fields(0) = new Field("name", DataTypes.STRING)
+    fields(1) = new Field("age", DataTypes.INT)
+
+    val fld2 = new util.ArrayList[StructField]
+    fld2.add(new StructField("street", DataTypes.STRING))
+    fld2.add(new StructField("city", DataTypes.STRING))
+
+    val fld1 = new util.ArrayList[StructField]
+    fld1.add(new StructField("eachDoorNum", DataTypes.INT))
+    fld2.add(new StructField("doorNum", DataTypes.createArrayType(DataTypes.INT), fld1))
+
+    fields(2) = new Field("address","struct",fld2)
+    WriteFilesWithAvroWriter(rows, mySchema, json, fields)
   }
 
   def buildAvroTestDataStructOfArrayType(): Any = {
@@ -1072,6 +1134,7 @@ class TestNonTransactionalCarbonTable extends QueryTest with BeforeAndAfterAll {
     buildAvroTestDataStructOfArray(3, null)
   }
 
+
   test("Read sdk writer Avro output Record Type") {
     buildAvroTestDataStructType()
     assert(new File(writerPath).exists())
@@ -1080,6 +1143,7 @@ class TestNonTransactionalCarbonTable extends QueryTest with BeforeAndAfterAll {
       s"""CREATE EXTERNAL TABLE sdkOutputTable STORED BY 'carbondata' LOCATION
          |'$writerPath' """.stripMargin)
 
+
     checkAnswer(sql("select * from sdkOutputTable"), Seq(
       Row("bob", 10, Row("abc","bang")),
       Row("bob", 10, Row("abc","bang")),
@@ -1138,6 +1202,7 @@ class TestNonTransactionalCarbonTable extends QueryTest with BeforeAndAfterAll {
     cleanTestData()
   }
 
+
   test("Read sdk writer Avro output with Array of struct") {
     buildAvroTestDataArrayOfStructType()
     assert(new File(writerPath).exists())
@@ -1163,6 +1228,7 @@ class TestNonTransactionalCarbonTable extends QueryTest with BeforeAndAfterAll {
     cleanTestData()
   }
 
+
   // Struct of array
   test("Read sdk writer Avro output with struct of Array") {
     buildAvroTestDataStructOfArrayType()
@@ -1264,7 +1330,21 @@ class TestNonTransactionalCarbonTable extends QueryTest with BeforeAndAfterAll {
         |		}
         |	]
         |} """.stripMargin
-    WriteFilesWithAvroWriter(rows, mySchema, json)
+
+    val fields = new Array[Field](3)
+    fields(0) = new Field("name", DataTypes.STRING)
+    fields(1) = new Field("age", DataTypes.INT)
+
+    val fld = new util.ArrayList[StructField]
+    fld.add(new StructField("street", DataTypes.STRING))
+    fld.add(new StructField("city", DataTypes.STRING))
+    fld.add(new StructField("FloorNum", DataTypes.createArrayType(DataTypes.INT)))
+
+    val fld2 = new util.ArrayList[StructField]
+    fld2.add(new StructField("my_address", DataTypes.createStructType(fld), fld))
+    fields(2) = new Field("doorNum", DataTypes.createArrayType(fld2.get(0).getDataType), fld2)
+
+    WriteFilesWithAvroWriter(rows, mySchema, json, fields)
   }
 
   def buildAvroTestDataMultiLevel3Type(): Any = {
@@ -1302,6 +1382,7 @@ class TestNonTransactionalCarbonTable extends QueryTest with BeforeAndAfterAll {
     cleanTestData()
   }
 
+
   // test multi level -- 3 levels [array of struct of struct of string, int]
   def buildAvroTestDataMultiLevel3_1(rows: Int, options: util.Map[String, String]): Any = {
     FileUtils.deleteDirectory(new File(writerPath))
@@ -1381,7 +1462,26 @@ class TestNonTransactionalCarbonTable extends QueryTest with BeforeAndAfterAll {
         |		}
         |	]
         |}  """.stripMargin
-    WriteFilesWithAvroWriter(rows, mySchema, json)
+
+    val fields = new Array[Field](3)
+    fields(0) = new Field("name", DataTypes.STRING)
+    fields(1) = new Field("age", DataTypes.INT)
+
+    val fld = new util.ArrayList[StructField]
+    fld.add(new StructField("street", DataTypes.STRING))
+    fld.add(new StructField("city", DataTypes.STRING))
+
+    val subFld = new util.ArrayList[StructField]
+    subFld.add(new StructField("wing", DataTypes.STRING))
+    subFld.add(new StructField("number", DataTypes.INT))
+    fld.add(new StructField("FloorNum", DataTypes.createStructType(subFld)))
+
+    // array of struct of struct
+    val fld2 = new util.ArrayList[StructField]
+    fld2.add(new StructField("my_address", DataTypes.createStructType(fld), fld))
+    fields(2) = new Field("doorNum", DataTypes.createArrayType(fld2.get(0).getDataType), fld2)
+
+    WriteFilesWithAvroWriter(rows, mySchema, json, fields)
   }
 
   def buildAvroTestDataMultiLevel3_1Type(): Any = {
@@ -1461,7 +1561,22 @@ class TestNonTransactionalCarbonTable extends QueryTest with BeforeAndAfterAll {
         |        	"BuildNum": [[[1,2,3],[4,5,6]],[[10,20,30],[40,50,60]]]
         |        }   """.stripMargin
 
-    WriteFilesWithAvroWriter(rows, mySchema, json)
+    val fields = new Array[Field](3)
+    fields(0) = new Field("name", DataTypes.STRING)
+    fields(1) = new Field("age", DataTypes.INT)
+
+    val subFld = new util.ArrayList[StructField]
+    subFld.add(new StructField("EachDoorNum", DataTypes.INT))
+
+    val fld = new util.ArrayList[StructField]
+    fld.add(new StructField("DoorNum", DataTypes.createArrayType(DataTypes.INT), subFld))
+    // array of struct of struct
+    val doorNum = new util.ArrayList[StructField]
+    doorNum.add(new StructField("FloorNum",
+      DataTypes.createArrayType(DataTypes.createArrayType(DataTypes.INT)), fld))
+    fields(2) = new Field("BuildNum", "array", doorNum)
+
+    WriteFilesWithAvroWriter(rows, mySchema, json, fields)
   }
 
   def buildAvroTestDataMultiLevel3_2Type(): Any = {
@@ -1500,6 +1615,8 @@ class TestNonTransactionalCarbonTable extends QueryTest with BeforeAndAfterAll {
     cleanTestData()
   }
 
+
+
   // test multi level -- 4 levels [array of array of array of struct]
   def buildAvroTestDataMultiLevel4(rows: Int, options: util.Map[String, String]): Any = {
     FileUtils.deleteDirectory(new File(writerPath))
@@ -1578,7 +1695,30 @@ class TestNonTransactionalCarbonTable extends QueryTest with BeforeAndAfterAll {
         |		]
         |	]
         |} """.stripMargin
-    WriteFilesWithAvroWriter(rows, mySchema, json)
+
+    val fields = new Array[Field](3)
+    fields(0) = new Field("name", DataTypes.STRING)
+    fields(1) = new Field("age", DataTypes.INT)
+
+    val subFld = new util.ArrayList[StructField]
+    subFld.add(new StructField("EachDoorNum", DataTypes.INT))
+
+    val address = new util.ArrayList[StructField]
+    address.add(new StructField("street", DataTypes.STRING))
+    address.add(new StructField("city", DataTypes.STRING))
+
+    val fld = new util.ArrayList[StructField]
+    fld.add(new StructField("DoorNum",
+        DataTypes.createArrayType(DataTypes.createStructType(address)),
+        subFld))
+    // array of struct of struct
+    val doorNum = new util.ArrayList[StructField]
+    doorNum.add(new StructField("FloorNum",
+      DataTypes.createArrayType(
+        DataTypes.createArrayType(DataTypes.createStructType(address))), fld))
+    fields(2) = new Field("BuildNum", "array", doorNum)
+
+    WriteFilesWithAvroWriter(rows, mySchema, json, fields)
   }
 
   def buildAvroTestDataMultiLevel4Type(): Any = {
@@ -1604,4 +1744,228 @@ class TestNonTransactionalCarbonTable extends QueryTest with BeforeAndAfterAll {
     cleanTestData()
   }
 
-}
\ No newline at end of file
+  test(
+    "test if exception is thrown when a column which is not in schema is specified in sort columns")
+  {
+    val schema1 =
+      """{
+        |	"namespace": "com.apache.schema",
+        |	"type": "record",
+        |	"name": "StudentActivity",
+        |	"fields": [
+        |		{
+        |			"name": "id",
+        |			"type": "int"
+        |		},
+        |		{
+        |			"name": "course_details",
+        |			"type": {
+        |				"name": "course_details",
+        |				"type": "record",
+        |				"fields": [
+        |					{
+        |						"name": "course_struct_course_time",
+        |						"type": "string"
+        |					}
+        |				]
+        |			}
+        |		}
+        |	]
+        |}""".stripMargin
+
+    val json1 =
+      """{"id": 101,"course_details": { "course_struct_course_time":"2014-01-05"  }}""".stripMargin
+
+    val nn = new org.apache.avro.Schema.Parser().parse(schema1)
+    val converter = new JsonAvroConverter
+    val record = converter
+      .convertToGenericDataRecord(json1.getBytes(CharEncoding.UTF_8), nn)
+
+    val fields = new Array[Field](2)
+    fields(0) = new Field("id", DataTypes.INT)
+    val fld_s = new java.util.ArrayList[StructField]
+    fld_s.add(new StructField("course_struct_course_time", DataTypes.STRING))
+    fields(1) = new Field("course_details", "struct", fld_s)
+
+    assert(intercept[RuntimeException] {
+      val writer = CarbonWriter.builder.withSchema(new Schema(fields)).sortBy(Array("name", "id"))
+        .outputPath(writerPath).isTransactionalTable(false).buildWriterForAvroInput
+      writer.write(record)
+      writer.close()
+    }.getMessage.toLowerCase.contains("column: name specified in sort columns"))
+  }
+
+  test("test if data load is success with a struct having timestamp column  ") {
+    val schema1 =
+      """{
+        |	"namespace": "com.apache.schema",
+        |	"type": "record",
+        |	"name": "StudentActivity",
+        |	"fields": [
+        |		{
+        |			"name": "id",
+        |			"type": "int"
+        |		},
+        |		{
+        |			"name": "course_details",
+        |			"type": {
+        |				"name": "course_details",
+        |				"type": "record",
+        |				"fields": [
+        |					{
+        |						"name": "course_struct_course_time",
+        |						"type": "string"
+        |					}
+        |				]
+        |			}
+        |		}
+        |	]
+        |}""".stripMargin
+
+    val json1 =
+      """{"id": 101,"course_details": { "course_struct_course_time":"2014-01-05 00:00:00"  }}""".stripMargin
+    val nn = new org.apache.avro.Schema.Parser().parse(schema1)
+    val converter = new JsonAvroConverter
+    val record = converter
+      .convertToGenericDataRecord(json1.getBytes(CharEncoding.UTF_8), nn)
+
+    val fields = new Array[Field](2)
+    fields(0) = new Field("id", DataTypes.INT)
+    val fld_s = new java.util.ArrayList[StructField]
+    fld_s.add(new StructField("course_struct_course_time", DataTypes.TIMESTAMP))
+    fields(1) = new Field("course_details", "struct", fld_s)
+
+    val writer = CarbonWriter.builder.withSchema(new Schema(fields)).sortBy(Array("id"))
+      .outputPath(writerPath).isTransactionalTable(false).buildWriterForAvroInput
+    writer.write(record)
+    writer.close()
+  }
+
+  test(
+    "test is dataload is successful if childcolumn has same name as one of the other fields(not " +
+    "complex)")
+  {
+    val schema =
+      """{
+        |	"type": "record",
+        |	"name": "Order",
+        |	"namespace": "com.apache.schema",
+        |	"fields": [
+        |		{
+        |			"name": "id",
+        |			"type": "long"
+        |		},
+        |		{
+        |			"name": "entries",
+        |			"type": {
+        |				"type": "array",
+        |				"items": {
+        |					"type": "record",
+        |					"name": "Entry",
+        |					"fields": [
+        |						{
+        |							"name": "id",
+        |							"type": "long"
+        |						}
+        |					]
+        |				}
+        |			}
+        |		}
+        |	]
+        |}""".stripMargin
+    val json1 =
+      """{"id": 101, "entries": [ {"id":1234}, {"id":3212}  ]}""".stripMargin
+
+    val nn = new org.apache.avro.Schema.Parser().parse(schema)
+    val converter = new JsonAvroConverter
+    val record = converter
+      .convertToGenericDataRecord(json1.getBytes(CharEncoding.UTF_8), nn)
+
+    val fields = new Array[Field](2)
+    fields(0) = new Field("id", DataTypes.LONG)
+    val fld_s = new java.util.ArrayList[StructField]
+    fld_s.add(new StructField("id", DataTypes.LONG))
+    fields(1) = new Field("entries", DataTypes.createArrayType(DataTypes.createStructType(fld_s)))
+    val writer = CarbonWriter.builder.withSchema(new Schema(fields))
+      .outputPath(writerPath).isTransactionalTable(false).buildWriterForAvroInput
+    writer.write(record)
+    writer.close()
+  }
+
+  test("test if data load with various bad_records_action") {
+    val schema =
+      """{
+        |	"namespace": "com.apache.schema",
+        |	"type": "record",
+        |	"name": "StudentActivity",
+        |	"fields": [
+        |		{
+        |			"name": "id",
+        |			"type": "string"
+        |		},
+        |		{
+        |			"name": "course_details",
+        |			"type": {
+        |				"name": "course_details",
+        |				"type": "record",
+        |				"fields": [
+        |					{
+        |						"name": "course_struct_course_string",
+        |						"type": "string"
+        |					}
+        |				]
+        |			}
+        |		},
+        |		{
+        |			"name": "salary_string",
+        |			"type": {
+        |				"type": "array",
+        |				"items": "string"
+        |			}
+        |		}
+        |	]
+        |}""".stripMargin
+    val json1 =
+      """{
+        |	"id": "cust_1",
+        |	"course_details": {
+        |		"course_struct_course_string": "asd"
+        |	},
+        |	"salary_string": [
+        |		"xyz",
+        |		"abc"
+        |	]
+        |}""".stripMargin
+
+    val nn = new org.apache.avro.Schema.Parser().parse(schema)
+    val converter = new JsonAvroConverter
+    val record = converter
+      .convertToGenericDataRecord(json1.getBytes(CharEncoding.UTF_8), nn)
+
+    val fields = new Array[Field](3)
+    fields(0)=new Field("id", DataTypes.STRING)
+    val fld_s = new java.util.ArrayList[StructField]
+    fld_s.add(new StructField("carbon_int", DataTypes.INT))
+    fields(1)=new Field("course_details", "struct",fld_s)
+
+    val fld_a = new java.util.ArrayList[StructField]
+    fld_a.add(new StructField("carbon_array", DataTypes.INT))
+    fields(2)=new Field("salary_string", "array",fld_a)
+
+    val loadOptions = new util.HashMap[String, String]()
+    loadOptions.put("bad_records_action", "fail")
+    assert(intercept[Exception] {
+      val writer = CarbonWriter.builder.withSchema(new Schema(fields)).outputPath(writerPath)
+        .isTransactionalTable(false).withLoadOptions(loadOptions).buildWriterForAvroInput
+      writer.write(record)
+      writer.close()
+    }.getMessage.contains("Data load failed due to bad record"))
+
+    loadOptions.put("bad_records_action", "FORCE")
+      val writer = CarbonWriter.builder.withSchema(new Schema(fields)).outputPath(writerPath)
+        .isTransactionalTable(false).withLoadOptions(loadOptions).buildWriterForAvroInput
+      writer.write(record)
+      writer.close()
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/carbondata/blob/6b70b7e4/processing/src/main/java/org/apache/carbondata/processing/datatypes/ArrayDataType.java
----------------------------------------------------------------------
diff --git a/processing/src/main/java/org/apache/carbondata/processing/datatypes/ArrayDataType.java b/processing/src/main/java/org/apache/carbondata/processing/datatypes/ArrayDataType.java
index d7d59ce..cc2619e 100644
--- a/processing/src/main/java/org/apache/carbondata/processing/datatypes/ArrayDataType.java
+++ b/processing/src/main/java/org/apache/carbondata/processing/datatypes/ArrayDataType.java
@@ -27,7 +27,7 @@ import org.apache.carbondata.core.devapi.DictionaryGenerationException;
 import org.apache.carbondata.core.keygenerator.KeyGenException;
 import org.apache.carbondata.core.keygenerator.KeyGenerator;
 import org.apache.carbondata.processing.loading.complexobjects.ArrayObject;
-
+import org.apache.carbondata.processing.loading.converter.BadRecordLogHolder;
 
 /**
  * Array DataType stateless object used in data loading
@@ -151,17 +151,16 @@ public class ArrayDataType implements GenericDataType<ArrayObject> {
     return true;
   }
 
-  @Override
-  public void writeByteArray(ArrayObject input, DataOutputStream dataOutputStream)
-      throws IOException, DictionaryGenerationException {
+  @Override public void writeByteArray(ArrayObject input, DataOutputStream dataOutputStream,
+      BadRecordLogHolder logHolder) throws IOException, DictionaryGenerationException {
     if (input == null) {
       dataOutputStream.writeInt(1);
-      children.writeByteArray(null, dataOutputStream);
+      children.writeByteArray(null, dataOutputStream, logHolder);
     } else {
       Object[] data = input.getData();
       dataOutputStream.writeInt(data.length);
       for (Object eachInput : data) {
-        children.writeByteArray(eachInput, dataOutputStream);
+        children.writeByteArray(eachInput, dataOutputStream, logHolder);
       }
     }
   }

http://git-wip-us.apache.org/repos/asf/carbondata/blob/6b70b7e4/processing/src/main/java/org/apache/carbondata/processing/datatypes/GenericDataType.java
----------------------------------------------------------------------
diff --git a/processing/src/main/java/org/apache/carbondata/processing/datatypes/GenericDataType.java b/processing/src/main/java/org/apache/carbondata/processing/datatypes/GenericDataType.java
index 21ad83d..f48a91d 100644
--- a/processing/src/main/java/org/apache/carbondata/processing/datatypes/GenericDataType.java
+++ b/processing/src/main/java/org/apache/carbondata/processing/datatypes/GenericDataType.java
@@ -26,6 +26,7 @@ import java.util.List;
 import org.apache.carbondata.core.devapi.DictionaryGenerationException;
 import org.apache.carbondata.core.keygenerator.KeyGenException;
 import org.apache.carbondata.core.keygenerator.KeyGenerator;
+import org.apache.carbondata.processing.loading.converter.BadRecordLogHolder;
 
 /**
  * Generic DataType interface which will be used while data loading for complex types like Array &
@@ -58,7 +59,7 @@ public interface GenericDataType<T> {
    * @param dataOutputStream
    * @throws IOException
    */
-  void writeByteArray(T input, DataOutputStream dataOutputStream)
+  void writeByteArray(T input, DataOutputStream dataOutputStream, BadRecordLogHolder logHolder)
       throws IOException, DictionaryGenerationException;
 
   /**

http://git-wip-us.apache.org/repos/asf/carbondata/blob/6b70b7e4/processing/src/main/java/org/apache/carbondata/processing/datatypes/PrimitiveDataType.java
----------------------------------------------------------------------
diff --git a/processing/src/main/java/org/apache/carbondata/processing/datatypes/PrimitiveDataType.java b/processing/src/main/java/org/apache/carbondata/processing/datatypes/PrimitiveDataType.java
index e34c184..fdfc3bb 100644
--- a/processing/src/main/java/org/apache/carbondata/processing/datatypes/PrimitiveDataType.java
+++ b/processing/src/main/java/org/apache/carbondata/processing/datatypes/PrimitiveDataType.java
@@ -48,10 +48,12 @@ import org.apache.carbondata.core.metadata.schema.table.column.CarbonColumn;
 import org.apache.carbondata.core.metadata.schema.table.column.CarbonDimension;
 import org.apache.carbondata.core.util.CarbonUtil;
 import org.apache.carbondata.core.util.DataTypeUtil;
+import org.apache.carbondata.processing.loading.converter.BadRecordLogHolder;
 import org.apache.carbondata.processing.loading.dictionary.DictionaryServerClientDictionary;
 import org.apache.carbondata.processing.loading.dictionary.DirectDictionary;
 import org.apache.carbondata.processing.loading.dictionary.PreCreatedDictionary;
 import org.apache.carbondata.processing.loading.exception.CarbonDataLoadingException;
+import org.apache.carbondata.processing.util.CarbonDataProcessorUtil;
 
 /**
  * Primitive DataType stateless object used in data loading
@@ -265,19 +267,29 @@ public class PrimitiveDataType implements GenericDataType<Object> {
     return isDictionary;
   }
 
-  @Override public void writeByteArray(Object input, DataOutputStream dataOutputStream)
-      throws IOException, DictionaryGenerationException {
-
+  @Override public void writeByteArray(Object input, DataOutputStream dataOutputStream,
+      BadRecordLogHolder logHolder) throws IOException, DictionaryGenerationException {
     String parsedValue =
         input == null ? null : DataTypeUtil.parseValue(input.toString(), carbonDimension);
+    String message = logHolder.getColumnMessageMap().get(carbonDimension.getColName());
     if (this.isDictionary) {
       Integer surrogateKey;
       if (null == parsedValue) {
         surrogateKey = CarbonCommonConstants.MEMBER_DEFAULT_VAL_SURROGATE_KEY;
+        if (null == message) {
+          message = CarbonDataProcessorUtil
+              .prepareFailureReason(carbonDimension.getColName(), carbonDimension.getDataType());
+          logHolder.getColumnMessageMap().put(carbonDimension.getColName(), message);
+          logHolder.setReason(message);
+        }
       } else {
         surrogateKey = dictionaryGenerator.getOrGenerateKey(parsedValue);
         if (surrogateKey == CarbonCommonConstants.INVALID_SURROGATE_KEY) {
           surrogateKey = CarbonCommonConstants.MEMBER_DEFAULT_VAL_SURROGATE_KEY;
+          message = CarbonDataProcessorUtil
+              .prepareFailureReason(carbonDimension.getColName(), carbonDimension.getDataType());
+          logHolder.getColumnMessageMap().put(carbonDimension.getColName(), message);
+          logHolder.setReason(message);
         }
       }
       dataOutputStream.writeInt(surrogateKey);
@@ -285,15 +297,15 @@ public class PrimitiveDataType implements GenericDataType<Object> {
       // Transform into ByteArray for No Dictionary.
       // TODO have to refactor and place all the cases present in NonDictionaryFieldConverterImpl
       if (null == parsedValue && this.carbonDimension.getDataType() != DataTypes.STRING) {
-        updateNullValue(dataOutputStream);
+        updateNullValue(dataOutputStream, logHolder);
       } else if (null == parsedValue || parsedValue.equals(nullformat)) {
-        updateNullValue(dataOutputStream);
+        updateNullValue(dataOutputStream, logHolder);
       } else {
         String dateFormat = null;
         if (this.carbonDimension.getDataType() == DataTypes.DATE) {
-          dateFormat = this.carbonDimension.getDateFormat();
+          dateFormat = carbonDimension.getDateFormat();
         } else if (this.carbonDimension.getDataType() == DataTypes.TIMESTAMP) {
-          dateFormat = this.carbonDimension.getTimestampFormat();
+          dateFormat = carbonDimension.getTimestampFormat();
         }
 
         try {
@@ -318,9 +330,12 @@ public class PrimitiveDataType implements GenericDataType<Object> {
               updateValueToByteStream(dataOutputStream,
                   parsedValue.getBytes(Charset.forName(CarbonCommonConstants.DEFAULT_CHARSET)));
             } else {
-              updateNullValue(dataOutputStream);
+              updateNullValue(dataOutputStream, logHolder);
             }
           }
+        } catch (NumberFormatException e) {
+          // Update logHolder for bad record and put null in dataOutputStream.
+          updateNullValue(dataOutputStream, logHolder);
         } catch (CarbonDataLoadingException e) {
           throw e;
         } catch (Throwable ex) {
@@ -338,7 +353,8 @@ public class PrimitiveDataType implements GenericDataType<Object> {
     dataOutputStream.write(value);
   }
 
-  private void updateNullValue(DataOutputStream dataOutputStream) throws IOException {
+  private void updateNullValue(DataOutputStream dataOutputStream, BadRecordLogHolder logHolder)
+      throws IOException {
     if (this.carbonDimension.getDataType() == DataTypes.STRING) {
       dataOutputStream.writeInt(CarbonCommonConstants.MEMBER_DEFAULT_VAL_ARRAY.length);
       dataOutputStream.write(CarbonCommonConstants.MEMBER_DEFAULT_VAL_ARRAY);
@@ -346,6 +362,13 @@ public class PrimitiveDataType implements GenericDataType<Object> {
       dataOutputStream.writeInt(CarbonCommonConstants.EMPTY_BYTE_ARRAY.length);
       dataOutputStream.write(CarbonCommonConstants.EMPTY_BYTE_ARRAY);
     }
+    String message = logHolder.getColumnMessageMap().get(carbonDimension.getColName());
+    if (null == message) {
+      message = CarbonDataProcessorUtil
+          .prepareFailureReason(carbonDimension.getColName(), carbonDimension.getDataType());
+      logHolder.getColumnMessageMap().put(carbonDimension.getColName(), message);
+      logHolder.setReason(message);
+    }
   }
 
   @Override public void fillCardinality(List<Integer> dimCardWithComplex) {

http://git-wip-us.apache.org/repos/asf/carbondata/blob/6b70b7e4/processing/src/main/java/org/apache/carbondata/processing/datatypes/StructDataType.java
----------------------------------------------------------------------
diff --git a/processing/src/main/java/org/apache/carbondata/processing/datatypes/StructDataType.java b/processing/src/main/java/org/apache/carbondata/processing/datatypes/StructDataType.java
index 4fe6255..bb3da6c 100644
--- a/processing/src/main/java/org/apache/carbondata/processing/datatypes/StructDataType.java
+++ b/processing/src/main/java/org/apache/carbondata/processing/datatypes/StructDataType.java
@@ -27,6 +27,7 @@ import org.apache.carbondata.core.devapi.DictionaryGenerationException;
 import org.apache.carbondata.core.keygenerator.KeyGenException;
 import org.apache.carbondata.core.keygenerator.KeyGenerator;
 import org.apache.carbondata.processing.loading.complexobjects.StructObject;
+import org.apache.carbondata.processing.loading.converter.BadRecordLogHolder;
 
 /**
  * Struct DataType stateless object used in data loading
@@ -150,22 +151,22 @@ public class StructDataType implements GenericDataType<StructObject> {
     return true;
   }
 
-  @Override public void writeByteArray(StructObject input, DataOutputStream dataOutputStream)
-      throws IOException, DictionaryGenerationException {
+  @Override public void writeByteArray(StructObject input, DataOutputStream dataOutputStream,
+      BadRecordLogHolder logHolder) throws IOException, DictionaryGenerationException {
     dataOutputStream.writeInt(children.size());
     if (input == null) {
       for (int i = 0; i < children.size(); i++) {
-        children.get(i).writeByteArray(null, dataOutputStream);
+        children.get(i).writeByteArray(null, dataOutputStream, logHolder);
       }
     } else {
       Object[] data = input.getData();
       for (int i = 0; i < data.length && i < children.size(); i++) {
-        children.get(i).writeByteArray(data[i], dataOutputStream);
+        children.get(i).writeByteArray(data[i], dataOutputStream, logHolder);
       }
 
       // For other children elements which dont have data, write empty
       for (int i = data.length; i < children.size(); i++) {
-        children.get(i).writeByteArray(null, dataOutputStream);
+        children.get(i).writeByteArray(null, dataOutputStream, logHolder);
       }
     }
   }

http://git-wip-us.apache.org/repos/asf/carbondata/blob/6b70b7e4/processing/src/main/java/org/apache/carbondata/processing/loading/DataLoadProcessBuilder.java
----------------------------------------------------------------------
diff --git a/processing/src/main/java/org/apache/carbondata/processing/loading/DataLoadProcessBuilder.java b/processing/src/main/java/org/apache/carbondata/processing/loading/DataLoadProcessBuilder.java
index 2f904ed..17d0c76 100644
--- a/processing/src/main/java/org/apache/carbondata/processing/loading/DataLoadProcessBuilder.java
+++ b/processing/src/main/java/org/apache/carbondata/processing/loading/DataLoadProcessBuilder.java
@@ -237,6 +237,15 @@ public final class DataLoadProcessBuilder {
       }
       if (column.isComplex()) {
         complexDataFields.add(dataField);
+        List<CarbonDimension> childDimensions =
+            ((CarbonDimension) dataField.getColumn()).getListOfChildDimensions();
+        for (CarbonDimension childDimension : childDimensions) {
+          if (childDimension.getDataType() == DataTypes.DATE) {
+            childDimension.setDateFormat(loadModel.getDateFormat());
+          } else if (childDimension.getDataType() == DataTypes.TIMESTAMP) {
+            childDimension.setTimestampFormat(loadModel.getTimestampformat());
+          }
+        }
       } else {
         dataFields.add(dataField);
       }

http://git-wip-us.apache.org/repos/asf/carbondata/blob/6b70b7e4/processing/src/main/java/org/apache/carbondata/processing/loading/converter/impl/ComplexFieldConverterImpl.java
----------------------------------------------------------------------
diff --git a/processing/src/main/java/org/apache/carbondata/processing/loading/converter/impl/ComplexFieldConverterImpl.java b/processing/src/main/java/org/apache/carbondata/processing/loading/converter/impl/ComplexFieldConverterImpl.java
index b26ef36..4e46f9f 100644
--- a/processing/src/main/java/org/apache/carbondata/processing/loading/converter/impl/ComplexFieldConverterImpl.java
+++ b/processing/src/main/java/org/apache/carbondata/processing/loading/converter/impl/ComplexFieldConverterImpl.java
@@ -44,7 +44,7 @@ public class ComplexFieldConverterImpl extends AbstractDictionaryFieldConverterI
     ByteArrayOutputStream byteArray = new ByteArrayOutputStream();
     DataOutputStream dataOutputStream = new DataOutputStream(byteArray);
     try {
-      genericDataType.writeByteArray(object, dataOutputStream);
+      genericDataType.writeByteArray(object, dataOutputStream, logHolder);
       dataOutputStream.close();
       row.update(byteArray.toByteArray(), index);
     } catch (Exception e) {

http://git-wip-us.apache.org/repos/asf/carbondata/blob/6b70b7e4/processing/src/main/java/org/apache/carbondata/processing/loading/converter/impl/DirectDictionaryFieldConverterImpl.java
----------------------------------------------------------------------
diff --git a/processing/src/main/java/org/apache/carbondata/processing/loading/converter/impl/DirectDictionaryFieldConverterImpl.java b/processing/src/main/java/org/apache/carbondata/processing/loading/converter/impl/DirectDictionaryFieldConverterImpl.java
index b49cd90..64ddf27 100644
--- a/processing/src/main/java/org/apache/carbondata/processing/loading/converter/impl/DirectDictionaryFieldConverterImpl.java
+++ b/processing/src/main/java/org/apache/carbondata/processing/loading/converter/impl/DirectDictionaryFieldConverterImpl.java
@@ -54,7 +54,6 @@ public class DirectDictionaryFieldConverterImpl extends AbstractDictionaryFieldC
       this.directDictionaryGenerator = DirectDictionaryKeyGeneratorFactory
           .getDirectDictionaryGenerator(dataField.getColumn().getDataType(),
               dataField.getTimestampFormat());
-
     } else {
       this.directDictionaryGenerator = DirectDictionaryKeyGeneratorFactory
           .getDirectDictionaryGenerator(dataField.getColumn().getDataType());

http://git-wip-us.apache.org/repos/asf/carbondata/blob/6b70b7e4/processing/src/main/java/org/apache/carbondata/processing/loading/model/CarbonLoadModelBuilder.java
----------------------------------------------------------------------
diff --git a/processing/src/main/java/org/apache/carbondata/processing/loading/model/CarbonLoadModelBuilder.java b/processing/src/main/java/org/apache/carbondata/processing/loading/model/CarbonLoadModelBuilder.java
index 13dd75c..9a9d09e 100644
--- a/processing/src/main/java/org/apache/carbondata/processing/loading/model/CarbonLoadModelBuilder.java
+++ b/processing/src/main/java/org/apache/carbondata/processing/loading/model/CarbonLoadModelBuilder.java
@@ -79,7 +79,20 @@ public class CarbonLoadModelBuilder {
 
     // we have provided 'fileheader', so it hadoopConf can be null
     build(options, optionsFinal, model, null);
-
+    String timestampFormat = options.get("timestampformat");
+    if (timestampFormat == null) {
+      timestampFormat = CarbonProperties.getInstance()
+          .getProperty(CarbonCommonConstants.CARBON_TIMESTAMP_FORMAT,
+              CarbonCommonConstants.CARBON_TIMESTAMP_DEFAULT_FORMAT);
+    }
+    String dateFormat = options.get("dateFormat");
+    if (dateFormat == null) {
+      dateFormat = CarbonProperties.getInstance()
+          .getProperty(CarbonCommonConstants.CARBON_DATE_FORMAT,
+              CarbonCommonConstants.CARBON_DATE_DEFAULT_FORMAT);
+    }
+    model.setDateFormat(dateFormat);
+    model.setTimestampformat(timestampFormat);
     model.setUseOnePass(Boolean.parseBoolean(Maps.getOrDefault(options, "onepass", "false")));
     model.setDictionaryServerHost(Maps.getOrDefault(options, "dicthost", null));
     try {

http://git-wip-us.apache.org/repos/asf/carbondata/blob/6b70b7e4/processing/src/main/java/org/apache/carbondata/processing/loading/steps/InputProcessorStepWithNoConverterImpl.java
----------------------------------------------------------------------
diff --git a/processing/src/main/java/org/apache/carbondata/processing/loading/steps/InputProcessorStepWithNoConverterImpl.java b/processing/src/main/java/org/apache/carbondata/processing/loading/steps/InputProcessorStepWithNoConverterImpl.java
index 77f5260..c99a413 100644
--- a/processing/src/main/java/org/apache/carbondata/processing/loading/steps/InputProcessorStepWithNoConverterImpl.java
+++ b/processing/src/main/java/org/apache/carbondata/processing/loading/steps/InputProcessorStepWithNoConverterImpl.java
@@ -35,11 +35,15 @@ import org.apache.carbondata.core.util.CarbonProperties;
 import org.apache.carbondata.core.util.DataTypeUtil;
 import org.apache.carbondata.processing.datatypes.GenericDataType;
 import org.apache.carbondata.processing.loading.AbstractDataLoadProcessorStep;
+import org.apache.carbondata.processing.loading.BadRecordsLogger;
+import org.apache.carbondata.processing.loading.BadRecordsLoggerProvider;
 import org.apache.carbondata.processing.loading.CarbonDataLoadConfiguration;
 import org.apache.carbondata.processing.loading.DataField;
 import org.apache.carbondata.processing.loading.constants.DataLoadProcessorConstants;
+import org.apache.carbondata.processing.loading.converter.BadRecordLogHolder;
 import org.apache.carbondata.processing.loading.converter.impl.FieldEncoderFactory;
 import org.apache.carbondata.processing.loading.converter.impl.RowConverterImpl;
+import org.apache.carbondata.processing.loading.exception.BadRecordFoundException;
 import org.apache.carbondata.processing.loading.exception.CarbonDataLoadingException;
 import org.apache.carbondata.processing.loading.row.CarbonRowBatch;
 import org.apache.carbondata.processing.util.CarbonDataProcessorUtil;
@@ -134,7 +138,7 @@ public class InputProcessorStepWithNoConverterImpl extends AbstractDataLoadProce
       outIterators[i] =
           new InputProcessorIterator(readerIterators[i], batchSize, configuration.isPreFetch(),
               rowCounter, orderOfData, noDictionaryMapping, dataTypes,
-              configuration.getDataFields(), dataFieldsWithComplexDataType);
+              configuration, dataFieldsWithComplexDataType);
     }
     return outIterators;
   }
@@ -207,11 +211,13 @@ public class InputProcessorStepWithNoConverterImpl extends AbstractDataLoadProce
 
     private int[] orderOfData;
 
+    private CarbonDataLoadConfiguration configuration;
+
     private Map<Integer, GenericDataType> dataFieldsWithComplexDataType;
 
     public InputProcessorIterator(List<CarbonIterator<Object[]>> inputIterators, int batchSize,
         boolean preFetch, AtomicLong rowCounter, int[] orderOfData, boolean[] noDictionaryMapping,
-        DataType[] dataTypes, DataField[] dataFields,
+        DataType[] dataTypes, CarbonDataLoadConfiguration configuration,
         Map<Integer, GenericDataType> dataFieldsWithComplexDataType) {
       this.inputIterators = inputIterators;
       this.batchSize = batchSize;
@@ -223,7 +229,8 @@ public class InputProcessorStepWithNoConverterImpl extends AbstractDataLoadProce
       this.firstTime = true;
       this.noDictionaryMapping = noDictionaryMapping;
       this.dataTypes = dataTypes;
-      this.dataFields = dataFields;
+      this.dataFields = configuration.getDataFields();
+      this.configuration = configuration;
       this.orderOfData = orderOfData;
       this.dataFieldsWithComplexDataType = dataFieldsWithComplexDataType;
     }
@@ -272,6 +279,9 @@ public class InputProcessorStepWithNoConverterImpl extends AbstractDataLoadProce
 
     private Object[] convertToNoDictionaryToBytes(Object[] data, DataField[] dataFields) {
       Object[] newData = new Object[data.length];
+      BadRecordLogHolder logHolder = new BadRecordLogHolder();
+      BadRecordsLogger badRecordLogger =
+          BadRecordsLoggerProvider.createBadRecordLogger(configuration);
       for (int i = 0; i < data.length; i++) {
         if (i < noDictionaryMapping.length && noDictionaryMapping[i]) {
           newData[i] = DataTypeUtil
@@ -284,11 +294,21 @@ public class InputProcessorStepWithNoConverterImpl extends AbstractDataLoadProce
             try {
               GenericDataType complextType =
                   dataFieldsWithComplexDataType.get(dataFields[i].getColumn().getOrdinal());
-
-              complextType.writeByteArray(data[orderOfData[i]], dataOutputStream);
-
+              complextType.writeByteArray(data[orderOfData[i]], dataOutputStream, logHolder);
+              if (!logHolder.isLogged() && logHolder.isBadRecordNotAdded()) {
+                badRecordLogger.addBadRecordsToBuilder(data, logHolder.getReason());
+                if (badRecordLogger.isDataLoadFail()) {
+                  String error = "Data load failed due to bad record: " + logHolder.getReason();
+                  if (!badRecordLogger.isBadRecordLoggerEnable()) {
+                    error += "Please enable bad record logger to know the detail reason.";
+                  }
+                  throw new BadRecordFoundException(error);
+                }
+              }
               dataOutputStream.close();
               newData[i] = byteArray.toByteArray();
+            } catch (BadRecordFoundException e) {
+              throw new CarbonDataLoadingException("Loading Exception: " + e.getMessage(), e);
             } catch (Exception e) {
               throw new CarbonDataLoadingException("Loading Exception", e);
             }

http://git-wip-us.apache.org/repos/asf/carbondata/blob/6b70b7e4/store/sdk/src/main/java/org/apache/carbondata/sdk/file/CarbonWriterBuilder.java
----------------------------------------------------------------------
diff --git a/store/sdk/src/main/java/org/apache/carbondata/sdk/file/CarbonWriterBuilder.java b/store/sdk/src/main/java/org/apache/carbondata/sdk/file/CarbonWriterBuilder.java
index f541dbb..00ba8a5 100644
--- a/store/sdk/src/main/java/org/apache/carbondata/sdk/file/CarbonWriterBuilder.java
+++ b/store/sdk/src/main/java/org/apache/carbondata/sdk/file/CarbonWriterBuilder.java
@@ -431,6 +431,20 @@ public class CarbonWriterBuilder {
     // to child of complex array type in the order val1, val2 so that each array type child is
     // differentiated to any level
     AtomicInteger valIndex = new AtomicInteger(0);
+    // Check if any of the columns specified in sort columns are missing from schema.
+    for (String sortColumn: sortColumnsList) {
+      boolean exists = false;
+      for (Field field : fields) {
+        if (field.getFieldName().equalsIgnoreCase(sortColumn)) {
+          exists = true;
+          break;
+        }
+      }
+      if (!exists) {
+        throw new RuntimeException(
+            "column: " + sortColumn + " specified in sort columns does not exist in schema");
+      }
+    }
     for (Field field : fields) {
       if (null != field) {
         int isSortColumn = sortColumnsList.indexOf(field.getFieldName());
@@ -442,9 +456,9 @@ public class CarbonWriterBuilder {
                 " sort columns not supported for " + "array, struct, double, float, decimal ");
           }
         }
-
         if (field.getChildren() != null && field.getChildren().size() > 0) {
           if (field.getDataType().getName().equalsIgnoreCase("ARRAY")) {
+            checkForUnsupportedDataTypes(field.getChildren().get(0).getDataType());
             // Loop through the inner columns and for a StructData
             DataType complexType =
                 DataTypes.createArrayType(field.getChildren().get(0).getDataType());
@@ -455,6 +469,7 @@ public class CarbonWriterBuilder {
             List<StructField> structFieldsArray =
                 new ArrayList<StructField>(field.getChildren().size());
             for (StructField childFld : field.getChildren()) {
+              checkForUnsupportedDataTypes(childFld.getDataType());
               structFieldsArray
                   .add(new StructField(childFld.getFieldName(), childFld.getDataType()));
             }
@@ -475,6 +490,13 @@ public class CarbonWriterBuilder {
     }
   }
 
+  private void checkForUnsupportedDataTypes(DataType dataType) {
+    if (dataType == DataTypes.DOUBLE || dataType == DataTypes.DATE || DataTypes
+        .isDecimal(dataType)) {
+      throw new RuntimeException("Unsupported data type: " + dataType.getName());
+    }
+  }
+
   /**
    * Save the schema of the {@param table} to {@param persistFilePath}
    * @param table table object containing schema


[23/50] [abbrv] carbondata git commit: [CARBONDATA-2460] [CARBONDATA-2461] [CARBONDATA-2462] Fixed bug in AvroCarbonWriter

Posted by ma...@apache.org.
[CARBONDATA-2460] [CARBONDATA-2461] [CARBONDATA-2462] Fixed bug in AvroCarbonWriter

Issue1: If a NULL type is passed in the Avro schema, an "Unsupported data
type" exception is thrown.
Solution1: Ignore columns that have the NULL data type.

Issue2: Array fields were being cast to ArrayList without any instance
check.
Solution2: Check the instance of Array fields and cast appropriately.

This closes #2291
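
For illustration only, a minimal sketch of the instance check described in Solution2; the class and
method names here are hypothetical, and the real change lives in AvroCarbonWriter.avroFieldToObject
in the diff below:

    import java.util.ArrayList;
    import org.apache.avro.generic.GenericData;

    final class ArrayFieldSketch {
      // The Avro array value may arrive either as a GenericData.Array or as a plain
      // ArrayList, depending on how the record was produced, so check the runtime type
      // before casting instead of always casting to ArrayList.
      static Object[] toChildArray(Object fieldValue) {
        final int size;
        if (fieldValue instanceof GenericData.Array) {
          size = ((GenericData.Array<?>) fieldValue).size();
        } else {
          size = ((ArrayList<?>) fieldValue).size();
        }
        Object[] children = new Object[size];
        for (int i = 0; i < size; i++) {
          // In the actual fix each element is converted recursively; here it is copied as-is.
          children[i] = (fieldValue instanceof GenericData.Array)
              ? ((GenericData.Array<?>) fieldValue).get(i)
              : ((ArrayList<?>) fieldValue).get(i);
        }
        return children;
      }
    }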


Project: http://git-wip-us.apache.org/repos/asf/carbondata/repo
Commit: http://git-wip-us.apache.org/repos/asf/carbondata/commit/3d8b085a
Tree: http://git-wip-us.apache.org/repos/asf/carbondata/tree/3d8b085a
Diff: http://git-wip-us.apache.org/repos/asf/carbondata/diff/3d8b085a

Branch: refs/heads/spark-2.3
Commit: 3d8b085a55f551122c7528b6981f1785a44fef3c
Parents: 61afa42
Author: kunal642 <ku...@gmail.com>
Authored: Wed May 9 18:32:23 2018 +0530
Committer: kumarvishal09 <ku...@gmail.com>
Committed: Fri May 11 13:38:53 2018 +0530

----------------------------------------------------------------------
 .../TestNonTransactionalCarbonTable.scala       |  47 ++++++++-
 .../carbondata/sdk/file/AvroCarbonWriter.java   | 103 ++++++++++++++-----
 2 files changed, 122 insertions(+), 28 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/carbondata/blob/3d8b085a/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/createTable/TestNonTransactionalCarbonTable.scala
----------------------------------------------------------------------
diff --git a/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/createTable/TestNonTransactionalCarbonTable.scala b/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/createTable/TestNonTransactionalCarbonTable.scala
index 376501b..86fda21 100644
--- a/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/createTable/TestNonTransactionalCarbonTable.scala
+++ b/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/createTable/TestNonTransactionalCarbonTable.scala
@@ -32,8 +32,6 @@ import org.apache.carbondata.common.exceptions.sql.MalformedCarbonCommandExcepti
 import org.apache.carbondata.core.constants.CarbonCommonConstants
 import org.apache.carbondata.core.datastore.filesystem.CarbonFile
 import org.apache.carbondata.core.datastore.impl.FileFactory
-import org.apache.carbondata.core.util.CarbonUtil
-import org.apache.carbondata.sdk.file.AvroCarbonWriter
 import org.apache.carbondata.core.util.{CarbonProperties, CarbonUtil}
 import scala.collection.JavaConverters._
 import scala.collection.mutable
@@ -43,7 +41,7 @@ import org.apache.commons.lang.CharEncoding
 import tech.allegro.schema.json2avro.converter.JsonAvroConverter
 
 import org.apache.carbondata.core.metadata.datatype.{DataTypes, StructField}
-import org.apache.carbondata.sdk.file.{CarbonWriter, CarbonWriterBuilder, Field, Schema}
+import org.apache.carbondata.sdk.file.{AvroCarbonWriter, CarbonWriter, CarbonWriterBuilder, Field, Schema}
 
 
 class TestNonTransactionalCarbonTable extends QueryTest with BeforeAndAfterAll {
@@ -51,7 +49,7 @@ class TestNonTransactionalCarbonTable extends QueryTest with BeforeAndAfterAll {
   var writerPath = new File(this.getClass.getResource("/").getPath
                             +
                             "../." +
-                            "./src/test/resources/SparkCarbonFileFormat/WriterOutput/")
+                            "./target/SparkCarbonFileFormat/WriterOutput/")
     .getCanonicalPath
   //getCanonicalPath gives path with \, so code expects /. Need to handle in code ?
   writerPath = writerPath.replace("\\", "/")
@@ -1795,6 +1793,47 @@ class TestNonTransactionalCarbonTable extends QueryTest with BeforeAndAfterAll {
     }.getMessage.toLowerCase.contains("column: name specified in sort columns"))
   }
 
+  test("test if load is passing with NULL type") {
+    val schema1 =
+      """{
+        |	"namespace": "com.apache.schema",
+        |	"type": "record",
+        |	"name": "StudentActivity",
+        |	"fields": [
+        |		{
+        |			"name": "id",
+        |			"type": "null"
+        |		},
+        |		{
+        |			"name": "course_details",
+        |			"type": {
+        |				"name": "course_details",
+        |				"type": "record",
+        |				"fields": [
+        |					{
+        |						"name": "course_struct_course_time",
+        |						"type": "string"
+        |					}
+        |				]
+        |			}
+        |		}
+        |	]
+        |}""".stripMargin
+
+    val json1 =
+      """{"id": 101,"course_details": { "course_struct_course_time":"2014-01-05"  }}""".stripMargin
+
+    val nn = new org.apache.avro.Schema.Parser().parse(schema1)
+    val converter = new JsonAvroConverter
+    val record = converter
+      .convertToGenericDataRecord(json1.getBytes(CharEncoding.UTF_8), nn)
+
+    val writer = CarbonWriter.builder.withSchema(AvroCarbonWriter.getCarbonSchemaFromAvroSchema(schema1))
+      .outputPath(writerPath).isTransactionalTable(false).buildWriterForAvroInput
+    writer.write(record)
+    writer.close()
+  }
+
   test("test if data load is success with a struct having timestamp column  ") {
     val schema1 =
       """{

http://git-wip-us.apache.org/repos/asf/carbondata/blob/3d8b085a/store/sdk/src/main/java/org/apache/carbondata/sdk/file/AvroCarbonWriter.java
----------------------------------------------------------------------
diff --git a/store/sdk/src/main/java/org/apache/carbondata/sdk/file/AvroCarbonWriter.java b/store/sdk/src/main/java/org/apache/carbondata/sdk/file/AvroCarbonWriter.java
index 55fd211..137e3f4 100644
--- a/store/sdk/src/main/java/org/apache/carbondata/sdk/file/AvroCarbonWriter.java
+++ b/store/sdk/src/main/java/org/apache/carbondata/sdk/file/AvroCarbonWriter.java
@@ -75,15 +75,18 @@ public class AvroCarbonWriter extends CarbonWriter {
       avroSchema = avroRecord.getSchema();
     }
     List<Schema.Field> fields = avroSchema.getFields();
-    Object[] csvField = new Object[fields.size()];
+    List<Object> csvFields = new ArrayList<>();
     for (int i = 0; i < fields.size(); i++) {
-      csvField[i] = avroFieldToObject(fields.get(i), avroRecord.get(i));
+      Object field = avroFieldToObject(fields.get(i), avroRecord.get(i));
+      if (field != null) {
+        csvFields.add(field);
+      }
     }
-    return csvField;
+    return csvFields.toArray();
   }
 
   private Object avroFieldToObject(Schema.Field avroField, Object fieldValue) {
-    Object out = new Object();
+    Object out;
     Schema.Type type = avroField.schema().getType();
     switch (type) {
       case BOOLEAN:
@@ -102,24 +105,45 @@ public class AvroCarbonWriter extends CarbonWriter {
 
         Object[] structChildObjects = new Object[fields.size()];
         for (int i = 0; i < fields.size(); i++) {
-          structChildObjects[i] =
+          Object childObject =
               avroFieldToObject(fields.get(i), ((GenericData.Record) fieldValue).get(i));
+          if (childObject != null) {
+            structChildObjects[i] = childObject;
+          }
         }
         StructObject structObject = new StructObject(structChildObjects);
         out = structObject;
         break;
       case ARRAY:
-        int size = ((ArrayList) fieldValue).size();
-        Object[] arrayChildObjects = new Object[size];
-        for (int i = 0; i < size; i++) {
-          arrayChildObjects[i] = (avroFieldToObject(
-              new Schema.Field(avroField.name(), avroField.schema().getElementType(), null, true),
-              ((ArrayList) fieldValue).get(i)));
+        Object[] arrayChildObjects;
+        if (fieldValue instanceof GenericData.Array) {
+          int size = ((GenericData.Array) fieldValue).size();
+          arrayChildObjects = new Object[size];
+          for (int i = 0; i < size; i++) {
+            Object childObject = avroFieldToObject(
+                new Schema.Field(avroField.name(), avroField.schema().getElementType(), null, true),
+                ((GenericData.Array) fieldValue).get(i));
+            if (childObject != null) {
+              arrayChildObjects[i] = childObject;
+            }
+          }
+        } else {
+          int size = ((ArrayList) fieldValue).size();
+          arrayChildObjects = new Object[size];
+          for (int i = 0; i < size; i++) {
+            Object childObject = avroFieldToObject(
+                new Schema.Field(avroField.name(), avroField.schema().getElementType(), null, true),
+                ((ArrayList) fieldValue).get(i));
+            if (childObject != null) {
+              arrayChildObjects[i] = childObject;
+            }
+          }
         }
-        ArrayObject arrayObject = new ArrayObject(arrayChildObjects);
-        out = arrayObject;
+        out = new ArrayObject(arrayChildObjects);
+        break;
+      case NULL:
+        out = null;
         break;
-
       default:
         throw new UnsupportedOperationException(
             "carbon not support " + type.toString() + " avro type yet");
@@ -142,7 +166,10 @@ public class AvroCarbonWriter extends CarbonWriter {
     Field[] carbonField = new Field[avroSchema.getFields().size()];
     int i = 0;
     for (Schema.Field avroField : avroSchema.getFields()) {
-      carbonField[i] = prepareFields(avroField);
+      Field field = prepareFields(avroField);
+      if (field != null) {
+        carbonField[i] = field;
+      }
       i++;
     }
     return new org.apache.carbondata.sdk.file.Schema(carbonField);
@@ -169,15 +196,25 @@ public class AvroCarbonWriter extends CarbonWriter {
         // recursively get the sub fields
         ArrayList<StructField> structSubFields = new ArrayList<>();
         for (Schema.Field avroSubField : childSchema.getFields()) {
-          structSubFields.add(prepareSubFields(avroSubField.name(), avroSubField.schema()));
+          StructField structField = prepareSubFields(avroSubField.name(), avroSubField.schema());
+          if (structField != null) {
+            structSubFields.add(structField);
+          }
         }
         return new Field(FieldName, "struct", structSubFields);
       case ARRAY:
         // recursively get the sub fields
         ArrayList<StructField> arraySubField = new ArrayList<>();
         // array will have only one sub field.
-        arraySubField.add(prepareSubFields("val", childSchema.getElementType()));
-        return new Field(FieldName, "array", arraySubField);
+        StructField structField = prepareSubFields("val", childSchema.getElementType());
+        if (structField != null) {
+          arraySubField.add(structField);
+          return new Field(FieldName, "array", arraySubField);
+        } else {
+          return null;
+        }
+      case NULL:
+        return null;
       default:
         throw new UnsupportedOperationException(
             "carbon not support " + type.toString() + " avro type yet");
@@ -203,14 +240,23 @@ public class AvroCarbonWriter extends CarbonWriter {
         // recursively get the sub fields
         ArrayList<StructField> structSubFields = new ArrayList<>();
         for (Schema.Field avroSubField : childSchema.getFields()) {
-          structSubFields.add(prepareSubFields(avroSubField.name(), avroSubField.schema()));
+          StructField structField = prepareSubFields(avroSubField.name(), avroSubField.schema());
+          if (structField != null) {
+            structSubFields.add(structField);
+          }
         }
         return (new StructField(FieldName, DataTypes.createStructType(structSubFields)));
       case ARRAY:
         // recursively get the sub fields
         // array will have only one sub field.
-        return (new StructField(FieldName, DataTypes.createArrayType(
-            getMappingDataTypeForArrayRecord(childSchema.getElementType()))));
+        DataType subType = getMappingDataTypeForArrayRecord(childSchema.getElementType());
+        if (subType != null) {
+          return (new StructField(FieldName, DataTypes.createArrayType(subType)));
+        } else {
+          return null;
+        }
+      case NULL:
+        return null;
       default:
         throw new UnsupportedOperationException(
             "carbon not support " + type.toString() + " avro type yet");
@@ -235,13 +281,22 @@ public class AvroCarbonWriter extends CarbonWriter {
         // recursively get the sub fields
         ArrayList<StructField> structSubFields = new ArrayList<>();
         for (Schema.Field avroSubField : childSchema.getFields()) {
-          structSubFields.add(prepareSubFields(avroSubField.name(), avroSubField.schema()));
+          StructField structField = prepareSubFields(avroSubField.name(), avroSubField.schema());
+          if (structField != null) {
+            structSubFields.add(structField);
+          }
         }
         return DataTypes.createStructType(structSubFields);
       case ARRAY:
         // array will have only one sub field.
-        return DataTypes.createArrayType(
-            getMappingDataTypeForArrayRecord(childSchema.getElementType()));
+        DataType subType = getMappingDataTypeForArrayRecord(childSchema.getElementType());
+        if (subType != null) {
+          return DataTypes.createArrayType(subType);
+        } else {
+          return null;
+        }
+      case NULL:
+        return null;
       default:
         throw new UnsupportedOperationException(
             "carbon not support " + childSchema.getType().toString() + " avro type yet");


[11/50] [abbrv] carbondata git commit: [CARBONDATA-2457] Add converter to get Carbon SDK Schema from Avro schema directly.

Posted by ma...@apache.org.
[CARBONDATA-2457] Add converter to get Carbon SDK Schema from Avro schema directly.

In the current implementation, SDK users have to manually create the Carbon schema fields from an Avro schema.
This is time-consuming and error-prone, and the user should not have to worry about this logic.
So, abstract the Carbon schema creation from the Avro schema by exposing a method to the user.

This closes #2283
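
As a usage sketch of the exposed method (writerPath and record are placeholders for a target
directory and a GenericData.Record built elsewhere, for example with JsonAvroConverter as in the
tests below):

    import org.apache.avro.generic.GenericData;
    import org.apache.carbondata.sdk.file.AvroCarbonWriter;
    import org.apache.carbondata.sdk.file.CarbonWriter;

    public final class AvroSchemaUsageSketch {
      static void writeOneRecord(String avroSchemaJson, GenericData.Record record, String writerPath)
          throws Exception {
        // Derive the Carbon SDK schema directly from the Avro schema string
        // instead of hand-building Field/StructField objects.
        CarbonWriter writer = CarbonWriter.builder()
            .withSchema(AvroCarbonWriter.getCarbonSchemaFromAvroSchema(avroSchemaJson))
            .outputPath(writerPath)
            .isTransactionalTable(false)
            .buildWriterForAvroInput();
        writer.write(record);
        writer.close();
      }
    }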


Project: http://git-wip-us.apache.org/repos/asf/carbondata/repo
Commit: http://git-wip-us.apache.org/repos/asf/carbondata/commit/cf3e9196
Tree: http://git-wip-us.apache.org/repos/asf/carbondata/tree/cf3e9196
Diff: http://git-wip-us.apache.org/repos/asf/carbondata/diff/cf3e9196

Branch: refs/heads/spark-2.3
Commit: cf3e919651f3b84c3045d1be8fb89a8a8cfd8242
Parents: 747be9b
Author: ajantha-bhat <aj...@gmail.com>
Authored: Tue May 8 12:12:37 2018 +0530
Committer: ravipesala <ra...@gmail.com>
Committed: Wed May 9 18:07:05 2018 +0530

----------------------------------------------------------------------
 .../TestNonTransactionalCarbonTable.scala       | 172 ++-----------------
 .../carbondata/sdk/file/AvroCarbonWriter.java   | 129 +++++++++++++-
 .../sdk/file/AvroCarbonWriterTest.java          |  63 +------
 3 files changed, 145 insertions(+), 219 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/carbondata/blob/cf3e9196/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/createTable/TestNonTransactionalCarbonTable.scala
----------------------------------------------------------------------
diff --git a/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/createTable/TestNonTransactionalCarbonTable.scala b/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/createTable/TestNonTransactionalCarbonTable.scala
index c641ed3..2f88c40 100644
--- a/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/createTable/TestNonTransactionalCarbonTable.scala
+++ b/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/createTable/TestNonTransactionalCarbonTable.scala
@@ -31,7 +31,7 @@ import org.apache.carbondata.core.constants.CarbonCommonConstants
 import org.apache.carbondata.core.datastore.filesystem.CarbonFile
 import org.apache.carbondata.core.datastore.impl.FileFactory
 import org.apache.carbondata.core.util.CarbonUtil
-import org.apache.carbondata.sdk.file.{CarbonWriter, Field, Schema}
+import org.apache.carbondata.sdk.file.{AvroCarbonWriter, CarbonWriter, Field, Schema}
 import scala.collection.JavaConverters._
 import scala.collection.mutable
 
@@ -39,8 +39,6 @@ import org.apache.avro
 import org.apache.commons.lang.CharEncoding
 import tech.allegro.schema.json2avro.converter.JsonAvroConverter
 
-import org.apache.carbondata.core.metadata.datatype.{DataTypes, StructField}
-
 class TestNonTransactionalCarbonTable extends QueryTest with BeforeAndAfterAll {
 
   var writerPath = new File(this.getClass.getResource("/").getPath
@@ -351,8 +349,6 @@ class TestNonTransactionalCarbonTable extends QueryTest with BeforeAndAfterAll {
     cleanTestData()
   }
 
-
-
   test("read non transactional table, files written from sdk Writer Output)") {
     buildTestDataSingleFile()
     assert(new File(writerPath).exists())
@@ -531,7 +527,6 @@ class TestNonTransactionalCarbonTable extends QueryTest with BeforeAndAfterAll {
     cleanTestData()
   }
 
-
   test("Read sdk writer output file without any file should fail") {
     buildTestDataSingleFile()
     deleteFile(writerPath, CarbonCommonConstants.FACT_FILE_EXT)
@@ -685,11 +680,7 @@ class TestNonTransactionalCarbonTable extends QueryTest with BeforeAndAfterAll {
 
   }
 
-
-  private def WriteFilesWithAvroWriter(rows: Int,
-      mySchema: String,
-      json: String,
-      fields: Array[Field]) = {
+  private def WriteFilesWithAvroWriter(rows: Int, mySchema: String, json: String): Unit = {
     // conversion to GenericData.Record
     val nn = new avro.Schema.Parser().parse(mySchema)
     val converter = new JsonAvroConverter
@@ -697,7 +688,8 @@ class TestNonTransactionalCarbonTable extends QueryTest with BeforeAndAfterAll {
       .convertToGenericDataRecord(json.getBytes(CharEncoding.UTF_8), nn)
 
     try {
-      val writer = CarbonWriter.builder.withSchema(new Schema(fields))
+      val writer = CarbonWriter.builder
+        .withSchema(AvroCarbonWriter.getCarbonSchemaFromAvroSchema(mySchema))
         .outputPath(writerPath).isTransactionalTable(false)
         .uniqueIdentifier(System.currentTimeMillis()).buildWriterForAvroInput
       var i = 0
@@ -734,16 +726,7 @@ class TestNonTransactionalCarbonTable extends QueryTest with BeforeAndAfterAll {
       """.stripMargin
 
     val json = """ {"name":"bob", "age":10, "address" : {"street":"abc", "city":"bang"}} """
-
-    val fields = new Array[Field](3)
-    fields(0) = new Field("name", DataTypes.STRING)
-    fields(1) = new Field("age", DataTypes.INT)
-    val fld = new util.ArrayList[StructField]
-    fld.add(new StructField("street", DataTypes.STRING))
-    fld.add(new StructField("city", DataTypes.STRING))
-    fields(2) = new Field("address", "struct", fld)
-
-    WriteFilesWithAvroWriter(rows, mySchema, json, fields)
+    WriteFilesWithAvroWriter(rows, mySchema, json)
   }
 
   def buildAvroTestDataStructType(): Any = {
@@ -782,17 +765,7 @@ class TestNonTransactionalCarbonTable extends QueryTest with BeforeAndAfterAll {
                    """.stripMargin
 
     val json: String = """ {"name": "bob","age": 10,"address": ["abc", "defg"]} """
-
-
-    val fields = new Array[Field](3)
-    fields(0) = new Field("name", DataTypes.STRING)
-    fields(1) = new Field("age", DataTypes.INT)
-    // fields[1] = new Field("age", DataTypes.INT);
-    val fld = new util.ArrayList[StructField]
-    fld.add(new StructField("street", DataTypes.STRING))
-    fields(2) = new Field("address", "array", fld)
-
-    WriteFilesWithAvroWriter(rows, mySchema, json, fields)
+    WriteFilesWithAvroWriter(rows, mySchema, json)
   }
 
   def buildAvroTestDataSingleFileArrayType(): Any = {
@@ -836,18 +809,7 @@ class TestNonTransactionalCarbonTable extends QueryTest with BeforeAndAfterAll {
       """ {"name":"bob", "age":10,
         |"address" : {"street":"abc", "city":"bang"},
         |"doorNum" : [1,2,3,4]}""".stripMargin
-
-    val fields = new Array[Field](4)
-    fields(0) = new Field("name", DataTypes.STRING)
-    fields(1) = new Field("age", DataTypes.INT)
-    val fld = new util.ArrayList[StructField]
-    fld.add(new StructField("street", DataTypes.STRING))
-    fld.add(new StructField("city", DataTypes.STRING))
-    fields(2) = new Field("address", "struct", fld)
-    val fld1 = new util.ArrayList[StructField]
-    fld1.add(new StructField("eachDoorNum", DataTypes.INT))
-    fields(3) = new Field("doorNum", "array", fld1)
-    WriteFilesWithAvroWriter(rows, mySchema, json, fields)
+    WriteFilesWithAvroWriter(rows, mySchema, json)
   }
 
   def buildAvroTestDataBothStructArrayType(): Any = {
@@ -855,7 +817,6 @@ class TestNonTransactionalCarbonTable extends QueryTest with BeforeAndAfterAll {
     buildAvroTestDataStructWithArrayType(3, null)
   }
 
-
   // ArrayOfStruct test
   def buildAvroTestDataArrayOfStruct(rows: Int, options: util.Map[String, String]): Any = {
     FileUtils.deleteDirectory(new File(writerPath))
@@ -900,20 +861,7 @@ class TestNonTransactionalCarbonTable extends QueryTest with BeforeAndAfterAll {
         |{"street":"def","city":"city2"},
         |{"street":"ghi","city":"city3"},
         |{"street":"jkl","city":"city4"}]} """.stripMargin
-
-    val fields = new Array[Field](3)
-    fields(0) = new Field("name", DataTypes.STRING)
-    fields(1) = new Field("age", DataTypes.INT)
-
-    val fld = new util.ArrayList[StructField]
-    fld.add(new StructField("street", DataTypes.STRING))
-    fld.add(new StructField("city", DataTypes.STRING))
-
-    val fld2 = new util.ArrayList[StructField]
-    fld2.add(new StructField("my_address", DataTypes.createStructType(fld), fld))
-    fields(2) = new Field("doorNum", DataTypes.createArrayType(fld2.get(0).getDataType), fld2)
-
-    WriteFilesWithAvroWriter(rows, mySchema, json, fields)
+    WriteFilesWithAvroWriter(rows, mySchema, json)
   }
 
   def buildAvroTestDataArrayOfStructType(): Any = {
@@ -921,7 +869,6 @@ class TestNonTransactionalCarbonTable extends QueryTest with BeforeAndAfterAll {
     buildAvroTestDataArrayOfStruct(3, null)
   }
 
-
   // StructOfArray test
   def buildAvroTestDataStructOfArray(rows: Int, options: util.Map[String, String]): Any = {
     FileUtils.deleteDirectory(new File(writerPath))
@@ -983,21 +930,7 @@ class TestNonTransactionalCarbonTable extends QueryTest with BeforeAndAfterAll {
                  |		]
                  |	}
                  |} """.stripMargin
-
-    val fields = new Array[Field](3)
-    fields(0) = new Field("name", DataTypes.STRING)
-    fields(1) = new Field("age", DataTypes.INT)
-
-    val fld2 = new util.ArrayList[StructField]
-    fld2.add(new StructField("street", DataTypes.STRING))
-    fld2.add(new StructField("city", DataTypes.STRING))
-
-    val fld1 = new util.ArrayList[StructField]
-    fld1.add(new StructField("eachDoorNum", DataTypes.INT))
-    fld2.add(new StructField("doorNum", DataTypes.createArrayType(DataTypes.INT), fld1))
-
-    fields(2) = new Field("address","struct",fld2)
-    WriteFilesWithAvroWriter(rows, mySchema, json, fields)
+    WriteFilesWithAvroWriter(rows, mySchema, json)
   }
 
   def buildAvroTestDataStructOfArrayType(): Any = {
@@ -1005,7 +938,6 @@ class TestNonTransactionalCarbonTable extends QueryTest with BeforeAndAfterAll {
     buildAvroTestDataStructOfArray(3, null)
   }
 
-
   test("Read sdk writer Avro output Record Type") {
     buildAvroTestDataStructType()
     assert(new File(writerPath).exists())
@@ -1014,7 +946,6 @@ class TestNonTransactionalCarbonTable extends QueryTest with BeforeAndAfterAll {
       s"""CREATE EXTERNAL TABLE sdkOutputTable STORED BY 'carbondata' LOCATION
          |'$writerPath' """.stripMargin)
 
-
     checkAnswer(sql("select * from sdkOutputTable"), Seq(
       Row("bob", 10, Row("abc","bang")),
       Row("bob", 10, Row("abc","bang")),
@@ -1073,7 +1004,6 @@ class TestNonTransactionalCarbonTable extends QueryTest with BeforeAndAfterAll {
     cleanTestData()
   }
 
-
   test("Read sdk writer Avro output with Array of struct") {
     buildAvroTestDataArrayOfStructType()
     assert(new File(writerPath).exists())
@@ -1099,7 +1029,6 @@ class TestNonTransactionalCarbonTable extends QueryTest with BeforeAndAfterAll {
     cleanTestData()
   }
 
-
   // Struct of array
   test("Read sdk writer Avro output with struct of Array") {
     buildAvroTestDataStructOfArrayType()
@@ -1201,21 +1130,7 @@ class TestNonTransactionalCarbonTable extends QueryTest with BeforeAndAfterAll {
         |		}
         |	]
         |} """.stripMargin
-
-    val fields = new Array[Field](3)
-    fields(0) = new Field("name", DataTypes.STRING)
-    fields(1) = new Field("age", DataTypes.INT)
-
-    val fld = new util.ArrayList[StructField]
-    fld.add(new StructField("street", DataTypes.STRING))
-    fld.add(new StructField("city", DataTypes.STRING))
-    fld.add(new StructField("FloorNum", DataTypes.createArrayType(DataTypes.INT)))
-
-    val fld2 = new util.ArrayList[StructField]
-    fld2.add(new StructField("my_address", DataTypes.createStructType(fld), fld))
-    fields(2) = new Field("doorNum", DataTypes.createArrayType(fld2.get(0).getDataType), fld2)
-
-    WriteFilesWithAvroWriter(rows, mySchema, json, fields)
+    WriteFilesWithAvroWriter(rows, mySchema, json)
   }
 
   def buildAvroTestDataMultiLevel3Type(): Any = {
@@ -1253,7 +1168,6 @@ class TestNonTransactionalCarbonTable extends QueryTest with BeforeAndAfterAll {
     cleanTestData()
   }
 
-
   // test multi level -- 3 levels [array of struct of struct of string, int]
   def buildAvroTestDataMultiLevel3_1(rows: Int, options: util.Map[String, String]): Any = {
     FileUtils.deleteDirectory(new File(writerPath))
@@ -1333,26 +1247,7 @@ class TestNonTransactionalCarbonTable extends QueryTest with BeforeAndAfterAll {
         |		}
         |	]
         |}  """.stripMargin
-
-    val fields = new Array[Field](3)
-    fields(0) = new Field("name", DataTypes.STRING)
-    fields(1) = new Field("age", DataTypes.INT)
-
-    val fld = new util.ArrayList[StructField]
-    fld.add(new StructField("street", DataTypes.STRING))
-    fld.add(new StructField("city", DataTypes.STRING))
-
-    val subFld = new util.ArrayList[StructField]
-    subFld.add(new StructField("wing", DataTypes.STRING))
-    subFld.add(new StructField("number", DataTypes.INT))
-    fld.add(new StructField("FloorNum", DataTypes.createStructType(subFld)))
-
-    // array of struct of struct
-    val fld2 = new util.ArrayList[StructField]
-    fld2.add(new StructField("my_address", DataTypes.createStructType(fld), fld))
-    fields(2) = new Field("doorNum", DataTypes.createArrayType(fld2.get(0).getDataType), fld2)
-
-    WriteFilesWithAvroWriter(rows, mySchema, json, fields)
+    WriteFilesWithAvroWriter(rows, mySchema, json)
   }
 
   def buildAvroTestDataMultiLevel3_1Type(): Any = {
@@ -1432,22 +1327,7 @@ class TestNonTransactionalCarbonTable extends QueryTest with BeforeAndAfterAll {
         |        	"BuildNum": [[[1,2,3],[4,5,6]],[[10,20,30],[40,50,60]]]
         |        }   """.stripMargin
 
-    val fields = new Array[Field](3)
-    fields(0) = new Field("name", DataTypes.STRING)
-    fields(1) = new Field("age", DataTypes.INT)
-
-    val subFld = new util.ArrayList[StructField]
-    subFld.add(new StructField("EachDoorNum", DataTypes.INT))
-
-    val fld = new util.ArrayList[StructField]
-    fld.add(new StructField("DoorNum", DataTypes.createArrayType(DataTypes.INT), subFld))
-    // array of struct of struct
-    val doorNum = new util.ArrayList[StructField]
-    doorNum.add(new StructField("FloorNum",
-      DataTypes.createArrayType(DataTypes.createArrayType(DataTypes.INT)), fld))
-    fields(2) = new Field("BuildNum", "array", doorNum)
-
-    WriteFilesWithAvroWriter(rows, mySchema, json, fields)
+    WriteFilesWithAvroWriter(rows, mySchema, json)
   }
 
   def buildAvroTestDataMultiLevel3_2Type(): Any = {
@@ -1486,8 +1366,6 @@ class TestNonTransactionalCarbonTable extends QueryTest with BeforeAndAfterAll {
     cleanTestData()
   }
 
-
-
   // test multi level -- 4 levels [array of array of array of struct]
   def buildAvroTestDataMultiLevel4(rows: Int, options: util.Map[String, String]): Any = {
     FileUtils.deleteDirectory(new File(writerPath))
@@ -1566,30 +1444,7 @@ class TestNonTransactionalCarbonTable extends QueryTest with BeforeAndAfterAll {
         |		]
         |	]
         |} """.stripMargin
-
-    val fields = new Array[Field](3)
-    fields(0) = new Field("name", DataTypes.STRING)
-    fields(1) = new Field("age", DataTypes.INT)
-
-    val subFld = new util.ArrayList[StructField]
-    subFld.add(new StructField("EachDoorNum", DataTypes.INT))
-
-    val address = new util.ArrayList[StructField]
-    address.add(new StructField("street", DataTypes.STRING))
-    address.add(new StructField("city", DataTypes.STRING))
-
-    val fld = new util.ArrayList[StructField]
-    fld.add(new StructField("DoorNum",
-        DataTypes.createArrayType(DataTypes.createStructType(address)),
-        subFld))
-    // array of struct of struct
-    val doorNum = new util.ArrayList[StructField]
-    doorNum.add(new StructField("FloorNum",
-      DataTypes.createArrayType(
-        DataTypes.createArrayType(DataTypes.createStructType(address))), fld))
-    fields(2) = new Field("BuildNum", "array", doorNum)
-
-    WriteFilesWithAvroWriter(rows, mySchema, json, fields)
+    WriteFilesWithAvroWriter(rows, mySchema, json)
   }
 
   def buildAvroTestDataMultiLevel4Type(): Any = {
@@ -1615,5 +1470,4 @@ class TestNonTransactionalCarbonTable extends QueryTest with BeforeAndAfterAll {
     cleanTestData()
   }
 
-
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/carbondata/blob/cf3e9196/store/sdk/src/main/java/org/apache/carbondata/sdk/file/AvroCarbonWriter.java
----------------------------------------------------------------------
diff --git a/store/sdk/src/main/java/org/apache/carbondata/sdk/file/AvroCarbonWriter.java b/store/sdk/src/main/java/org/apache/carbondata/sdk/file/AvroCarbonWriter.java
index 946040f..55fd211 100644
--- a/store/sdk/src/main/java/org/apache/carbondata/sdk/file/AvroCarbonWriter.java
+++ b/store/sdk/src/main/java/org/apache/carbondata/sdk/file/AvroCarbonWriter.java
@@ -24,6 +24,9 @@ import java.util.Random;
 import java.util.UUID;
 
 import org.apache.carbondata.common.annotations.InterfaceAudience;
+import org.apache.carbondata.core.metadata.datatype.DataType;
+import org.apache.carbondata.core.metadata.datatype.DataTypes;
+import org.apache.carbondata.core.metadata.datatype.StructField;
 import org.apache.carbondata.hadoop.api.CarbonTableOutputFormat;
 import org.apache.carbondata.hadoop.internal.ObjectArrayWritable;
 import org.apache.carbondata.processing.loading.complexobjects.ArrayObject;
@@ -46,7 +49,7 @@ import org.apache.hadoop.mapreduce.task.TaskAttemptContextImpl;
  * Writer Implementation to write Avro Record to carbondata file.
  */
 @InterfaceAudience.Internal
-class AvroCarbonWriter extends CarbonWriter {
+public class AvroCarbonWriter extends CarbonWriter {
 
   private RecordWriter<NullWritable, ObjectArrayWritable> recordWriter;
   private TaskAttemptContext context;
@@ -118,12 +121,134 @@ class AvroCarbonWriter extends CarbonWriter {
         break;
 
       default:
-        throw new UnsupportedOperationException();
+        throw new UnsupportedOperationException(
+            "carbon not support " + type.toString() + " avro type yet");
     }
     return out;
   }
 
   /**
+   * Converts an Avro schema to the Carbon schema required by CarbonWriter.
+   *
+   * @param avroSchemaString JSON-formatted Avro schema as a string
+   * @return Carbon SDK schema
+   */
+  public static org.apache.carbondata.sdk.file.Schema getCarbonSchemaFromAvroSchema(
+      String avroSchemaString) {
+    if (avroSchemaString == null) {
+      throw new UnsupportedOperationException("avro schema string cannot be null");
+    }
+    Schema avroSchema = new Schema.Parser().parse(avroSchemaString);
+    Field[] carbonField = new Field[avroSchema.getFields().size()];
+    int i = 0;
+    for (Schema.Field avroField : avroSchema.getFields()) {
+      carbonField[i] = prepareFields(avroField);
+      i++;
+    }
+    return new org.apache.carbondata.sdk.file.Schema(carbonField);
+  }
+
+  private static Field prepareFields(Schema.Field avroField) {
+    String FieldName = avroField.name();
+    Schema childSchema = avroField.schema();
+    Schema.Type type = childSchema.getType();
+    switch (type) {
+      case BOOLEAN:
+        return new Field(FieldName, DataTypes.BOOLEAN);
+      case INT:
+        return new Field(FieldName, DataTypes.INT);
+      case LONG:
+        return new Field(FieldName, DataTypes.LONG);
+      case DOUBLE:
+        return new Field(FieldName, DataTypes.DOUBLE);
+      case STRING:
+        return new Field(FieldName, DataTypes.STRING);
+      case FLOAT:
+        return new Field(FieldName, DataTypes.DOUBLE);
+      case RECORD:
+        // recursively get the sub fields
+        ArrayList<StructField> structSubFields = new ArrayList<>();
+        for (Schema.Field avroSubField : childSchema.getFields()) {
+          structSubFields.add(prepareSubFields(avroSubField.name(), avroSubField.schema()));
+        }
+        return new Field(FieldName, "struct", structSubFields);
+      case ARRAY:
+        // recursively get the sub fields
+        ArrayList<StructField> arraySubField = new ArrayList<>();
+        // array will have only one sub field.
+        arraySubField.add(prepareSubFields("val", childSchema.getElementType()));
+        return new Field(FieldName, "array", arraySubField);
+      default:
+        throw new UnsupportedOperationException(
+            "carbon not support " + type.toString() + " avro type yet");
+    }
+  }
+
+  private static StructField prepareSubFields(String FieldName, Schema childSchema) {
+    Schema.Type type = childSchema.getType();
+    switch (type) {
+      case BOOLEAN:
+        return new StructField(FieldName, DataTypes.BOOLEAN);
+      case INT:
+        return new StructField(FieldName, DataTypes.INT);
+      case LONG:
+        return new StructField(FieldName, DataTypes.LONG);
+      case DOUBLE:
+        return new StructField(FieldName, DataTypes.DOUBLE);
+      case STRING:
+        return new StructField(FieldName, DataTypes.STRING);
+      case FLOAT:
+        return new StructField(FieldName, DataTypes.DOUBLE);
+      case RECORD:
+        // recursively get the sub fields
+        ArrayList<StructField> structSubFields = new ArrayList<>();
+        for (Schema.Field avroSubField : childSchema.getFields()) {
+          structSubFields.add(prepareSubFields(avroSubField.name(), avroSubField.schema()));
+        }
+        return (new StructField(FieldName, DataTypes.createStructType(structSubFields)));
+      case ARRAY:
+        // recursively get the sub fields
+        // array will have only one sub field.
+        return (new StructField(FieldName, DataTypes.createArrayType(
+            getMappingDataTypeForArrayRecord(childSchema.getElementType()))));
+      default:
+        throw new UnsupportedOperationException(
+            "carbon not support " + type.toString() + " avro type yet");
+    }
+  }
+
+  private static DataType getMappingDataTypeForArrayRecord(Schema childSchema) {
+    switch (childSchema.getType()) {
+      case BOOLEAN:
+        return DataTypes.BOOLEAN;
+      case INT:
+        return DataTypes.INT;
+      case LONG:
+        return DataTypes.LONG;
+      case DOUBLE:
+        return DataTypes.DOUBLE;
+      case STRING:
+        return DataTypes.STRING;
+      case FLOAT:
+        return DataTypes.DOUBLE;
+      case RECORD:
+        // recursively get the sub fields
+        ArrayList<StructField> structSubFields = new ArrayList<>();
+        for (Schema.Field avroSubField : childSchema.getFields()) {
+          structSubFields.add(prepareSubFields(avroSubField.name(), avroSubField.schema()));
+        }
+        return DataTypes.createStructType(structSubFields);
+      case ARRAY:
+        // array will have only one sub field.
+        return DataTypes.createArrayType(
+            getMappingDataTypeForArrayRecord(childSchema.getElementType()));
+      default:
+        throw new UnsupportedOperationException(
+            "carbon not support " + childSchema.getType().toString() + " avro type yet");
+    }
+  }
+
+  /**
    * Write single row data, input row is Avro Record
    */
   @Override

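For reference, a minimal Scala sketch of how the new conversion API is meant to be consumed
from the SDK; only the class and method names come from the diff above, while the schema
content and output path are illustrative:

    import org.apache.carbondata.sdk.file.{AvroCarbonWriter, CarbonWriter}

    // Illustrative Avro record schema; any valid record schema string works here.
    val avroSchema =
      """{"type": "record", "name": "person", "fields": [
        |  {"name": "name", "type": "string"},
        |  {"name": "age",  "type": "int"}]}""".stripMargin

    // The carbon schema is derived from the Avro schema instead of being built field by field.
    val writer = CarbonWriter.builder()
      .withSchema(AvroCarbonWriter.getCarbonSchemaFromAvroSchema(avroSchema))
      .outputPath("/tmp/avro_carbon_output")   // illustrative path
      .isTransactionalTable(true)
      .buildWriterForAvroInput()
    // writer.write(...) then accepts Avro GenericData.Record values, as in the tests below;
    // closing the writer afterwards is assumed to flush the carbondata files.
    writer.close()
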
http://git-wip-us.apache.org/repos/asf/carbondata/blob/cf3e9196/store/sdk/src/test/java/org/apache/carbondata/sdk/file/AvroCarbonWriterTest.java
----------------------------------------------------------------------
diff --git a/store/sdk/src/test/java/org/apache/carbondata/sdk/file/AvroCarbonWriterTest.java b/store/sdk/src/test/java/org/apache/carbondata/sdk/file/AvroCarbonWriterTest.java
index 105fb6d..163512a 100644
--- a/store/sdk/src/test/java/org/apache/carbondata/sdk/file/AvroCarbonWriterTest.java
+++ b/store/sdk/src/test/java/org/apache/carbondata/sdk/file/AvroCarbonWriterTest.java
@@ -21,30 +21,20 @@ import java.io.File;
 import java.io.FileFilter;
 import java.io.IOException;
 import java.io.UnsupportedEncodingException;
-import java.util.ArrayList;
-import java.util.List;
 
 import org.apache.carbondata.common.exceptions.sql.InvalidLoadOptionException;
 import org.apache.carbondata.core.constants.CarbonCommonConstants;
-import org.apache.carbondata.core.metadata.datatype.ArrayType;
-import org.apache.carbondata.core.metadata.datatype.DataTypes;
-import org.apache.carbondata.core.metadata.datatype.StructField;
-import org.apache.carbondata.core.metadata.datatype.StructType;
 import org.apache.carbondata.core.util.path.CarbonTablePath;
 
 import org.apache.avro.generic.GenericData;
-import org.apache.avro.generic.GenericRecord;
 import org.apache.commons.io.FileUtils;
 import org.apache.commons.lang.CharEncoding;
-import org.apache.hadoop.conf.Configuration;
 import org.junit.Assert;
 import org.junit.Test;
 
-import scala.Array;
 import tech.allegro.schema.json2avro.converter.JsonAvroConverter;
 import org.apache.avro.Schema;
 
-import static org.apache.hadoop.yarn.webapp.hamlet.HamletSpec.InputType.file;
 
 public class AvroCarbonWriterTest {
   private String path = "./AvroCarbonWriterSuiteWriteFiles";
@@ -70,13 +60,9 @@ public class AvroCarbonWriterTest {
     GenericData.Record record = converter.convertToGenericDataRecord(
         json.getBytes(CharEncoding.UTF_8), new Schema.Parser().parse(avroSchema));
 
-    Field[] fields = new Field[2];
-    fields[0] = new Field("name", DataTypes.STRING);
-    fields[1] = new Field("age", DataTypes.STRING);
-
     try {
       CarbonWriter writer = CarbonWriter.builder()
-          .withSchema(new org.apache.carbondata.sdk.file.Schema(fields))
+          .withSchema(AvroCarbonWriter.getCarbonSchemaFromAvroSchema(avroSchema))
           .outputPath(path)
           .isTransactionalTable(true)
           .buildWriterForAvroInput();
@@ -145,19 +131,9 @@ public class AvroCarbonWriterTest {
     GenericData.Record record = converter.convertToGenericDataRecord(
         json.getBytes(CharEncoding.UTF_8), new Schema.Parser().parse(avroSchema));
 
-    Field[] fields = new Field[6];
-    // fields[0] = new Field("mynull", DataTypes.NULL);
-    fields[0] = new Field("myboolean", DataTypes.BOOLEAN);
-    fields[1] = new Field("myint", DataTypes.INT);
-    fields[2] = new Field("mylong", DataTypes.LONG);
-    fields[3] = new Field("myfloat", DataTypes.DOUBLE);
-    fields[4] = new Field("mydouble", DataTypes.DOUBLE);
-    fields[5] = new Field("mystring", DataTypes.STRING);
-
-
     try {
       CarbonWriter writer = CarbonWriter.builder()
-          .withSchema(new org.apache.carbondata.sdk.file.Schema(fields))
+          .withSchema(AvroCarbonWriter.getCarbonSchemaFromAvroSchema(avroSchema))
           .outputPath(path)
           .isTransactionalTable(true)
           .buildWriterForAvroInput();
@@ -250,18 +226,9 @@ public class AvroCarbonWriterTest {
     GenericData.Record record = converter.convertToGenericDataRecord(
         json.getBytes(CharEncoding.UTF_8), nn);
 
-    Field[] fields = new Field[3];
-    fields[0] = new Field("name", DataTypes.STRING);
-    fields[1] = new Field("name1", DataTypes.STRING);
-    // fields[1] = new Field("age", DataTypes.INT);
-    List fld = new ArrayList<StructField>();
-    fld.add(new StructField("street", DataTypes.STRING));
-    fld.add(new StructField("city", DataTypes.STRING));
-    fields[2] = new Field("address", "struct", fld);
-
     try {
       CarbonWriter writer = CarbonWriter.builder()
-          .withSchema(new org.apache.carbondata.sdk.file.Schema(fields))
+          .withSchema(AvroCarbonWriter.getCarbonSchemaFromAvroSchema(mySchema))
           .outputPath(path)
           .isTransactionalTable(true)
           .buildWriterForAvroInput();
@@ -323,18 +290,9 @@ public class AvroCarbonWriterTest {
     GenericData.Record record = converter.convertToGenericDataRecord(
         json.getBytes(CharEncoding.UTF_8), nn);
 
-    Field[] fields = new Field[3];
-    fields[0] = new Field("name", DataTypes.STRING);
-    fields[1] = new Field("name1", DataTypes.STRING);
-    // fields[1] = new Field("age", DataTypes.INT);
-    List fld = new ArrayList<StructField>();
-    fld.add(new StructField("street", DataTypes.STRING));
-    fld.add(new StructField("city", DataTypes.STRING));
-    fields[2] = new Field("address", "struct", fld);
-
     try {
       CarbonWriter writer = CarbonWriter.builder()
-          .withSchema(new org.apache.carbondata.sdk.file.Schema(fields))
+          .withSchema(AvroCarbonWriter.getCarbonSchemaFromAvroSchema(mySchema))
           .outputPath(path)
           .isTransactionalTable(true)
           .buildWriterForAvroInput();
@@ -365,17 +323,6 @@ public class AvroCarbonWriterTest {
 
   private void WriteAvroComplexData(String mySchema, String json, String[] sortColumns)
       throws UnsupportedEncodingException, IOException, InvalidLoadOptionException {
-    Field[] fields = new Field[4];
-    fields[0] = new Field("name", DataTypes.STRING);
-    fields[1] = new Field("name1", DataTypes.STRING);
-    // fields[1] = new Field("age", DataTypes.INT);
-    List fld = new ArrayList<StructField>();
-    fld.add(new StructField("street", DataTypes.STRING));
-    fld.add(new StructField("city", DataTypes.STRING));
-    fields[2] = new Field("address", "struct", fld);
-    List fld1 = new ArrayList<StructField>();
-    fld1.add(new StructField("eachDoorNum", DataTypes.INT));
-    fields[3] = new Field("doorNum","array",fld1);
 
     // conversion to GenericData.Record
     Schema nn = new Schema.Parser().parse(mySchema);
@@ -385,7 +332,7 @@ public class AvroCarbonWriterTest {
 
     try {
       CarbonWriter writer = CarbonWriter.builder()
-          .withSchema(new org.apache.carbondata.sdk.file.Schema(fields))
+          .withSchema(AvroCarbonWriter.getCarbonSchemaFromAvroSchema(mySchema))
           .outputPath(path)
           .isTransactionalTable(true).sortBy(sortColumns)
           .buildWriterForAvroInput();

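To make the recursive RECORD and ARRAY handling concrete, a hedged Scala sketch of converting
a nested Avro schema; the field names are illustrative and the expected shape simply follows
prepareFields/prepareSubFields in the writer change above:

    // Nested Avro schema: a plain string, a record (struct) and an array of int.
    val nestedAvroSchema =
      """{"type": "record", "name": "person", "fields": [
        |  {"name": "name", "type": "string"},
        |  {"name": "address", "type": {"type": "record", "name": "addr", "fields": [
        |     {"name": "street", "type": "string"},
        |     {"name": "city",   "type": "string"}]}},
        |  {"name": "doorNum", "type": {"type": "array", "items": "int"}}]}""".stripMargin

    val carbonSchema = org.apache.carbondata.sdk.file.AvroCarbonWriter
      .getCarbonSchemaFromAvroSchema(nestedAvroSchema)
    // Expected mapping: name -> STRING, address -> struct<street:string, city:string>,
    // doorNum -> array<int> (the single array child is named "val" internally).
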

[28/50] [abbrv] carbondata git commit: [CARBONDATA-2474] Support Modular Plan for Materialized View DataMap

Posted by ma...@apache.org.
http://git-wip-us.apache.org/repos/asf/carbondata/blob/ffddba70/datamap/mv/plan/src/main/scala/org/apache/carbondata/mv/plans/util/BirdcageOptimizer.scala
----------------------------------------------------------------------
diff --git a/datamap/mv/plan/src/main/scala/org/apache/carbondata/mv/plans/util/BirdcageOptimizer.scala b/datamap/mv/plan/src/main/scala/org/apache/carbondata/mv/plans/util/BirdcageOptimizer.scala
new file mode 100644
index 0000000..6363089
--- /dev/null
+++ b/datamap/mv/plan/src/main/scala/org/apache/carbondata/mv/plans/util/BirdcageOptimizer.scala
@@ -0,0 +1,199 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.carbondata.mv.plans.util
+
+import org.apache.spark.sql.catalyst.analysis._
+import org.apache.spark.sql.catalyst.expressions._
+import org.apache.spark.sql.catalyst.optimizer._
+import org.apache.spark.sql.catalyst.plans.logical.{LogicalPlan, _}
+import org.apache.spark.sql.catalyst.rules.{RuleExecutor, _}
+import org.apache.spark.sql.internal.SQLConf
+
+object BirdcageOptimizer extends RuleExecutor[LogicalPlan] {
+
+  val conf = new SQLConf()
+    .copy(SQLConf.CASE_SENSITIVE -> true, SQLConf.STARSCHEMA_DETECTION -> true)
+  protected val fixedPoint = FixedPoint(conf.optimizerMaxIterations)
+
+  def batches: Seq[Batch] = {
+    // Technically some of the rules in Finish Analysis are not optimizer rules and belong more
+    // in the analyzer, because they are needed for correctness (e.g. ComputeCurrentTime).
+    // However, because we also use the analyzer to canonicalize queries (for view definition),
+    // we do not eliminate subqueries or compute current time in the analyzer.
+    Batch(
+      "Finish Analysis", Once,
+      EliminateSubqueryAliases,
+      EliminateView,
+      ReplaceExpressions,
+      ComputeCurrentTime,
+      //      GetCurrentDatabase(sessionCatalog),
+      RewriteDistinctAggregates,
+      ReplaceDeduplicateWithAggregate) ::
+    //////////////////////////////////////////////////////////////////////////////////////////
+    // Optimizer rules start here
+    //////////////////////////////////////////////////////////////////////////////////////////
+    // - Do the first call of CombineUnions before starting the major Optimizer rules,
+    //   since it can reduce the number of iteration and the other rules could add/move
+    //   extra operators between two adjacent Union operators.
+    // - Call CombineUnions again in Batch("Operator Optimizations"),
+    //   since the other rules might make two separate Unions operators adjacent.
+    Batch(
+      "Union", Once,
+      CombineUnions) ::
+    Batch(
+      "Pullup Correlated Expressions", Once,
+      PullupCorrelatedPredicates) ::
+    Batch(
+      "Subquery", Once,
+      OptimizeSubqueries) ::
+    Batch(
+      "Replace Operators", fixedPoint,
+      ReplaceIntersectWithSemiJoin,
+      ReplaceExceptWithAntiJoin,
+      ReplaceDistinctWithAggregate) ::
+    Batch(
+      "Aggregate", fixedPoint,
+      RemoveLiteralFromGroupExpressions,
+      RemoveRepetitionFromGroupExpressions) ::
+    Batch(
+      "Operator Optimizations", fixedPoint, Seq(
+        // Operator push down
+        PushProjectionThroughUnion,
+        ReorderJoin(conf),
+        EliminateOuterJoin(conf),
+        PushPredicateThroughJoin,
+        PushDownPredicate,
+        //      LimitPushDown(conf),
+        ColumnPruning,
+        //      InferFiltersFromConstraints(conf),
+        // Operator combine
+        CollapseRepartition,
+        CollapseProject,
+        CollapseWindow,
+        CombineFilters,
+        CombineLimits,
+        CombineUnions,
+        // Constant folding and strength reduction
+        NullPropagation(conf),
+        FoldablePropagation,
+        //      OptimizeIn(conf),
+        ConstantFolding,
+        ReorderAssociativeOperator,
+        LikeSimplification,
+        BooleanSimplification,
+        SimplifyConditionals,
+        RemoveDispensableExpressions,
+        SimplifyBinaryComparison,
+        //      PruneFilters(conf),
+        EliminateSorts,
+        SimplifyCasts,
+        SimplifyCaseConversionExpressions,
+        RewriteCorrelatedScalarSubquery,
+        EliminateSerialization,
+        RemoveRedundantAliases,
+        RemoveRedundantProject,
+        SimplifyCreateStructOps,
+        SimplifyCreateArrayOps,
+        SimplifyCreateMapOps) ++
+                                            extendedOperatorOptimizationRules: _*) ::
+    Batch(
+      "Check Cartesian Products", Once,
+      CheckCartesianProducts(conf)) ::
+    //    Batch("Join Reorder", Once,
+    //      CostBasedJoinReorder(conf)) ::
+    //    Batch("Decimal Optimizations", fixedPoint,
+    //      DecimalAggregates(conf)) ::
+    Batch(
+      "Object Expressions Optimization", fixedPoint,
+      EliminateMapObjects,
+      CombineTypedFilters) ::
+    //    Batch("LocalRelation", fixedPoint,
+    //      ConvertToLocalRelation,
+    //      PropagateEmptyRelation) ::
+    Batch(
+      "OptimizeCodegen", Once,
+      OptimizeCodegen(conf)) ::
+    Batch(
+      "RewriteSubquery", Once,
+      RewritePredicateSubquery,
+      CollapseProject) :: Nil
+  }
+
+  /**
+   * Optimize all the subqueries inside expression.
+   */
+  object OptimizeSubqueries extends Rule[LogicalPlan] {
+    def apply(plan: LogicalPlan): LogicalPlan = {
+      plan transformAllExpressions {
+        case s: SubqueryExpression =>
+          val Subquery(newPlan) = BirdcageOptimizer.this.execute(Subquery(s.plan))
+          s.withNewPlan(newPlan)
+      }
+    }
+  }
+
+  /**
+   * Override to provide additional rules for the operator optimization batch.
+   */
+  def extendedOperatorOptimizationRules: Seq[Rule[LogicalPlan]] = {
+    Nil
+  }
+}
+
+/**
+ * Push Aggregate through join to fact table.
+ * Pushes down [[Aggregate]] operators where the `grouping` and `aggregate` expressions can
+ * be evaluated using only the attributes of the fact table, the left or right side of a
+ * star-join.
+ * Other [[Aggregate]] expressions stay in the original [[Aggregate]].
+ *
+ * Check 'Aggregate Pushdown Over Join: Design & Preliminary Results' by LiTao for more details
+ */
+// case class PushAggregateThroughJoin(conf: SQLConf) extends Rule[LogicalPlan] with
+// PredicateHelper {
+//
+//  val tableCluster = {
+//    val mapper = new ObjectMapper().registerModule(DefaultScalaModule)
+//    val tableClusterString = conf.getConfString("spark.mv.tableCluster")
+//    mapper.readValue(tableClusterString, classOf[TableCluster])
+//  }
+//
+//  def apply(plan: LogicalPlan): LogicalPlan = plan transform {
+//
+//    // Push down aggregate expressions through Join
+//    case a @ Aggregate(grouping, aggregate, Project(projectList, Join(left, right, jt, cond)))
+//        if (left.isInstanceOf[LeafNode] && => {
+//      val fTables: Set[String] = tableCluster.getFact
+//      val dTables: Set[String] = tableCluster.getDimension
+// //      if canPushThrough(left,a)
+//
+//      if (fTables.contains(s"${left.databaseName}.${left.tableName}")
+//          Aggregate(newGrouping, newAggregate, Project(projectList, Join(Aggregate(_,_,Project
+// (projectList1, left)), right, jt, cond)))
+//      }
+//    }
+//
+//  private def canPushThrough(join: Join): Boolean = join match {
+//    case Join(left : LeafNode, right: LeafNode, Inner, EqualTo(l: AttributeReference,
+// r: AttributeReference)) => true
+//
+//
+//  }
+//
+//
+// }

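For orientation, a hedged sketch of invoking the optimizer: BirdcageOptimizer is a plain
RuleExecutor, so canonicalizing an analyzed plan is a single execute call. The relation and
predicate below are illustrative and are built with the standard Catalyst test DSL:

    import org.apache.spark.sql.catalyst.dsl.expressions._
    import org.apache.spark.sql.catalyst.dsl.plans._
    import org.apache.spark.sql.catalyst.plans.logical.LocalRelation

    import org.apache.carbondata.mv.plans.util.BirdcageOptimizer

    val relation = LocalRelation('a.int, 'b.int)
    // Analyzed but not yet optimized plan.
    val rawPlan = relation.where('a > 1).select('a).analyze
    // Runs the batches defined above (Finish Analysis, Union, Operator Optimizations, ...).
    val canonicalPlan = BirdcageOptimizer.execute(rawPlan)
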
http://git-wip-us.apache.org/repos/asf/carbondata/blob/ffddba70/datamap/mv/plan/src/main/scala/org/apache/carbondata/mv/plans/util/Logical2ModularExtractions.scala
----------------------------------------------------------------------
diff --git a/datamap/mv/plan/src/main/scala/org/apache/carbondata/mv/plans/util/Logical2ModularExtractions.scala b/datamap/mv/plan/src/main/scala/org/apache/carbondata/mv/plans/util/Logical2ModularExtractions.scala
new file mode 100644
index 0000000..de65e37
--- /dev/null
+++ b/datamap/mv/plan/src/main/scala/org/apache/carbondata/mv/plans/util/Logical2ModularExtractions.scala
@@ -0,0 +1,355 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.carbondata.mv.plans.util
+
+import org.apache.spark.sql.catalyst.catalog._
+import org.apache.spark.sql.catalyst.expressions.{Alias, Attribute, AttributeReference,
+  AttributeSet, Expression, NamedExpression, PredicateHelper}
+import org.apache.spark.sql.catalyst.plans._
+import org.apache.spark.sql.catalyst.plans.logical._
+import org.apache.spark.sql.execution.datasources.LogicalRelation
+
+import org.apache.carbondata.mv.plans.modular.Flags._
+import org.apache.carbondata.mv.plans.modular.JoinEdge
+
+/**
+ * SelectModule is extracted from the logical plan of an SPJG query.  All join conditions,
+ * filter predicates, and project operators in a single Aggregate-less subtree of the
+ * logical plan are collected.
+ *
+ * The returned values for this match are as follows:
+ *  - Conditions for equi-join
+ *  - Conditions for filter
+ *  - Project list for project
+ *
+ */
+object ExtractSelectModule extends PredicateHelper {
+  type ReturnType = (Seq[NamedExpression], Seq[Expression], Seq[Expression], Map[Int, String],
+    Seq[JoinEdge], Seq[LogicalPlan], FlagSet, Seq[Seq[Any]], Seq[Seq[Any]])
+
+  def unapply(plan: LogicalPlan): Option[ReturnType] = {
+    val (outputs, inputs, predicates, joinedges, children, isSelect, _, flags, fspecs, wspecs) =
+      collectProjectsFiltersJoinsAndSort(plan)
+    if (!isSelect) {
+      None
+    } else {
+      Some(
+        outputs,
+        inputs,
+        predicates,
+        collectChildAliasMappings(
+          AttributeSet(outputs).toSeq ++ AttributeSet(predicates).toSeq,
+          children),
+        joinedges,
+        children,
+        flags,
+        fspecs,
+        wspecs)
+    }
+  }
+
+  def collectProjectsFiltersJoinsAndSort(plan: LogicalPlan): (Seq[NamedExpression],
+    Seq[Expression], Seq[Expression], Seq[JoinEdge], Seq[LogicalPlan], Boolean, Map[Attribute,
+    Expression], FlagSet, Seq[Seq[Any]], Seq[Seq[Any]]) = {
+    plan match {
+      case Project(fields, child) =>
+        val (_, inputs, predicates, joinedges, children, _, aliases, flags, fspecs, wspecs) =
+          collectProjectsFiltersJoinsAndSort(child)
+        val substitutedFields = fields.map(substitute(aliases)).asInstanceOf[Seq[NamedExpression]]
+        (substitutedFields, inputs, predicates, joinedges, children, true, collectAliases(
+          substitutedFields), flags, fspecs, wspecs)
+
+      case Filter(condition, child) =>
+        val (outputs, inputs, predicates, joinedges, children, _, aliases, flags, fspecs, wspecs)
+        = collectProjectsFiltersJoinsAndSort(child)
+        val substitutedCondition = substitute(aliases)(condition)
+        (outputs, inputs, predicates.flatMap(splitConjunctivePredicates) ++
+                          splitConjunctivePredicates(substitutedCondition), joinedges, children,
+          true, aliases, flags, fspecs, wspecs)
+
+      case Sort(order, global, child) =>
+        val (outputs, inputs, predicates, joinedges, children, _, aliases, flags, fspecs, wspecs)
+        = collectProjectsFiltersJoinsAndSort(child)
+        val substitutedOrder = order.map(substitute(aliases))
+        (outputs, inputs, predicates, joinedges, children, true, aliases, if (global) {
+          flags.setFlag(SORT).setFlag(GLOBAL)
+        } else {
+          flags.setFlag(SORT)
+        }, Seq(Seq(order)) ++ fspecs, wspecs)
+
+      case Join(left, right, joinType, condition) =>
+        val (loutputs, linputs, lpredicates, ljoinedges, lchildren, _, laliases, lflags, lfspecs,
+        lwspecs) = collectProjectsFiltersJoinsAndSort(left)
+        val (routputs, rinputs, rpredicates, rjoinedges, rchildren, _, raliases, rflags, rfspecs,
+        rwspecs) = collectProjectsFiltersJoinsAndSort(right)
+        val (lcondition, rcondition, ccondition) = split(condition, lchildren, rchildren)
+        val joinEdge = collectJoinEdge(ccondition, lchildren, rchildren, joinType)
+        val adjustedJoinEdges = rjoinedges
+          .map(e => JoinEdge(e.left + lchildren.size, e.right + lchildren.size, e.joinType))
+        val output: Seq[Attribute] = {
+          joinType match {
+            case LeftSemi =>
+              left.output
+            case LeftOuter =>
+              left.output ++ right.output.map(_.withNullability(true))
+            case RightOuter =>
+              left.output.map(_.withNullability(true)) ++ right.output
+            case FullOuter =>
+              left.output.map(_.withNullability(true)) ++ right.output.map(_.withNullability(true))
+            case LeftAnti =>
+              left.output
+            case _ =>
+              left.output ++ right.output
+          }
+        }
+        if (lfspecs.isEmpty && rfspecs.isEmpty && lflags == NoFlags && rflags == NoFlags &&
+            lwspecs.isEmpty && rwspecs.isEmpty) {
+          (output, (linputs ++ rinputs), lpredicates.flatMap(splitConjunctivePredicates) ++
+                                         rpredicates.flatMap(splitConjunctivePredicates) ++
+                                         lcondition ++ rcondition ++ ccondition, ljoinedges ++
+                                                                                 joinEdge ++
+                                                                                 adjustedJoinEdges,
+            lchildren ++ rchildren, true, laliases ++ raliases, NoFlags, Seq.empty, Seq.empty)
+        } else {
+          throw new UnsupportedOperationException(
+            s"unsupported join: \n left child ${ left } " +
+            s"\n right child ${ right }")
+        }
+
+      case other =>
+        (other.output, other.output, Nil, Nil, Seq(other), false, Map.empty, NoFlags, Seq.empty, Seq
+          .empty)
+    }
+  }
+
+  def collectAliases(fields: Seq[Expression]): Map[Attribute, Expression] = {
+    fields.collect {
+      case a@Alias(child, _) => a.toAttribute -> child
+    }.toMap
+  }
+
+  def substitute(aliases: Map[Attribute, Expression])(expr: Expression): Expression = {
+    expr.transform {
+      case a@Alias(ref: AttributeReference, name) =>
+        aliases.get(ref).map(Alias(_, name)(a.exprId, a.qualifier)).getOrElse(a)
+
+      case a: AttributeReference =>
+        aliases.get(a).map(Alias(_, a.name)(a.exprId, a.qualifier)).getOrElse(a)
+    }
+  }
+
+  def collectChildAliasMappings(attributeSet: Seq[Attribute], children: Seq[LogicalPlan]
+  ): Map[Int, String] = {
+    val aq = attributeSet.filter(_.qualifier.nonEmpty)
+    children.zipWithIndex.flatMap {
+      case (child, i) =>
+        aq.find(child.outputSet.contains(_)).map(_.qualifier).flatten.map((i, _))
+    }.toMap
+  }
+
+  def split(condition: Option[Expression],
+      lchildren: Seq[LogicalPlan],
+      rchildren: Seq[LogicalPlan]): (Seq[Expression], Seq[Expression], Seq[Expression]) = {
+    val left = lchildren.map(_.outputSet).foldLeft(AttributeSet(Set.empty))(_ ++ _)
+    val right = rchildren.map(_.outputSet).foldLeft(AttributeSet(Set.empty))(_ ++ _)
+    val conditions = condition.map(splitConjunctivePredicates).getOrElse(Nil)
+    val (leftEvaluationCondition, rest) = conditions.partition(_.references subsetOf left)
+    val (rightEvaluationCondition, commonCondition) = rest.partition(_.references subsetOf right)
+    (leftEvaluationCondition, rightEvaluationCondition, commonCondition)
+  }
+
+  /*
+   * collectJoinEdge is only valid when the conditions are the common conditions produced by
+   * the split above; the left and right children correspond to the respective two children
+   * parameters of that split
+   *
+   */
+  def collectJoinEdge(condition: Seq[Expression],
+      lchildren: Seq[LogicalPlan],
+      rchildren: Seq[LogicalPlan],
+      joinType: JoinType): Seq[JoinEdge] = {
+    val common = condition.map(_.references).foldLeft(AttributeSet(Set.empty))(_ ++ _)
+    val lIdxSeq = lchildren
+      .collect { case x if x.outputSet.intersect(common).nonEmpty => lchildren.indexOf(x) }
+    val rIdxSeq = rchildren
+      .collect { case x if x.outputSet.intersect(common).nonEmpty => rchildren.indexOf(x) +
+                                                                     lchildren.size
+      }
+    for (l <- lIdxSeq; r <- rIdxSeq) yield {
+      JoinEdge(l, r, joinType)
+    }
+  }
+
+}
+
+object ExtractSelectModuleForWindow extends PredicateHelper {
+  type ReturnType = (Seq[NamedExpression], Seq[Expression], Seq[Expression], Map[Int, String],
+    Seq[JoinEdge], Seq[LogicalPlan], FlagSet, Seq[Seq[Any]], Seq[Seq[Any]])
+
+  def unapply(plan: LogicalPlan): Option[ReturnType] = {
+    collectSelectFromWindowChild(plan)
+  }
+
+  def collectSelectFromWindowChild(plan: LogicalPlan): Option[(Seq[NamedExpression],
+    Seq[Expression], Seq[Expression], Map[Int, String], Seq[JoinEdge], Seq[LogicalPlan], FlagSet,
+    Seq[Seq[Any]], Seq[Seq[Any]])] = {
+    plan match {
+      case agg@Aggregate(_, _, _) =>
+        Some(
+          agg.aggregateExpressions,
+          agg.child.output,
+          Seq.empty,
+          Map.empty,
+          Seq.empty,
+          Seq(agg),
+          NoFlags,
+          Seq.empty,
+          Seq.empty)
+      case ExtractSelectModule(
+      output,
+      input,
+      predicate,
+      aliasmap,
+      joinedge,
+      children,
+      flags,
+      fspec,
+      wspec) =>
+        Some(output, input, predicate, aliasmap, joinedge, children, flags, fspec, wspec)
+      case Window(exprs, _, _, child) =>
+        val ret: Option[(Seq[NamedExpression], Seq[Expression], Seq[Expression], Map[Int,
+          String], Seq[JoinEdge], Seq[LogicalPlan], FlagSet, Seq[Seq[Any]], Seq[Seq[Any]])] =
+          collectSelectFromWindowChild(
+            child)
+        ret.map(r => (r._1, r._2, r._3, r._4, r._5, r._6, r._7, r._8, Seq(Seq(exprs)) ++ r._9))
+      case other => None
+    }
+  }
+}
+
+/**
+ * GroupByModule is extracted from the Aggregate node of logical plan.
+ * The groupingExpressions, aggregateExpressions are collected.
+ *
+ * The returned values for this match are as follows:
+ *  - Grouping attributes for the Aggregate node.
+ *  - Aggregates for the Aggregate node.
+ *  - Project list for project
+ *
+ */
+
+object ExtractGroupByModule extends PredicateHelper {
+  type ReturnType = (Seq[NamedExpression], Seq[Expression], Seq[Expression], Option[String],
+    LogicalPlan, FlagSet, Seq[Seq[Any]])
+
+  def unapply(plan: LogicalPlan): Option[ReturnType] = {
+    plan match {
+      case a@logical.Aggregate(_, _, e@Expand(_, _, p: Project)) if isGroupingSet(a, e, p) =>
+        // Assumption: Aggregate's groupingExpressions is composed of
+        // 1) the grouping attributes
+        // 2) gid, which is always the last one
+        val g = a.groupingExpressions.map(_.asInstanceOf[Attribute])
+        val numOriginalOutput = e.output.size - g.size
+        Some(
+          a.aggregateExpressions,
+          e.output,
+          a.groupingExpressions,
+          None,
+          p,
+          NoFlags.setFlag(EXPAND),
+          Seq(Seq(e.projections, e.output, numOriginalOutput)))
+      case logical.Aggregate(groupingExpressions, aggregateExpressions, child) =>
+        Some(
+          aggregateExpressions,
+          child.output,
+          groupingExpressions,
+          None,
+          child,
+          NoFlags,
+          Seq.empty)
+      case other => None
+    }
+  }
+
+  private def isGroupingSet(a: Aggregate, e: Expand, p: Project): Boolean = {
+    assert(a.child == e && e.child == p)
+
+    if (a.groupingExpressions.forall(_.isInstanceOf[Attribute])) {
+      val g = a.groupingExpressions.map(_.asInstanceOf[Attribute])
+      sameOutput(
+        e.output.drop(e.output.size - g.size),
+        a.groupingExpressions.map(_.asInstanceOf[Attribute]))
+    } else {
+      false
+    }
+  }
+
+  private def sameOutput(output1: Seq[Attribute], output2: Seq[Attribute]): Boolean = {
+    output1.size == output2.size &&
+    output1.zip(output2).forall(pair => pair._1.semanticEquals(pair._2))
+  }
+}
+
+object ExtractUnionModule extends PredicateHelper {
+  type ReturnType = (Seq[LogicalPlan], FlagSet, Seq[Seq[Any]])
+
+  def unapply(plan: LogicalPlan): Option[ReturnType] = {
+    plan match {
+      case u: Union =>
+        val children = collectUnionChildren(u)
+        Some(children, NoFlags, Seq.empty)
+      case _ => None
+    }
+  }
+
+  private def collectUnionChildren(plan: LogicalPlan): List[LogicalPlan] = {
+    plan match {
+      case Union(children) => children.toList match {
+        case head :: Nil => collectUnionChildren(head)
+        case head :: tail => collectUnionChildren(head) ++ collectUnionChildren(Union(tail))
+        case Nil => Nil
+      }
+      case other => other :: Nil
+    }
+  }
+}
+
+object ExtractTableModule extends PredicateHelper {
+  type ReturnType = (String, String, Seq[NamedExpression], Seq[LogicalPlan], FlagSet, Seq[Seq[Any]])
+
+  def unapply(plan: LogicalPlan): Option[ReturnType] = {
+    plan match {
+      // uncomment for cloudera1 version
+//      case m: CatalogRelation =>
+//        Some(m.tableMeta.database, m.tableMeta.identifier.table, m.output, Nil, NoFlags,
+//          Seq.empty)
+//       uncomment for apache version
+      case m: HiveTableRelation =>
+            Some(m.tableMeta.database, m.tableMeta.identifier.table, m.output, Nil, NoFlags,
+              Seq.empty)
+      case l: LogicalRelation =>
+        val tableIdentifier = l.catalogTable.map(_.identifier)
+        val database = tableIdentifier.map(_.database).flatten.getOrElse(null)
+        val table = tableIdentifier.map(_.table).getOrElse(null)
+        Some(database, table, l.output, Nil, NoFlags, Seq.empty)
+      case l: LocalRelation => // used for unit test
+        Some(null, null, l.output, Nil, NoFlags, Seq.empty)
+      case _ => None
+    }
+  }
+}

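A minimal Scala sketch of how these extractor objects are typically consumed when translating
a logical plan into a modular plan; the translation itself is not part of this file, so
everything except the extractor names and their arities is illustrative:

    import org.apache.spark.sql.catalyst.plans.logical.LogicalPlan

    import org.apache.carbondata.mv.plans.util.{ExtractGroupByModule, ExtractSelectModule,
      ExtractTableModule, ExtractUnionModule}

    // Placeholder handlers only; each case binds the tuple returned by the unapply above.
    def describe(plan: LogicalPlan): String = plan match {
      case ExtractGroupByModule(_, _, groupings, _, _, _, _) =>
        s"group-by block with ${groupings.size} grouping expression(s)"
      case ExtractSelectModule(_, _, predicates, _, joinEdges, children, _, _, _) =>
        s"select block over ${children.size} child plan(s), " +
          s"${predicates.size} predicate(s), ${joinEdges.size} join edge(s)"
      case ExtractUnionModule(children, _, _) =>
        s"union of ${children.size} branch plans"
      case ExtractTableModule(db, table, _, _, _, _) =>
        s"table module for $db.$table"
      case _ =>
        "plan shape not handled by these extractors"
    }
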
http://git-wip-us.apache.org/repos/asf/carbondata/blob/ffddba70/datamap/mv/plan/src/main/scala/org/apache/carbondata/mv/plans/util/LogicalPlanSignatureGenerator.scala
----------------------------------------------------------------------
diff --git a/datamap/mv/plan/src/main/scala/org/apache/carbondata/mv/plans/util/LogicalPlanSignatureGenerator.scala b/datamap/mv/plan/src/main/scala/org/apache/carbondata/mv/plans/util/LogicalPlanSignatureGenerator.scala
new file mode 100644
index 0000000..0c5661e
--- /dev/null
+++ b/datamap/mv/plan/src/main/scala/org/apache/carbondata/mv/plans/util/LogicalPlanSignatureGenerator.scala
@@ -0,0 +1,101 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.carbondata.mv.plans.util
+
+import org.apache.spark.sql.catalyst.catalog._
+import org.apache.spark.sql.catalyst.plans.logical._
+import org.apache.spark.sql.execution.datasources.LogicalRelation
+
+import org.apache.carbondata.mv.plans._
+
+object CheckSPJG {
+
+  def isSPJG(subplan: LogicalPlan): Boolean = {
+    subplan match {
+      case a: Aggregate =>
+        a.child.collect {
+          case Join(_, _, _, _) | Project(_, _) | Filter(_, _) |
+//               CatalogRelation(_, _, _) |
+               LogicalRelation(_, _, _) | LocalRelation(_, _) => true
+          case _ => false
+        }.forall(identity)
+      case _ => false
+    }
+  }
+}
+
+object LogicalPlanSignatureGenerator extends SignatureGenerator[LogicalPlan] {
+  lazy val rule: SignatureRule[LogicalPlan] = LogicalPlanRule
+
+  override def generate(plan: LogicalPlan): Option[Signature] = {
+    if ( plan.isSPJG ) {
+      super.generate(plan)
+    } else {
+      None
+    }
+  }
+}
+
+object LogicalPlanRule extends SignatureRule[LogicalPlan] {
+
+  def apply(plan: LogicalPlan, childSignatures: Seq[Option[Signature]]): Option[Signature] = {
+
+    plan match {
+      case LogicalRelation(_, _, _) =>
+        // TODO: implement this (link to BaseRelation)
+        None
+//      case CatalogRelation(tableMeta, _, _) =>
+//        Some(Signature(false,
+//          Set(Seq(tableMeta.database, tableMeta.identifier.table).mkString("."))))
+      case l: LocalRelation =>
+        // LocalRelation is for unit test cases
+        Some(Signature(groupby = false, Set(l.toString())))
+      case Filter(_, _) =>
+        if (childSignatures.length == 1 && !childSignatures(0).getOrElse(Signature()).groupby) {
+          // if (!childSignatures(0).getOrElse(Signature()).groupby) {
+          childSignatures(0)
+          // }
+        } else {
+          None
+        }
+      case Project(_, _) =>
+        if ( childSignatures.length == 1 && !childSignatures(0).getOrElse(Signature()).groupby ) {
+          childSignatures(0)
+        } else {
+          None
+        }
+      case Join(_, _, _, _) =>
+        if ( childSignatures.length == 2 &&
+             !childSignatures(0).getOrElse(Signature()).groupby &&
+             !childSignatures(1).getOrElse(Signature()).groupby ) {
+          Some(Signature(false,
+            childSignatures(0).getOrElse(Signature()).datasets
+              .union(childSignatures(1).getOrElse(Signature()).datasets)))
+        } else {
+          None
+        }
+      case Aggregate(_, _, _) =>
+        if ( childSignatures.length == 1 && !childSignatures(0).getOrElse(Signature()).groupby ) {
+          Some(Signature(true, childSignatures(0).getOrElse(Signature()).datasets))
+        } else {
+          None
+        }
+      case _ => None
+    }
+  }
+}

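A hedged sketch of exercising the signature generator on a small SPJG-shaped plan; LocalRelation
is exactly the case the rule above treats as a unit-test relation, and the Catalyst DSL is used
only to build the example plan:

    import org.apache.spark.sql.catalyst.dsl.expressions._
    import org.apache.spark.sql.catalyst.dsl.plans._
    import org.apache.spark.sql.catalyst.plans.logical.LocalRelation

    import org.apache.carbondata.mv.plans.util.LogicalPlanSignatureGenerator

    val relation = LocalRelation('a.int, 'b.int)
    val aggregatePlan = relation.groupBy('a)('a, count('b))   // Aggregate over a LocalRelation

    // The plan is SPJG, so a signature is produced: groupby is true and the dataset set
    // contains the LocalRelation's string form, per LogicalPlanRule above.
    val signature = LogicalPlanSignatureGenerator.generate(aggregatePlan)
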
http://git-wip-us.apache.org/repos/asf/carbondata/blob/ffddba70/datamap/mv/plan/src/main/scala/org/apache/carbondata/mv/plans/util/Printers.scala
----------------------------------------------------------------------
diff --git a/datamap/mv/plan/src/main/scala/org/apache/carbondata/mv/plans/util/Printers.scala b/datamap/mv/plan/src/main/scala/org/apache/carbondata/mv/plans/util/Printers.scala
new file mode 100644
index 0000000..b7641d5
--- /dev/null
+++ b/datamap/mv/plan/src/main/scala/org/apache/carbondata/mv/plans/util/Printers.scala
@@ -0,0 +1,347 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.carbondata.mv.plans.util
+
+import java.io.{OutputStream, PrintWriter, StringWriter}
+
+import org.apache.spark.sql.catalyst.expressions.{Expression, NamedExpression, _}
+import org.apache.spark.sql.catalyst.plans.JoinType
+import org.apache.spark.sql.catalyst.util.quoteIdentifier
+
+import org.apache.carbondata.mv.plans.modular.Flags._
+import org.apache.carbondata.mv.plans.util.SQLBuildDSL._
+
+// scalastyle:off println
+trait Printers {
+
+  abstract class FragmentPrinter(out: PrintWriter) {
+    protected var indentMargin = 0
+    protected val indentStep = 2
+    protected var indentString = "                                        " // 40
+
+    def indent(): Unit = indentMargin += indentStep
+
+    def undent(): Unit = indentMargin -= indentStep
+
+    def println(): Unit = {
+      out.println()
+      while (indentMargin > indentString.length()) {
+        indentString += indentString
+      }
+      if (indentMargin > 0) {
+        out.write(indentString, 0, indentMargin)
+      }
+    }
+
+    def printSeq[a](ls: List[a])(printelem: a => Unit)(printsep: => Unit) {
+      ls match {
+        case List() =>
+        case List(x) => printelem(x)
+        case x :: rest => printelem(x); printsep; printSeq(rest)(printelem)(printsep)
+      }
+    }
+
+    def printColumn(ts: List[Fragment], start: String, sep: String, end: String) {
+      print(start)
+      indent
+      println()
+      printSeq(ts) { print(_) } { print(sep); println() }
+      undent
+      println()
+      print(end)
+    }
+
+    def printRow(ts: List[Fragment], start: String, sep: String, end: String) {
+      print(start)
+      printSeq(ts) { print(_) } { print(sep) }
+      print(end)
+    }
+
+    def printRow(ts: List[Fragment], sep: String) { printRow(ts, "", sep, "") }
+
+    def printFragment(frag: Fragment): Unit
+
+    def print(args: Any*): Unit = {
+      args foreach {
+        case frag: Fragment =>
+          printFragment(frag)
+        case arg =>
+          out.print(if (arg == null) {
+            "null"
+          } else {
+            arg.toString
+          })
+      }
+    }
+  }
+
+  /**
+   * A sql fragment printer which is stingy about vertical whitespace and unnecessary
+   * punctuation.
+   */
+  class SQLFragmentCompactPrinter(out: PrintWriter) extends FragmentPrinter(out) {
+
+    object Position extends Enumeration {
+      type Position = Value
+      val Select, From, GroupBy, Where, Sort, Limit = Value
+    }
+
+    import Position._
+
+    def printSelect(select: Seq[NamedExpression], flags: FlagSet): Unit = {
+      if (flags.hasFlag(DISTINCT)) {
+        print("SELECT DISTINCT %s "
+          .format(select.map(_.sql) mkString ", "))
+      } else {
+        print("SELECT %s ".format(select.map(_.sql) mkString ", "))
+      }
+    }
+
+    def printFromElem(f: (SQLBuildDSL.Fragment, Option[JoinType], Seq[Expression])): Unit = {
+      f._2 map { t => print("%s JOIN ".format(t.sql)) }
+      printFragment(f._1)
+      if (f._3.nonEmpty) {
+        print(" ON %s".format(f._3.map(_.sql).mkString(" AND ")))
+      }
+    }
+
+    def printFrom(
+        from: Seq[(SQLBuildDSL.Fragment, Option[JoinType], Seq[Expression])]): Seq[Unit] = {
+      def fromIndented(
+          x: (SQLBuildDSL.Fragment, Option[JoinType], Seq[Expression])): Unit = {
+        indent
+        println()
+        printFromElem(x)
+        undent
+      }
+
+      print("FROM")
+      from map fromIndented
+    }
+
+    def printWhere(where: Seq[Expression]): Unit = {
+      if (where != Nil) {
+        print("WHERE")
+        //        if (where.nonEmpty) {
+        //          val condition = where(0)
+        //          val dt = condition.dataType
+        //          val t = condition.sql
+        //          print(t)
+        //        }
+        indent
+        println()
+        print("%s".format(where.map(_.sql).mkString(" AND ")))
+        undent
+      }
+    }
+
+    def printGroupby(groupby: (Seq[Expression], Seq[Seq[Expression]])): Unit = {
+
+      if (groupby._1.nonEmpty) {
+        print("GROUP BY %s".format(groupby._1.map(_.sql).mkString(", ")))
+        if (groupby._2.nonEmpty) {
+          print(" GROUPING SETS(%s)"
+            .format(groupby._2.map(e => s"(${ e.map(_.sql).mkString(", ") })").mkString(", ")))
+        }
+      }
+    }
+
+    def printHaving(having: Seq[Expression]): Unit = {
+      if (having != Nil) {
+        print("HAVING %s".format(having.map(_.sql).mkString(" AND ")))
+      }
+    }
+
+    def printUnion(sqls: Seq[Fragment]): Unit = {
+      sqls match {
+        case sql1 :: sql2 :: rest =>
+          printFragment(sql1)
+          println()
+          print("UNION ALL")
+          println()
+          printUnion(sql2 :: rest)
+        case sql :: Nil => printFragment(sql)
+      }
+    }
+
+    def printTable(name: Seq[String]): Unit = {
+      print("%s".format(name.last))
+    }
+
+    trait ExprSeq extends Seq[SortOrder]
+
+    def printExprModifiers(modifiers: (FlagSet, Seq[Seq[Any]]), pos: Position): Unit = {
+      val flagsNeedExprs = for {flag <- pickledListOrder
+                                if (modifiers._1.hasFlag(flag))} yield {
+        (flag)
+      }
+
+      flagsNeedExprs.zip(modifiers._2).foreach {
+        case (SORT, Seq(order)) =>
+          if (pos == Sort) { // TODO: how to verify type of order:  && order
+            // .isInstanceOf[Seq[SortOrder]]) {
+            println()
+            if (modifiers._1.hasFlag(GLOBAL)) {
+              print("ORDER BY %s"
+                .format(order.asInstanceOf[Seq[SortOrder]].map {
+                  // walk around for a sparkSQL bug
+                  s => {
+                         s.child match {
+                           case a: Alias =>
+                             val qualifierPrefix = a.qualifier
+                               .map(_ + ".").getOrElse("")
+                             s"$qualifierPrefix${
+                               quoteIdentifier(a
+                                 .name)
+                             }"
+
+                           case other => other.sql
+                         }
+                       } + " " + s.direction.sql + " " + s.nullOrdering.sql
+                }.mkString(", ")))
+            } else {
+              print("SORT BY %s".format(order.asInstanceOf[Seq[SortOrder]].map {
+                // workaround for a SparkSQL bug
+                s => {
+                       s.child match {
+                         case a: Alias =>
+                           val qualifierPrefix = a.qualifier.map(_ + ".")
+                             .getOrElse("")
+                           s"$qualifierPrefix${ quoteIdentifier(a.name) }"
+
+                         case other => other.sql
+                       }
+                     } + " " + s.direction.sql + " " + s.nullOrdering.sql
+              }.mkString(", ")))
+            }
+          }
+        case (LIMIT, Seq(limitExpr)) =>
+          if (pos == Limit && limitExpr.isInstanceOf[Expression]) {
+            println()
+            print(s"LIMIT ${ limitExpr.asInstanceOf[Expression].sql }")
+          }
+        case (EXPAND, Seq(_, _)) =>
+        case _ =>
+          throw new UnsupportedOperationException(s"unsupported modifiers: ${
+            flagToString(modifiers
+              ._1)
+          }")
+      }
+    }
+
+    override def printFragment(frag: Fragment): Unit = {
+      require(
+        frag.Supported,
+        "Fragment is not supported.  Current frag:\n" + frag)
+
+      frag match {
+        case spjg@SPJGFragment(select, from, where, groupby, having, alias, modifiers) =>
+          if (alias.nonEmpty) {
+            print("(")
+          }
+          printSelect(select, modifiers._1)
+          if (from.nonEmpty) {
+            println()
+          }
+          printFrom(from)
+          if (where.nonEmpty) {
+            println()
+          }
+          printWhere(where)
+          if (groupby._1.nonEmpty) {
+            println()
+          }
+          printGroupby(groupby)
+          if (having.nonEmpty) {
+            println()
+          }
+          printHaving(having)
+          if (modifiers._1.hasFlag(SORT)) {
+            printExprModifiers(modifiers, Sort)
+          }
+          if (modifiers._1.hasFlag(LIMIT)) {
+            printExprModifiers(modifiers, Limit)
+          }
+          alias.map { case a: String => print(") %s ".format(a)) }
+        case union@UNIONFragment(sqls, alias, modifiers) =>
+          if (alias.nonEmpty) {
+            print("(")
+          }
+          printUnion(sqls)
+          if (modifiers._1.hasFlag(LIMIT)) {
+            printExprModifiers(modifiers, Limit)
+          }
+          alias.map { case a: String => print(") %s ".format(a)) }
+        case table@TABLEFragment(name, alias, modifiers) =>
+          printTable(name)
+          alias.map { case a: String =>
+            if (!(a == name.last)) {
+              print(" %s ".format(a))
+            }
+          }
+        case _ =>
+          throw new UnsupportedOperationException(s"unsupported plan $frag")
+      }
+    }
+  }
+
+  /**
+   * A sql fragment printer which is one single line for a fragment.
+   */
+  class SQLFragmentOneLinePrinter(out: PrintWriter) extends SQLFragmentCompactPrinter(out) {
+    protected val sep = "  "
+
+    override def println() { print(sep) }
+  }
+
+  /** @group Printers */
+  protected def render(what: Any, mkPrinter: PrintWriter => FragmentPrinter): String = {
+    val buffer = new StringWriter()
+    val writer = new PrintWriter(buffer)
+    var printer = mkPrinter(writer)
+
+    printer.print(what)
+    writer.flush()
+    buffer.toString
+  }
+
+  def asCompactString(t: Fragment): String = render(t, newSQLFragmentCompactPrinter)
+
+  def asOneLineString(t: Fragment): String = render(t, newSQLFragmentOneLinePrinter)
+
+  def newSQLFragmentCompactPrinter(writer: PrintWriter): SQLFragmentCompactPrinter = {
+    new SQLFragmentCompactPrinter(
+      writer)
+  }
+
+  def newSQLFragmentCompactPrinter(stream: OutputStream): SQLFragmentCompactPrinter = {
+    newSQLFragmentCompactPrinter(
+      new PrintWriter(stream))
+  }
+
+  def newSQLFragmentOneLinePrinter(writer: PrintWriter): SQLFragmentOneLinePrinter = {
+    new SQLFragmentOneLinePrinter(
+      writer)
+  }
+
+  def newSQLFragmentOneLinePrinter(stream: OutputStream): SQLFragmentOneLinePrinter = {
+    newSQLFragmentOneLinePrinter(
+      new PrintWriter(stream))
+  }
+}
+// scalastyle:on println

http://git-wip-us.apache.org/repos/asf/carbondata/blob/ffddba70/datamap/mv/plan/src/main/scala/org/apache/carbondata/mv/plans/util/SQLBuild.scala
----------------------------------------------------------------------
diff --git a/datamap/mv/plan/src/main/scala/org/apache/carbondata/mv/plans/util/SQLBuild.scala b/datamap/mv/plan/src/main/scala/org/apache/carbondata/mv/plans/util/SQLBuild.scala
new file mode 100644
index 0000000..8c52d28
--- /dev/null
+++ b/datamap/mv/plan/src/main/scala/org/apache/carbondata/mv/plans/util/SQLBuild.scala
@@ -0,0 +1,31 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.carbondata.mv.plans.util
+
+import java.util.concurrent.atomic.AtomicLong
+
+class SQLBuild private (
+    nextSubqueryId: AtomicLong,
+    subqueryPrefix: String) {
+  self =>
+
+  def this(subqueryPrefix: String) =
+    this(new AtomicLong(0), subqueryPrefix)
+
+  def newSubqueryName(): String = s"${subqueryPrefix}${nextSubqueryId.getAndIncrement()}"
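+
+  // Illustrative usage (not part of the original source): with a prefix such as "gen_subquery_",
+  // successive calls return "gen_subquery_0", "gen_subquery_1", ...; the AtomicLong keeps the
+  // counter safe when the same builder is shared across threads.
+  //   val build = new SQLBuild("gen_subquery_")
+  //   val first = build.newSubqueryName()   // "gen_subquery_0"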
+}

http://git-wip-us.apache.org/repos/asf/carbondata/blob/ffddba70/datamap/mv/plan/src/main/scala/org/apache/carbondata/mv/plans/util/SQLBuildDSL.scala
----------------------------------------------------------------------
diff --git a/datamap/mv/plan/src/main/scala/org/apache/carbondata/mv/plans/util/SQLBuildDSL.scala b/datamap/mv/plan/src/main/scala/org/apache/carbondata/mv/plans/util/SQLBuildDSL.scala
new file mode 100644
index 0000000..307fff0
--- /dev/null
+++ b/datamap/mv/plan/src/main/scala/org/apache/carbondata/mv/plans/util/SQLBuildDSL.scala
@@ -0,0 +1,428 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.carbondata.mv.plans.util
+
+import org.apache.spark.sql.catalyst.expressions.{Alias, Attribute, AttributeMap, AttributeReference, AttributeSet, BitwiseAnd, Cast, Expression, Grouping, GroupingID, Literal, NamedExpression, ShiftRight}
+import org.apache.spark.sql.catalyst.plans.JoinType
+import org.apache.spark.sql.types.{ByteType, IntegerType}
+
+import org.apache.carbondata.mv.plans.modular
+import org.apache.carbondata.mv.plans.modular.Flags._
+import org.apache.carbondata.mv.plans.modular.ModularPlan
+
+trait SQLBuildDSL {
+
+  abstract class Fragment extends Product {
+    // this class corresponds to TreeNode in Spark SQL or Tree in the Scala compiler
+    def canEqual(that: Any): Boolean = {
+      throw new UnsupportedOperationException
+    }
+
+    def productArity: Int = throw new UnsupportedOperationException
+
+    def productElement(n: Int): Any = throw new UnsupportedOperationException
+
+    // TODO: add library needed for SQL building
+    def isUnsupported: Boolean
+
+    def Supported: Boolean
+
+    // def pos: Position
+    def alias: Option[String]
+  }
+
+  case class SPJGFragment(
+      select: Seq[NamedExpression] = Nil, // TODO: more general, change to Seq[Either[Fragment,
+      // NamedExpression]]
+      from: Seq[(Fragment, Option[JoinType], Seq[Expression])] = Nil,
+      where: Seq[Expression] = Nil,
+      groupby: (Seq[Expression], Seq[Seq[Expression]]) = (Nil, Nil),
+      having: Seq[Expression] = Nil,
+      alias: Option[String] = None,
+      modifiers: (FlagSet, Seq[Seq[Any]])) extends Fragment {
+    override def isUnsupported: Boolean = {
+      select == Nil || from == Nil ||
+      from.map(_._1.isUnsupported).foldLeft(false)(_ || _)
+    }
+
+    override def Supported: Boolean = !isUnsupported
+  }
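+
+  // Illustrative mapping (added for clarity, not part of the original source): a SPJGFragment
+  // corresponds to one SELECT block,
+  //   SELECT   <select>
+  //   FROM     <from>        -- each entry is (fragment, optional join type, join conditions)
+  //   WHERE    <where>
+  //   GROUP BY <groupby._1>  -- with grouping sets, if any, carried in groupby._2
+  //   HAVING   <having>
+  // and is wrapped as an aliased sub-query when `alias` is defined.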
+
+  //  TODO: find a scheme to break up fragmentExtract() using unapply
+  //
+  //  object SPJGFragment {
+  //    type ReturnType = (Seq[NamedExpression],Seq[(Fragment, Option[JoinType],Seq[Expression])
+  // ],Seq[Expression],Seq[Expression],Seq[Expression],Option[String])
+  //
+  //    def unapply(plan: ModularPlan): Option[ReturnType] = fragmentExtract(plan, None)
+  //  }
+
+  case class TABLEFragment(
+      table: Seq[String] = Nil,
+      alias: Option[String] = None,
+      modifiers: (FlagSet, Seq[Seq[Any]])) extends Fragment {
+    override def isUnsupported: Boolean = table == Nil
+
+    override def Supported: Boolean = !isUnsupported
+  }
+
+  case class UNIONFragment(
+      union: Seq[Fragment] = Nil,
+      alias: Option[String] = None,
+      modifiers: (FlagSet, Seq[Seq[Any]])) extends Fragment {
+    override def isUnsupported: Boolean = {
+      union == Nil ||
+      union.map(_.isUnsupported).foldLeft(false)(_ || _)
+    }
+
+    override def Supported: Boolean = !isUnsupported
+  }
+
+  val UnsupportedFragment: Fragment = new Fragment {
+    def isUnsupported = true
+
+    def Supported = false
+
+    def alias = None
+  }
+
+  /**
+   * Turns a bunch of string segments into a single string and separates each segment by a space.
+   * The segments are trimmed so only a single space appears in the separation.
+   * For example, `build("a", " b ", " c")` becomes "a b c".
+   */
+  protected def build(segments: String*): String = {
+    segments.map(_.trim).filter(_.nonEmpty).mkString(" ")
+  }
+
+  def fragmentExtract(plan: ModularPlan, alias: Option[String]): Fragment = {
+    if (plan.rewritten) {
+      fragmentExtract_Rewritten(plan, alias)
+    } else {
+      fragmentExtract_NonRewritten(plan, alias)
+    }
+  }
+
+  // Rewritten portion of query plan
+  private def fragmentExtract_Rewritten(
+      plan: ModularPlan,
+      alias: Option[String]): Fragment = {
+    plan match {
+      case s1@modular.Select(_, _, _, _, _,
+        Seq(g@modular.GroupBy(_, _, _, _,
+        s2@modular.Select(_, _, _, _, _, _, _, _, _, _), _, _, _)), _, _, _, _)
+        if (!s1.skip && !g.skip && !s2.skip) =>
+        extractRewrittenOrNonRewrittenSelectGroupBySelect(s1, g, s2, alias)
+
+      case s1@modular.Select(_, _, _, _, _,
+        Seq(g@modular.GroupBy(_, _, _, _,
+        s2@modular.Select(_, _, _, _, _, _, _, _, _, _), _, _, _)), _, _, _, _)
+        if (s1.skip && g.skip && s2.skip) =>
+        extractRewrittenOrNonRewrittenSelectGroupBySelect(s1, g, s2, alias)
+
+      case s1@modular.Select(_, _, _, _, _, Seq(g@modular.GroupBy(_, _, _, _,
+        s2@modular.Select(_, _, _, _, _, _, _, _, _, _), _, _, _)), _, _, _, _)
+        if (!s1.skip && !g.skip && s2.skip) =>
+        extractRewrittenSelectGroupBy(s1, g, alias)
+
+      case s1@modular.Select(_, _, _, _, _, Seq(s2@modular.Select(_, _, _, _, _, _, _, _, _, _)),
+        _, _, _, _) if (!s1.skip && s2.skip) =>
+        extractRewrittenSelect(s1, alias)
+
+      case other => extractSimpleOperator(other, alias)
+    }
+  }
+
+  // Non-rewritten portion of query plan
+  private def fragmentExtract_NonRewritten(
+      plan: ModularPlan,
+      alias: Option[String]): Fragment = {
+    plan match {
+      case s1@modular.Select(_, _, _, _, _,
+        Seq(g@modular.GroupBy(_, _, _, _,
+        s2@modular.Select(_, _, _, _, _, _, _, _, _, _), _, _, _)), _, _, _, _)
+        if (s1.aliasMap.isEmpty && !g.rewritten) =>
+        extractRewrittenOrNonRewrittenSelectGroupBySelect(s1, g, s2, alias)
+
+      case g@modular.GroupBy(_, _, _, _, s2@modular.Select(_, _, _, _, _, _, _, _, _, _), _, _, _)
+        if (g.alias.isEmpty && !s2.rewritten) =>
+        val fragmentList = s2.children.zipWithIndex
+          .map { case (child, index) => fragmentExtract(child, s2.aliasMap.get(index)) }
+        val fList = s2.joinEdges.map {
+          e => {
+            (e.right, (fragmentList(e.right), Some(e.joinType), s2
+              .extractRightEvaluableConditions(s2.children(e.left), s2.children(e.right))))
+          }
+        }.toMap
+        val from = (0 to fragmentList.length - 1)
+          .map(index => fList.get(index).getOrElse((fragmentList(index), None, Nil)))
+        val excludesPredicate = from.flatMap(_._3).toSet
+        val (select, (groupByExprs, groupingSet)) = addGroupingSetIfNeeded(g, s2)
+        SPJGFragment(
+          select,
+          from,
+          s2.predicateList.filter { p => !excludesPredicate(p) },
+          (groupByExprs, groupingSet),
+          Nil,
+          alias,
+          (g.flags, g.flagSpec))
+
+      case g@modular.GroupBy(_, _, _, _, _, _, _, _) if (g.alias.nonEmpty) =>
+        val from = Seq((fragmentExtract(g.child, g.alias), None, Nil))
+        SPJGFragment(
+          g.outputList,
+          from,
+          Nil,
+          (g.predicateList, Seq.empty),
+          Nil,
+          alias,
+          (g.flags, g.flagSpec))
+
+      case other => extractSimpleOperator(other, alias)
+    }
+  }
+
+  // used in both rewritten and non-rewritten cases
+  // currently, in rewritten cases, we don't consider grouping sets
+  private def extractRewrittenOrNonRewrittenSelectGroupBySelect(
+      s1: modular.Select,
+      g: modular.GroupBy,
+      s2: modular.Select,
+      alias: Option[String]): Fragment = {
+    val fragmentList = s2.children.zipWithIndex
+      .map { case (child, index) => fragmentExtract(child, s2.aliasMap.get(index)) }
+    val fList = s2.joinEdges.map {
+      e => {
+        (e.right, (fragmentList(e.right), Some(e.joinType), s2
+          .extractRightEvaluableConditions(s2.children(e.left), s2.children(e.right))))
+      }
+    }.toMap
+    val from = (0 to fragmentList.length - 1)
+      .map(index => fList.get(index).getOrElse((fragmentList(index), None, Nil)))
+    val excludesPredicate = from.flatMap(_._3).toSet
+    val aliasMap = AttributeMap(g.outputList.collect { case a: Alias => (a.toAttribute, a) })
+    val windowExprs = s1.windowSpec
+      .map { case Seq(expr) => expr.asInstanceOf[Seq[NamedExpression]] }
+      .foldLeft(Seq.empty.asInstanceOf[Seq[NamedExpression]])(_ ++ _)
+    val having = s1.predicateList.map {
+      case attr: Attribute => aliasMap.get(attr).map(_.child).getOrElse(attr)
+      case expr: Expression => expr.transform { case a: Alias => a.child }
+      case other => other
+    }
+
+    val (select_g, (groupByExprs, groupingSet)) = addGroupingSetIfNeeded(g, s2)
+
+    val gSet = AttributeSet(g.outputList.map(_.toAttribute))
+    val sSet = AttributeSet(s1.outputList.map(_.toAttribute))
+    val select = if (groupingSet.nonEmpty) {
+      if (gSet.equals(sSet) && windowExprs.isEmpty) {
+        select_g
+      } else {
+        throw new UnsupportedOperationException
+      }
+    } else {
+      // TODO: how to handle alias of attribute in MV
+      s1.outputList.map { attr => aliasMap.get(attr.toAttribute).getOrElse(attr) } ++ windowExprs
+    }
+
+    SPJGFragment(
+      select, // select
+      from, // from
+      s2.predicateList.filter { p => !excludesPredicate(p) }, // where
+      (groupByExprs, groupingSet), // group by
+      having, // having
+      alias,
+      (s1.flags, s1.flagSpec))
+  }
+
+  // used in rewritten cases only -- grouping sets are not considered here
+  private def extractRewrittenSelectGroupBy(
+      s1: modular.Select,
+      g: modular.GroupBy,
+      alias: Option[String]): Fragment = {
+    val fragment = fragmentExtract(g.child, g.alias)
+    val from = Seq((fragment, None, Nil))
+    val aliasMap = AttributeMap(g.outputList.collect { case a: Alias => (a.toAttribute, a) })
+    val windowExprs = s1.windowSpec
+      .map { case Seq(expr) => expr.asInstanceOf[Seq[NamedExpression]] }
+      .foldLeft(Seq.empty.asInstanceOf[Seq[NamedExpression]])(_ ++ _)
+    val having = s1.predicateList.map {
+      case attr: Attribute => aliasMap.get(attr).map(_.child).getOrElse(attr)
+      case expr: Expression => expr.transform { case a: Alias => a.child }
+      case other => other
+    }
+
+    val (select_g, (groupByExprs, groupingSet)) = (g.outputList, (g.predicateList, Seq.empty))
+
+    // TODO:  how to handle alias of attribute in MV
+    val select = s1.outputList.map { attr => aliasMap.get(attr.toAttribute).getOrElse(attr) } ++
+                 windowExprs
+    SPJGFragment(
+      select, // select
+      from, // from
+      Nil, // where
+      (groupByExprs, groupingSet), // group by
+      having, // having
+      alias,
+      (s1.flags, s1.flagSpec))
+  }
+
+  private def extractRewrittenSelect(s1: modular.Select, alias: Option[String]): Fragment = {
+    val fragment = fragmentExtract(s1.children(0), s1.aliasMap.get(0))
+    val from = Seq((fragment, None, Nil))
+    val windowExprs = s1.windowSpec
+      .map { case Seq(expr) => expr.asInstanceOf[Seq[NamedExpression]] }
+      .foldLeft(Seq.empty.asInstanceOf[Seq[NamedExpression]])(_ ++ _)
+    val select = s1.outputList.map(_.toAttribute)
+
+    SPJGFragment(
+      select, // select
+      from, // from
+      s1.predicateList, // where
+      (Nil, Nil), // group by
+      Nil, // having
+      alias,
+      (s1.flags, s1.flagSpec))
+  }
+
+  private def extractSimpleOperator(
+      operator: ModularPlan,
+      alias: Option[String]): Fragment = {
+    operator match {
+      case s@modular.Select(_, _, _, _, _, _, _, _, _, _) =>
+        val fragmentList = s.children.zipWithIndex
+          .map { case (child, index) => fragmentExtract(child, s.aliasMap.get(index)) }
+        val fList = s.joinEdges.map {
+          e => {
+            (e.right, (fragmentList(e.right), Some(e.joinType), s
+              .extractRightEvaluableConditions(s.children(e.left), s.children(e.right))))
+          }
+        }.toMap
+        val from = (0 to fragmentList.length - 1)
+          .map(index => fList.get(index).getOrElse((fragmentList(index), None, Nil)))
+        val excludesPredicate = from.flatMap(_._3).toSet
+        val windowExprs = s.windowSpec
+          .map { case Seq(expr) => expr.asInstanceOf[Seq[NamedExpression]] }
+          .foldLeft(Seq.empty.asInstanceOf[Seq[NamedExpression]])(_ ++ _)
+        val select = s.outputList ++ windowExprs
+
+        SPJGFragment(
+          select, // select
+          from, // from
+          s.predicateList.filter { p => !excludesPredicate(p) }, // where
+          (Nil, Nil), // group by
+          Nil, // having
+          alias,
+          (s.flags, s.flagSpec))
+
+      case u@modular.Union(_, _, _) =>
+        UNIONFragment(
+          u.children.zipWithIndex.map { case (child, index) => fragmentExtract(child, None) },
+          alias,
+          (u.flags, u.flagSpec))
+
+      case d@modular.ModularRelation(_, _, _, _, _) =>
+        if (d.databaseName != null && d.tableName != null) {
+          TABLEFragment(
+            Seq(d.databaseName, d.tableName), alias, (d.flags, d.rest))
+        } else {
+          TABLEFragment(Seq((d.output).toString()), alias, (d.flags, d.rest))
+        }
+
+      case h@modular.HarmonizedRelation(_) =>
+        fragmentExtract(h.source, alias)
+
+      case _ => UnsupportedFragment
+    }
+  }
+
+  private def addGroupingSetIfNeeded(g: modular.GroupBy, s: modular.Select) = {
+    if (g.flags.hasFlag(EXPAND)) {
+      assert(g.predicateList.length > 1)
+      val flagsNeedExprs =
+        for {flag <- pickledListOrder if (g.flags.hasFlag(flag))} yield {
+          flag
+        }
+      flagsNeedExprs.zip(g.flagSpec).collect {
+        case (EXPAND, Seq(projections_, output_, numOriginalOutput_)) =>
+          val output = output_.asInstanceOf[Seq[Attribute]]
+          val projections = projections_.asInstanceOf[Seq[Seq[Expression]]]
+          val numOriginalOutput = numOriginalOutput_.asInstanceOf[Int]
+
+          // The last column of Expand is always grouping ID
+          val gid = output.last
+
+          val groupByAttributes = g.predicateList.dropRight(1).map(_.asInstanceOf[Attribute])
+          // Assumption: project's projectList is composed of
+          // 1) the original output (Project's child.output),
+          // 2) the aliased group by expressions.
+          val expandedAttributes = s.output.drop(numOriginalOutput)
+          val groupByExprs = s.outputList.drop(numOriginalOutput).map(_.asInstanceOf[Alias].child)
+
+          // a map from group by attributes to the original group by expressions.
+          val groupByAttrMap = AttributeMap(groupByAttributes.zip(groupByExprs))
+          // a map from expanded attributes to the original group by expressions.
+          val expandedAttrMap = AttributeMap(expandedAttributes.zip(groupByExprs))
+
+          val groupingSet: Seq[Seq[Expression]] = projections.map { project =>
+            // Assumption: expand.projections is composed of
+            // 1) the original output (Project's child.output),
+            // 2) expanded attributes(or null literal)
+            // 3) gid, which is always the last one in each project in Expand
+            project.drop(numOriginalOutput).dropRight(1).collect {
+              case attr: Attribute if expandedAttrMap.contains(attr) => expandedAttrMap(attr)
+            }
+          }
+
+          val aggExprs = g.outputList.map { case aggExpr =>
+            val originalAggExpr = aggExpr.transformDown {
+              // grouping_id() is converted to VirtualColumn.groupingIdName by Analyzer. Revert
+              // it back.
+              case ar: AttributeReference if ar == gid => GroupingID(Nil)
+              case ar: AttributeReference if groupByAttrMap.contains(ar) => groupByAttrMap(ar)
+              case a@Cast(
+              BitwiseAnd(
+              ShiftRight(ar: AttributeReference, Literal(value: Any, IntegerType)),
+              Literal(1, IntegerType)), ByteType, None) if ar == gid =>
+                // for converting an expression to its original SQL format grouping(col)
+                val idx = groupByExprs.length - 1 - value.asInstanceOf[Int]
+                groupByExprs.lift(idx).map(Grouping).getOrElse(a)
+            }
+
+            originalAggExpr match {
+              // Ancestor operators may reference the output of this grouping set,
+              // and we use exprId to generate a unique name for each attribute, so we should
+              // make sure the transformed aggregate expression won't change the output,
+              // i.e. exprId and alias name should remain the same.
+              case ne: NamedExpression if ne.exprId == aggExpr.exprId => ne
+              case e => Alias(e, aggExpr.name)(exprId = aggExpr.exprId)
+            }
+          }
+          (aggExprs, (groupByExprs, groupingSet))
+
+      }.head
+    } else {
+      (g.outputList, (g.predicateList, Seq.empty))
+    }
+  }
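+
+  // Illustrative example (added for clarity, not part of the original source): for a plan built
+  // from, say, SELECT a, b, sum(c) FROM t GROUP BY a, b WITH ROLLUP, the EXPAND flag is set and
+  // this method recovers groupByExprs = Seq(a, b) plus groupingSet = Seq(Seq(a, b), Seq(a), Seq()),
+  // while grouping_id()/grouping(col) calls in the aggregate expressions are restored from
+  // references to the generated gid column.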
+}
+
+object SQLBuildDSL extends SQLBuildDSL

http://git-wip-us.apache.org/repos/asf/carbondata/blob/ffddba70/datamap/mv/plan/src/main/scala/org/apache/carbondata/mv/plans/util/SQLBuilder.scala
----------------------------------------------------------------------
diff --git a/datamap/mv/plan/src/main/scala/org/apache/carbondata/mv/plans/util/SQLBuilder.scala b/datamap/mv/plan/src/main/scala/org/apache/carbondata/mv/plans/util/SQLBuilder.scala
new file mode 100644
index 0000000..4bc8b97
--- /dev/null
+++ b/datamap/mv/plan/src/main/scala/org/apache/carbondata/mv/plans/util/SQLBuilder.scala
@@ -0,0 +1,262 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.carbondata.mv.plans.util
+
+import java.util.concurrent.atomic.AtomicLong
+
+import scala.collection.immutable
+
+import org.apache.spark.sql.catalyst.expressions.{Alias, Attribute, AttributeMap, AttributeReference, AttributeSet, Expression, NamedExpression}
+import org.apache.spark.sql.catalyst.rules.{Rule, RuleExecutor}
+
+import org.apache.carbondata.mv.expressions.modular._
+import org.apache.carbondata.mv.plans._
+import org.apache.carbondata.mv.plans.modular.{Select, _}
+import org.apache.carbondata.mv.plans.util.SQLBuildDSL.Fragment
+
+// TODO: generalize this using SQLBuild to handle nested sub-query expression, using the
+// correspondence
+//      SQLBuild <--> QueryRewrite
+class SQLBuilder private(
+    modularPlan: ModularPlan,
+    nextSubqueryId: AtomicLong,
+    subqueryPrefix: String) {
+
+  def this(modularPlan: ModularPlan) = {
+    this(
+      SQLBuilder.subqueryPreprocess(modularPlan, new AtomicLong()),
+      new AtomicLong(0),
+      "gen_subquery_")
+  }
+
+  // only allow one level of scalar subqueries
+  def this(modularPlan: ModularPlan, subqueryPrefix: String) = {
+    this(SQLBuilder.screenOutPlanWithSubquery(modularPlan), new AtomicLong(0), subqueryPrefix)
+  }
+
+  private def newSubqueryName(): String = {
+    s"${ subqueryPrefix }${
+      nextSubqueryId.getAndIncrement()
+    }"
+  }
+
+  def fragmentExtract: Fragment = {
+    val finalPlan: ModularPlan = SQLizer.execute(modularPlan)
+    SQLBuildDSL.fragmentExtract(finalPlan, None)
+  }
+
+  object SQLizer extends RuleExecutor[ModularPlan] {
+    override protected def batches: Seq[Batch] = {
+      Seq(
+        Batch(
+          "Recover Scoping Info", Once,
+          // Clean up qualifiers due to erasure of sub-query aliases to use base tables as
+          // qualifiers
+          // This complements AddSubquery to handle corner case qMV3 in Tpcds_1_4_Querybatch.scala
+          CleanupQualifier,
+          // Insert sub queries on top of operators that need to appear after FROM clause.
+          AddSubquery
+        )
+      )
+    }
+
+    object CleanupQualifier extends Rule[ModularPlan] {
+      override def apply(tree: ModularPlan): ModularPlan = {
+        tree transformDown {
+          case s@modular.Select(_, _, _, _, _,
+            Seq(g@modular.GroupBy(_, _, _, _,
+            s2@modular.Select(_, _, _, _, _, _, _, _, _, _), _, _, _)), _, _, _, _)
+            if !s.rewritten && s2.children.forall { _.isInstanceOf[modular.LeafNode] } =>
+            val attrMap = AttributeMap(s2.outputList
+              .collect { case a: Alias if a.child.isInstanceOf[Attribute] => (a.toAttribute, a) })
+            cleanupQualifier(s, s2.aliasMap, s2.children, attrMap)
+
+          case g@modular.GroupBy(_, _, _, _,
+            s2@modular.Select(_, _, _, _, _, _, _, _, _, _), _, _, _)
+            if !g.rewritten && s2.children.forall { _.isInstanceOf[modular.LeafNode] } =>
+            val attrMap = AttributeMap(s2.outputList
+              .collect { case a: Alias if a.child.isInstanceOf[Attribute] => (a.toAttribute, a) })
+            cleanupQualifier(g, s2.aliasMap, s2.children, attrMap)
+
+          case s@modular.Select(_, _, _, _, _, _, _, _, _, _)
+            if !s.rewritten && s.children.forall { _.isInstanceOf[modular.LeafNode] } =>
+            cleanupQualifier(s, s.aliasMap, s.children, AttributeMap(Seq.empty[(Attribute, Alias)]))
+
+        }
+      }
+
+      private def cleanupQualifier(node: ModularPlan,
+          aliasMap: Map[Int, String],
+          children: Seq[ModularPlan],
+          attrMap: AttributeMap[Alias]): ModularPlan = {
+        node transform {
+          case plan if !plan.rewritten && !plan.isInstanceOf[modular.LeafNode] =>
+            plan.transformExpressions {
+              case ref: Attribute =>
+                val i = children.indexWhere(_.outputSet.contains(ref))
+                if (i > -1) {
+                  // this is a workaround for a mystery of the Spark qualifier
+                  if (aliasMap.nonEmpty && aliasMap(i).nonEmpty) {
+                    AttributeReference(
+                      ref.name,
+                      ref.dataType)(exprId = ref.exprId, qualifier = Option(aliasMap(i)))
+                  } else {
+                    ref
+                  }
+                } else {
+                  attrMap.get(ref) match {
+                    case Some(alias) => Alias(alias.child, alias.name)(exprId = alias.exprId)
+                    case None => ref
+                  }
+                }
+            }
+        }
+      }
+    }
+
+    object AddSubquery extends Rule[ModularPlan] {
+      override def apply(tree: ModularPlan): ModularPlan = {
+        tree transformUp {
+          case s@modular.Select(_, _, _, _, _,
+            Seq(g@modular.GroupBy(_, _, _, _,
+            s2@modular.Select(_, _, _, _, _, _, _, _, _, _), _, _, _)), _, _, _, _)
+            if s2.children.forall { _.isInstanceOf[modular.LeafNode] } => s
+
+          case g@modular.GroupBy(_, _, _, _,
+            s2@modular.Select(_, _, _, _, _, _, _, _, _, _), _, _, _)
+            if s2.children.forall { _.isInstanceOf[modular.LeafNode] } => g
+
+          case s@modular.Select(_, _, _, _, _, _, _, _, _, _)
+            if !s.rewritten && !s.children.forall { _.isInstanceOf[modular.LeafNode] } =>
+            var list: List[(Int, String)] = List()
+            var newS = s.copy()
+            s.children.zipWithIndex.filterNot { _._1.isInstanceOf[modular.LeafNode] }.foreach {
+              case (child: ModularPlan, index) if (!s.aliasMap.contains(index)) =>
+                val subqueryName = newSubqueryName()
+                val windowAttributeSet = if (child.isInstanceOf[Select]) {
+                  val windowExprs = child.asInstanceOf[Select].windowSpec
+                    .map { case Seq(expr) => expr.asInstanceOf[Seq[NamedExpression]] }
+                    .foldLeft(Seq.empty.asInstanceOf[Seq[NamedExpression]])(_ ++ _)
+                  SQLBuilder.collectAttributeSet(windowExprs)
+                } else {
+                  AttributeSet.empty
+                }
+                val subqueryAttributeSet = child.outputSet ++ windowAttributeSet
+                //              TODO: how to use alias to avoid duplicate names with distinct
+                // exprIds
+                //              val duplicateNames = collectDuplicateNames(subqueryAttributeSet)
+                // .toSet
+                if (SQLBuilder.collectDuplicateNames(subqueryAttributeSet).nonEmpty) {
+                  throw new UnsupportedOperationException(
+                    s"duplicate name(s): ${ child.output.mkString(", ") }")
+                }
+                list = list :+ ((index, subqueryName))
+                newS = newS.transformExpressions {
+                  case ref: Attribute if (subqueryAttributeSet.contains(ref)) =>
+                    AttributeReference(ref.name, ref.dataType)(
+                      exprId = ref.exprId,
+                      qualifier = Some(subqueryName))
+                  case alias: Alias if (subqueryAttributeSet.contains(alias.toAttribute)) =>
+                    Alias(alias.child, alias.name)(
+                      exprId = alias.exprId,
+                      qualifier = Some(subqueryName))
+                }
+
+              case _ =>
+            }
+            if (list.nonEmpty) {
+              list = list ++ s.aliasMap.toSeq
+              newS.copy(aliasMap = list.toMap)
+            } else {
+              newS
+            }
+
+          case g@modular.GroupBy(_, _, _, _, _, _, _, _) if (!g.rewritten && g.alias.isEmpty) =>
+            val newG = if (g.outputList.isEmpty) {
+              val ol = g.predicateList.map { case a: Attribute => a }
+              g.copy(outputList = ol)
+            } else {
+              g
+            }
+            val subqueryName = newSubqueryName()
+            val subqueryAttributeSet = newG.child.outputSet
+            if (SQLBuilder.collectDuplicateNames(subqueryAttributeSet).nonEmpty) {
+              throw new UnsupportedOperationException(
+                s"duplicate name(s): ${ newG.child.output.mkString(", ") }")
+            }
+            newG.transformExpressions {
+              case ref: AttributeReference if (subqueryAttributeSet.contains(ref)) =>
+                AttributeReference(ref.name, ref.dataType)(
+                  exprId = ref.exprId,
+                  qualifier = Some(subqueryName))
+              case alias: Alias if (subqueryAttributeSet.contains(alias.toAttribute)) =>
+                Alias(alias.child, alias.name)(
+                  exprId = alias.exprId,
+                  qualifier = Some(subqueryName))
+            }.copy(alias = Some(subqueryName))
+        }
+      }
+    }
+  }
+
+}
+
+object SQLBuilder {
+  def collectAttributeSet(outputList: Seq[Expression]): AttributeSet = {
+    AttributeSet(outputList
+      .collect {
+        case a@Alias(_, _) => a.toAttribute
+        case a: Attribute => a
+      })
+  }
+
+  def collectDuplicateNames(s: AttributeSet): immutable.Iterable[String] = {
+    s.baseSet.map(_.a).groupBy(_.name).collect { case (x, ys) if ys.size > 1 => x }
+  }
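+
+  // Illustrative example (not part of the original source): for an AttributeSet holding
+  // attributes named a#1, a#2 and b#3 this returns Iterable("a"), i.e. only the names that occur
+  // more than once, which AddSubquery uses to reject ambiguous sub-query outputs.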
+
+  def subqueryPreprocess(plan: ModularPlan, nextScalarSubqueryId: AtomicLong): ModularPlan = {
+    def newSubqueryName(nextScalarSubqueryId: AtomicLong): String = {
+      s"gen_expression_${
+        nextScalarSubqueryId
+          .getAndIncrement()
+      }_"
+    }
+
+    plan.transformAllExpressions {
+      case s: ModularSubquery =>
+        if (s.children.isEmpty) {
+          val subqueryName = newSubqueryName(nextScalarSubqueryId)
+          SubqueryHolder(s"(${ s.plan.asOneLineSQL(subqueryName) })")
+        } else {
+          throw new UnsupportedOperationException(s"Expression $s can't be converted to SQL")
+        }
+      case o => o
+    }
+  }
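+
+  // Illustrative behaviour (not part of the original source): a scalar sub-query expression such
+  // as (SELECT max(x) FROM t) is replaced by a SubqueryHolder wrapping its one-line SQL, named
+  // gen_expression_0_, gen_expression_1_, ...; sub-queries with child expressions are rejected
+  // with UnsupportedOperationException.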
+
+  def screenOutPlanWithSubquery(plan: ModularPlan): ModularPlan = {
+    plan.transformAllExpressions {
+      case s: ModularSubquery =>
+        throw new UnsupportedOperationException(s"Nested scalar subquery $s is not supported")
+      case e => e
+    }
+  }
+}

http://git-wip-us.apache.org/repos/asf/carbondata/blob/ffddba70/datamap/mv/plan/src/main/scala/org/apache/carbondata/mv/plans/util/Signature.scala
----------------------------------------------------------------------
diff --git a/datamap/mv/plan/src/main/scala/org/apache/carbondata/mv/plans/util/Signature.scala b/datamap/mv/plan/src/main/scala/org/apache/carbondata/mv/plans/util/Signature.scala
new file mode 100644
index 0000000..c46124b
--- /dev/null
+++ b/datamap/mv/plan/src/main/scala/org/apache/carbondata/mv/plans/util/Signature.scala
@@ -0,0 +1,49 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.carbondata.mv.plans.util
+
+import org.apache.spark.internal.Logging
+import org.apache.spark.sql.catalyst.trees.TreeNode
+
+case class Signature(groupby: Boolean = true, datasets: Set[String] = Set.empty)
+
+abstract class SignatureRule[BaseType <: TreeNode[BaseType]] extends Logging {
+  def apply(subplan: BaseType, signatures: Seq[Option[Signature]]): Option[Signature]
+}
+
+abstract class SignatureGenerator[BaseType <: TreeNode[BaseType]] extends Logging {
+  protected val rule: SignatureRule[BaseType]
+
+  def generate(subplan: BaseType): Option[Signature] = {
+    generateUp(subplan)
+  }
+
+  protected def generateChildren(subplan: BaseType, nextOperation: BaseType => Option[Signature]
+  ): Seq[Option[Signature]] = {
+    subplan.children.map { child =>
+      nextOperation(child.asInstanceOf[BaseType])
+    }
+  }
+
+  def generateUp(subplan: BaseType): Option[Signature] = {
+    val childSignatures = generateChildren(subplan, t => generateUp(t))
+    val lastSignature = rule(subplan, childSignatures)
+    lastSignature
+  }
+}
+

http://git-wip-us.apache.org/repos/asf/carbondata/blob/ffddba70/datamap/mv/plan/src/main/scala/org/apache/carbondata/mv/plans/util/TableCluster.scala
----------------------------------------------------------------------
diff --git a/datamap/mv/plan/src/main/scala/org/apache/carbondata/mv/plans/util/TableCluster.scala b/datamap/mv/plan/src/main/scala/org/apache/carbondata/mv/plans/util/TableCluster.scala
new file mode 100644
index 0000000..021518c
--- /dev/null
+++ b/datamap/mv/plan/src/main/scala/org/apache/carbondata/mv/plans/util/TableCluster.scala
@@ -0,0 +1,55 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.carbondata.mv.plans.util
+
+import com.fasterxml.jackson.annotation.{JsonCreator, JsonProperty, JsonRawValue}
+import com.google.common.base.Objects
+
+class TableCluster @JsonCreator()(@JsonProperty("fact") @JsonRawValue fact: Set[String],
+    @JsonProperty("dimension") @JsonRawValue dimension: Set[String]) {
+
+  //  @JsonProperty
+  def getFact(): Set[String] = {
+    fact
+  }
+
+  //
+  //  @JsonProperty
+  def getDimension(): Set[String] = {
+    dimension
+  }
+
+  @Override
+  override def toString: String = {
+    Objects.toStringHelper(this)
+      .add("fact", fact)
+      .add("dimension", dimension)
+      .toString
+  }
+
+  /*
+  @Override
+  def toString = {
+    MoreObjects.toStringHelper(this)
+    .add("fact", fact)
+    .add("dimension", dimension)
+    .toString
+  }
+  *
+  */
+}

http://git-wip-us.apache.org/repos/asf/carbondata/blob/ffddba70/datamap/mv/plan/src/main/scala/org/apache/carbondata/mv/testutil/ModularPlanTest.scala
----------------------------------------------------------------------
diff --git a/datamap/mv/plan/src/main/scala/org/apache/carbondata/mv/testutil/ModularPlanTest.scala b/datamap/mv/plan/src/main/scala/org/apache/carbondata/mv/testutil/ModularPlanTest.scala
new file mode 100644
index 0000000..0c68a1f
--- /dev/null
+++ b/datamap/mv/plan/src/main/scala/org/apache/carbondata/mv/testutil/ModularPlanTest.scala
@@ -0,0 +1,180 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.carbondata.mv.testutil
+
+import java.io.File
+
+import org.apache.spark.sql.catalyst.expressions._
+import org.apache.spark.sql.catalyst.expressions.aggregate.AggregateExpression
+import org.apache.spark.sql.catalyst.plans.logical._
+import org.apache.spark.sql.catalyst.util._
+import org.apache.spark.sql.test.util.{PlanTest, QueryTest}
+
+import org.apache.carbondata.mv.plans.modular
+import org.apache.carbondata.mv.plans.modular.{ModularPlan, OneRowTable, Select}
+import org.apache.carbondata.mv.plans.modular.Flags._
+
+/**
+ * Provides helper methods for comparing plans.
+ */
+abstract class ModularPlanTest extends QueryTest with PredicateHelper {
+
+  /**
+   * Since attribute references are given globally unique ids during analysis,
+   * we must normalize them to check if two different queries are identical.
+   */
+  protected def normalizeExprIds(plan: ModularPlan): plan.type = {
+    plan transformAllExpressions {
+      case s: ScalarSubquery =>
+        s.copy(exprId = ExprId(0))
+      case e: Exists =>
+        e.copy(exprId = ExprId(0))
+      case l: ListQuery =>
+        l.copy(exprId = ExprId(0))
+      case a: AttributeReference =>
+        AttributeReference(a.name, a.dataType, a.nullable)(exprId = ExprId(0))
+      case a: Alias =>
+        Alias(a.child, a.name)(exprId = ExprId(0))
+      case ae: AggregateExpression =>
+        ae.copy(resultId = ExprId(0))
+    }
+  }
+
+  /**
+   * Normalizes plans:
+   * - Normalize the filter conditions that appear in a plan. For instance,
+   *   ((expr 1 && expr 2) && expr 3), (expr 1 && expr 2 && expr 3), (expr 3 && (expr 1 && expr 2))
+   *   etc. will all now be equivalent.
+   * - Sample: the seed is replaced by 0L.
+   * - Join conditions are re-sorted by hashCode.
+   */
+  protected def normalizePlan(plan: LogicalPlan): LogicalPlan = {
+    plan transform {
+      case filter@Filter(condition: Expression, child: LogicalPlan) =>
+        Filter(
+          splitConjunctivePredicates(condition).map(rewriteEqual(_)).sortBy(_.hashCode())
+            .reduce(And), child)
+      case sample: Sample =>
+        sample.copy(seed = 0L)(true)
+      case join@Join(left, right, joinType, condition) if condition.isDefined =>
+        val newCondition =
+          splitConjunctivePredicates(condition.get).map(rewriteEqual(_)).sortBy(_.hashCode())
+            .reduce(And)
+        Join(left, right, joinType, Some(newCondition))
+    }
+  }
+
+  /**
+   * Rewrites [[EqualTo]] and [[EqualNullSafe]] operators into a canonical operand order, so that
+   * the following cases become
+   * equivalent:
+   * 1. (a = b), (b = a);
+   * 2. (a <=> b), (b <=> a).
+   */
+  private def rewriteEqual(condition: Expression): Expression = {
+    condition match {
+      case eq@EqualTo(l: Expression, r: Expression) =>
+        Seq(l, r).sortBy(_.hashCode()).reduce(EqualTo)
+      case eq@EqualNullSafe(l: Expression, r: Expression) =>
+        Seq(l, r).sortBy(_.hashCode()).reduce(EqualNullSafe)
+      case _ => condition // Don't reorder.
+    }
+  }
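+
+  // Illustrative example (not part of the original source): because operands are sorted by
+  // hashCode before the comparison is rebuilt, rewriteEqual(EqualTo(a, b)) and
+  // rewriteEqual(EqualTo(b, a)) yield the same expression, so normalized plans compare equal
+  // regardless of the operand order written in the query.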
+
+  //
+  //  /** Fails the test if the two plans do not match */
+  //  protected def comparePlans(plan1: LogicalPlan, plan2: LogicalPlan) {
+  //    val normalized1 = normalizePlan(normalizeExprIds(plan1))
+  //    val normalized2 = normalizePlan(normalizeExprIds(plan2))
+  //    if (normalized1 != normalized2) {
+  //      fail(
+  //        s"""
+  //          |== FAIL: Plans do not match ===
+  //          |${sideBySide(normalized1.treeString, normalized2.treeString).mkString("\n")}
+  //         """.stripMargin)
+  //    }
+  //  }
+  //
+  //  /** Fails the test if the two expressions do not match */
+  //  protected def compareExpressions(e1: Expression, e2: Expression): Unit = {
+  //    comparePlans(Filter(e1, OneRowRelation), Filter(e2, OneRowRelation))
+  //  }
+  //
+  //  /** Fails the test if the join order in the two plans do not match */
+  //  protected def compareJoinOrder(plan1: LogicalPlan, plan2: LogicalPlan) {
+  //    val normalized1 = normalizePlan(normalizeExprIds(plan1))
+  //    val normalized2 = normalizePlan(normalizeExprIds(plan2))
+  //    if (!sameJoinPlan(normalized1, normalized2)) {
+  //      fail(
+  //        s"""
+  //           |== FAIL: Plans do not match ===
+  //           |${sideBySide(normalized1.treeString, normalized2.treeString).mkString("\n")}
+  //         """.stripMargin)
+  //    }
+  //  }
+  //
+  //  /** Consider symmetry for joins when comparing plans. */
+  //  private def sameJoinPlan(plan1: LogicalPlan, plan2: LogicalPlan): Boolean = {
+  //    (plan1, plan2) match {
+  //      case (j1: Join, j2: Join) =>
+  //        (sameJoinPlan(j1.left, j2.left) && sameJoinPlan(j1.right, j2.right)) ||
+  //          (sameJoinPlan(j1.left, j2.right) && sameJoinPlan(j1.right, j2.left))
+  //      case (p1: Project, p2: Project) =>
+  //        p1.projectList == p2.projectList && sameJoinPlan(p1.child, p2.child)
+  //      case _ =>
+  //        plan1 == plan2
+  //    }
+  //  }
+  /** Fails the test if the corresponding pairs of plans do not match */
+  protected def comparePlanCollections(planSet1: Seq[String], planSet2: Seq[String]) {
+    for ((plan1, plan2) <- planSet1 zip planSet2) {
+      compareMessages(plan1, plan2)
+    }
+  }
+
+  /**
+   * Fails the test if the two plans do not match. Only expression ids are normalized here, which
+   * is enough for our test cases; for more general normalization see Spark's PlanTest.scala for
+   * logical plans.
+   */
+  protected def comparePlans(plan1: ModularPlan, plan2: ModularPlan) {
+    val normalized1 = normalizeExprIds(plan1)
+    val normalized2 = normalizeExprIds(plan2)
+    if (normalized1 != normalized2) {
+      fail(
+        s"""
+           |== FAIL: Plans do not match ===
+           |${ sideBySide(normalized1.treeString, normalized2.treeString).mkString("\n") }
+         """.stripMargin)
+    }
+  }
+
+  /** Fails the test if the two expressions do not match */
+  protected def compareExpressions(e1: Seq[Expression], e2: Seq[Expression]): Unit = {
+    comparePlans(
+      Select(Nil, Nil, e1, Map.empty, Nil, Seq(OneRowTable), NoFlags, Seq.empty, Seq.empty), modular
+        .Select(Nil, Nil, e2, Map.empty, Nil, Seq(OneRowTable), NoFlags, Seq.empty, Seq.empty))
+  }
+
+  protected def compareMessages(msg1: String, msg2: String) {
+    if (msg1 != msg2) {
+      fail(
+        s"""
+           |== FAIL: Messages do not match ==
+           |${ sideBySide(msg1, msg2).mkString("\n") }
+           """.stripMargin)
+    }
+  }
+}


[09/50] [abbrv] carbondata git commit: [CARBONDATA-2416] Support DEFERRED REBUILD when creating DataMap

Posted by ma...@apache.org.
http://git-wip-us.apache.org/repos/asf/carbondata/blob/747be9b1/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/datamap/TestDataMapStatus.scala
----------------------------------------------------------------------
diff --git a/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/datamap/TestDataMapStatus.scala b/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/datamap/TestDataMapStatus.scala
index 67effda..cccfb3f 100644
--- a/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/datamap/TestDataMapStatus.scala
+++ b/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/datamap/TestDataMapStatus.scala
@@ -17,6 +17,7 @@
 
 package org.apache.carbondata.spark.testsuite.datamap
 
+import java.io.File
 import java.util
 
 import scala.collection.JavaConverters._
@@ -26,14 +27,12 @@ import org.scalatest.BeforeAndAfterAll
 
 import org.apache.carbondata.common.exceptions.sql.MalformedDataMapCommandException
 import org.apache.carbondata.core.constants.CarbonCommonConstants
-import org.apache.carbondata.core.datamap.dev.{DataMapRefresher, DataMapWriter}
 import org.apache.carbondata.core.datamap.dev.cgdatamap.{CoarseGrainDataMap, CoarseGrainDataMapFactory}
+import org.apache.carbondata.core.datamap.dev.{DataMapBuilder, DataMapWriter}
 import org.apache.carbondata.core.datamap.status.{DataMapStatus, DataMapStatusManager}
 import org.apache.carbondata.core.datamap.{DataMapDistributable, DataMapMeta, Segment}
 import org.apache.carbondata.core.datastore.page.ColumnPage
 import org.apache.carbondata.core.features.TableOperation
-import org.apache.carbondata.core.datastore.row.CarbonRow
-import org.apache.carbondata.core.metadata.AbsoluteTableIdentifier
 import org.apache.carbondata.core.metadata.schema.table.{CarbonTable, DataMapSchema}
 import org.apache.carbondata.core.scan.filter.intf.ExpressionType
 import org.apache.carbondata.core.util.CarbonProperties
@@ -44,10 +43,11 @@ class TestDataMapStatus extends QueryTest with BeforeAndAfterAll {
   val testData = s"$resourcesPath/sample.csv"
 
   override def beforeAll: Unit = {
+    new File(CarbonProperties.getInstance().getSystemFolderLocation).delete()
     drop
   }
 
-  test("datamap status disable for new datamap") {
+  test("datamap status enable for new datamap") {
     sql("DROP TABLE IF EXISTS datamapstatustest")
     sql(
       """
@@ -64,11 +64,33 @@ class TestDataMapStatus extends QueryTest with BeforeAndAfterAll {
 
     assert(details.length == 1)
 
+    assert(details.exists(p => p.getDataMapName.equals("statusdatamap") && p.getStatus == DataMapStatus.ENABLED))
+    sql("DROP TABLE IF EXISTS datamapstatustest")
+  }
+
+  test("datamap status disable for new datamap with deferred rebuild") {
+    sql("DROP TABLE IF EXISTS datamapstatustest")
+    sql(
+      """
+        | CREATE TABLE datamapstatustest(id int, name string, city string, age int)
+        | STORED BY 'org.apache.carbondata.format'
+      """.stripMargin)
+    sql(
+      s"""create datamap statusdatamap on table datamapstatustest
+         |using '${classOf[TestDataMapFactory].getName}'
+         |with deferred rebuild
+         |dmproperties('index_columns'='name')
+         | """.stripMargin)
+
+    val details = DataMapStatusManager.readDataMapStatusDetails()
+
+    assert(details.length == 1)
+
     assert(details.exists(p => p.getDataMapName.equals("statusdatamap") && p.getStatus == DataMapStatus.DISABLED))
     sql("DROP TABLE IF EXISTS datamapstatustest")
   }
 
-  test("datamap status disable after new load") {
+  test("datamap status disable after new load  with deferred rebuild") {
     sql("DROP TABLE IF EXISTS datamapstatustest1")
     sql(
       """
@@ -78,6 +100,7 @@ class TestDataMapStatus extends QueryTest with BeforeAndAfterAll {
     sql(
       s"""create datamap statusdatamap1 on table datamapstatustest1
          |using '${classOf[TestDataMapFactory].getName}'
+         |with deferred rebuild
          |dmproperties('index_columns'='name')
          | """.stripMargin)
 
@@ -94,8 +117,7 @@ class TestDataMapStatus extends QueryTest with BeforeAndAfterAll {
     sql("DROP TABLE IF EXISTS datamapstatustest1")
   }
 
-  // enable it in PR2255
-  ignore("datamap status with refresh datamap") {
+  test("datamap status with REBUILD DATAMAP") {
     sql("DROP TABLE IF EXISTS datamapstatustest2")
     sql(
       """
@@ -105,6 +127,7 @@ class TestDataMapStatus extends QueryTest with BeforeAndAfterAll {
     sql(
       s"""create datamap statusdatamap2 on table datamapstatustest2
          |using '${classOf[TestDataMapFactory].getName}'
+         |with deferred rebuild
          |dmproperties('index_columns'='name')
          | """.stripMargin)
 
@@ -119,7 +142,7 @@ class TestDataMapStatus extends QueryTest with BeforeAndAfterAll {
     assert(details.length == 1)
     assert(details.exists(p => p.getDataMapName.equals("statusdatamap2") && p.getStatus == DataMapStatus.DISABLED))
 
-    sql(s"refresh datamap statusdatamap2 on table datamapstatustest2")
+    sql(s"REBUILD DATAMAP statusdatamap2 on table datamapstatustest2")
 
     details = DataMapStatusManager.readDataMapStatusDetails()
     assert(details.length == 1)
@@ -128,8 +151,7 @@ class TestDataMapStatus extends QueryTest with BeforeAndAfterAll {
     sql("DROP TABLE IF EXISTS datamapstatustest2")
   }
 
-  // enable it in PR2255
-  ignore("datamap create without on table test") {
+  test("datamap create without on table test") {
     sql("DROP TABLE IF EXISTS datamapstatustest3")
     sql(
       """
@@ -144,10 +166,20 @@ class TestDataMapStatus extends QueryTest with BeforeAndAfterAll {
            | """.stripMargin)
 
     }
+    sql("DROP TABLE IF EXISTS datamapstatustest3")
+  }
 
+  test("rebuild datamap status") {
+    sql("DROP TABLE IF EXISTS datamapstatustest3")
+    sql(
+      """
+        | CREATE TABLE datamapstatustest3(id int, name string, city string, age int)
+        | STORED BY 'org.apache.carbondata.format'
+      """.stripMargin)
     sql(
       s"""create datamap statusdatamap3 on table datamapstatustest3
          |using '${classOf[TestDataMapFactory].getName}'
+         |with deferred rebuild
          |dmproperties('index_columns'='name')
          | """.stripMargin)
 
@@ -162,7 +194,7 @@ class TestDataMapStatus extends QueryTest with BeforeAndAfterAll {
     assert(details.length == 1)
     assert(details.exists(p => p.getDataMapName.equals("statusdatamap3") && p.getStatus == DataMapStatus.DISABLED))
 
-    sql(s"refresh datamap statusdatamap3")
+    sql(s"REBUILD DATAMAP statusdatamap3")
 
     details = DataMapStatusManager.readDataMapStatusDetails()
     assert(details.length == 1)
@@ -245,8 +277,19 @@ class TestDataMapFactory(
     false
   }
 
-  override def createRefresher(segment: Segment,
-      shardName: String): DataMapRefresher = {
-    ???
+  override def createBuilder(segment: Segment,
+      shardName: String): DataMapBuilder = {
+    return new DataMapBuilder {
+      override def initialize(): Unit = { }
+
+      override def addRow(blockletId: Int,
+          pageId: Int,
+          rowId: Int,
+          values: Array[AnyRef]): Unit = { }
+
+      override def finish(): Unit = { }
+
+      override def close(): Unit = { }
+    }
   }
 }

http://git-wip-us.apache.org/repos/asf/carbondata/blob/747be9b1/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/detailquery/SearchModeTestCase.scala
----------------------------------------------------------------------
diff --git a/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/detailquery/SearchModeTestCase.scala b/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/detailquery/SearchModeTestCase.scala
index c193fcf..2f3488e 100644
--- a/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/detailquery/SearchModeTestCase.scala
+++ b/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/detailquery/SearchModeTestCase.scala
@@ -28,8 +28,6 @@ import org.apache.carbondata.spark.util.DataGenerator
  * Test Suite for search mode
  */
 
-// TODO: Need to Fix
-@Ignore
 class SearchModeTestCase extends QueryTest with BeforeAndAfterAll {
 
   val numRows = 500 * 1000

http://git-wip-us.apache.org/repos/asf/carbondata/blob/747be9b1/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/iud/TestInsertAndOtherCommandConcurrent.scala
----------------------------------------------------------------------
diff --git a/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/iud/TestInsertAndOtherCommandConcurrent.scala b/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/iud/TestInsertAndOtherCommandConcurrent.scala
index 9c5297d..3cabc7b 100644
--- a/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/iud/TestInsertAndOtherCommandConcurrent.scala
+++ b/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/iud/TestInsertAndOtherCommandConcurrent.scala
@@ -24,11 +24,12 @@ import java.util.concurrent.{Callable, ExecutorService, Executors, Future}
 import scala.collection.JavaConverters._
 
 import org.apache.spark.sql.test.util.QueryTest
+import org.apache.spark.sql.types.StructType
 import org.apache.spark.sql.{DataFrame, SaveMode}
 import org.scalatest.{BeforeAndAfterAll, BeforeAndAfterEach}
 
 import org.apache.carbondata.core.constants.CarbonCommonConstants
-import org.apache.carbondata.core.datamap.dev.{DataMapRefresher, DataMapWriter}
+import org.apache.carbondata.core.datamap.dev.{DataMapBuilder, DataMapWriter}
 import org.apache.carbondata.core.datamap.dev.cgdatamap.{CoarseGrainDataMap, CoarseGrainDataMapFactory}
 import org.apache.carbondata.core.datamap.{DataMapDistributable, DataMapMeta, Segment}
 import org.apache.carbondata.core.datastore.page.ColumnPage
@@ -43,18 +44,30 @@ import org.apache.carbondata.events.Event
 // This testsuite test insert and insert overwrite with other commands concurrently
 class TestInsertAndOtherCommandConcurrent extends QueryTest with BeforeAndAfterAll with BeforeAndAfterEach {
   private val executorService: ExecutorService = Executors.newFixedThreadPool(10)
-  var df: DataFrame = _
+  var testData: DataFrame = _
 
   override def beforeAll {
     dropTable()
     buildTestData()
 
+    createTable("orders", testData.schema)
+    createTable("orders_overwrite", testData.schema)
     sql(
       s"""
          | create datamap test on table orders
          | using '${classOf[WaitingDataMapFactory].getName}'
          | dmproperties('index_columns'='o_name')
        """.stripMargin)
+
+    testData.write
+      .format("carbondata")
+      .option("tableName", "temp_table")
+      .option("tempCSV", "false")
+      .mode(SaveMode.Overwrite)
+      .save()
+
+    sql(s"insert into orders select * from temp_table")
+    sql(s"insert into orders_overwrite select * from temp_table")
   }
 
   private def buildTestData(): Unit = {
@@ -66,23 +79,17 @@ class TestInsertAndOtherCommandConcurrent extends QueryTest with BeforeAndAfterA
     import sqlContext.implicits._
 
     val sdf = new SimpleDateFormat("yyyy-MM-dd")
-    df = sqlContext.sparkSession.sparkContext.parallelize(1 to 150000)
+    testData = sqlContext.sparkSession.sparkContext.parallelize(1 to 150000)
       .map(value => (value, new java.sql.Date(sdf.parse("2015-07-" + (value % 10 + 10)).getTime),
         "china", "aaa" + value, "phone" + 555 * value, "ASD" + (60000 + value), 14999 + value,
         "ordersTable" + value))
       .toDF("o_id", "o_date", "o_country", "o_name",
         "o_phonetype", "o_serialname", "o_salary", "o_comment")
-    createTable("orders")
-    createTable("orders_overwrite")
   }
 
-  private def createTable(tableName: String): Unit = {
-    df.write
-      .format("carbondata")
-      .option("tableName", tableName)
-      .option("tempCSV", "false")
-      .mode(SaveMode.Overwrite)
-      .save()
+  private def createTable(tableName: String, schema: StructType): Unit = {
+    val schemaString = schema.fields.map(x => x.name + " " + x.dataType.typeName).mkString(", ")
+    sql(s"CREATE TABLE $tableName ($schemaString) stored as carbondata")
   }
 
   override def afterAll {
@@ -91,7 +98,7 @@ class TestInsertAndOtherCommandConcurrent extends QueryTest with BeforeAndAfterA
   }
 
   override def beforeEach(): Unit = {
-    Global.overwriteRunning = false
+    Global.loading = false
   }
 
   private def dropTable() = {
@@ -101,12 +108,12 @@ class TestInsertAndOtherCommandConcurrent extends QueryTest with BeforeAndAfterA
 
   // run the input SQL and block until it is running
   private def runSqlAsync(sql: String): Future[String] = {
-    assert(!Global.overwriteRunning)
+    assert(!Global.loading)
     var count = 0
     val future = executorService.submit(
       new QueryTask(sql)
     )
-    while (!Global.overwriteRunning && count < 1000) {
+    while (!Global.loading && count < 1000) {
       Thread.sleep(10)
       // to avoid dead loop in case WaitingDataMapFactory is not invoked
       count += 1
@@ -202,9 +209,7 @@ class TestInsertAndOtherCommandConcurrent extends QueryTest with BeforeAndAfterA
     sql("drop table if exists t1")
 
     // number of segment is 1 after createTable
-    createTable("t1")
-    // number of segment is 2 after insert
-    sql("insert into table t1 select * from orders_overwrite")
+    createTable("t1", testData.schema)
 
     sql(
       s"""
@@ -212,6 +217,12 @@ class TestInsertAndOtherCommandConcurrent extends QueryTest with BeforeAndAfterA
          | using '${classOf[WaitingDataMapFactory].getName}'
          | dmproperties('index_columns'='o_name')
        """.stripMargin)
+
+    sql("insert into table t1 select * from orders_overwrite")
+    Thread.sleep(1100)
+    sql("insert into table t1 select * from orders_overwrite")
+    Thread.sleep(1100)
+
     val future = runSqlAsync("insert into table t1 select * from orders_overwrite")
     sql("alter table t1 compact 'MAJOR'")
     assert(future.get.contains("PASS"))
@@ -279,15 +290,13 @@ class TestInsertAndOtherCommandConcurrent extends QueryTest with BeforeAndAfterA
 }
 
 object Global {
-  var overwriteRunning = false
+  var loading = false
 }
 
 class WaitingDataMapFactory(
     carbonTable: CarbonTable,
     dataMapSchema: DataMapSchema) extends CoarseGrainDataMapFactory(carbonTable, dataMapSchema) {
 
-  private var identifier: AbsoluteTableIdentifier = carbonTable.getAbsoluteTableIdentifier
-
   override def fireEvent(event: Event): Unit = ???
 
   override def clear(segmentId: Segment): Unit = {}
@@ -311,14 +320,14 @@ class WaitingDataMapFactory(
 
       override def onBlockStart(blockId: String): Unit = {
         // trigger the second SQL to execute
-        Global.overwriteRunning = true
+        Global.loading = true
 
         // wait for 1 second to let the second SQL finish
         Thread.sleep(1000)
       }
 
       override def finish(): Unit = {
-
+        Global.loading = false
       }
     }
   }
@@ -341,8 +350,8 @@ class WaitingDataMapFactory(
     false
   }
 
-  override def createRefresher(segment: Segment,
-      shardName: String): DataMapRefresher = {
+  override def createBuilder(segment: Segment,
+      shardName: String): DataMapBuilder = {
     ???
   }
 }
\ No newline at end of file
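
Note on the test harness above: WaitingDataMapFactory coordinates the two concurrent statements through the shared Global.loading flag. runSqlAsync submits the first statement to an executor and polls the flag, onBlockStart raises it once loading has actually started, and finish() lowers it again. A minimal, CarbonData-independent sketch of that hand-off (the executor, flag and timings below are illustrative assumptions, not part of the patch):

    import java.util.concurrent.{Callable, Executors}

    object ConcurrentLoadSketch {
      @volatile var loading = false                    // stands in for Global.loading

      def main(args: Array[String]): Unit = {
        val executor = Executors.newSingleThreadExecutor()

        // First "statement": raises the flag when work starts (like onBlockStart)
        // and lowers it again when done (like finish()).
        val future = executor.submit(new Callable[String] {
          override def call(): String = {
            loading = true
            Thread.sleep(1000)                         // simulated load
            loading = false
            "PASS"
          }
        })

        // Block until the first statement is really running, with a bounded wait
        // so a missing callback cannot hang the test.
        var count = 0
        while (!loading && count < 1000) {
          Thread.sleep(10)
          count += 1
        }

        // ... the concurrent second statement would run here ...

        assert(future.get().contains("PASS"))
        executor.shutdown()
      }
    }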

http://git-wip-us.apache.org/repos/asf/carbondata/blob/747be9b1/integration/spark-common/src/main/scala/org/apache/spark/sql/catalyst/CarbonDDLSqlParser.scala
----------------------------------------------------------------------
diff --git a/integration/spark-common/src/main/scala/org/apache/spark/sql/catalyst/CarbonDDLSqlParser.scala b/integration/spark-common/src/main/scala/org/apache/spark/sql/catalyst/CarbonDDLSqlParser.scala
index fea3482..890f8fc 100644
--- a/integration/spark-common/src/main/scala/org/apache/spark/sql/catalyst/CarbonDDLSqlParser.scala
+++ b/integration/spark-common/src/main/scala/org/apache/spark/sql/catalyst/CarbonDDLSqlParser.scala
@@ -181,7 +181,8 @@ abstract class CarbonDDLSqlParser extends AbstractCarbonSparkSQLParser {
   protected val ON = carbonKeyWord("ON")
   protected val DMPROPERTIES = carbonKeyWord("DMPROPERTIES")
   protected val SELECT = carbonKeyWord("SELECT")
-  protected val REFRESH = carbonKeyWord("REFRESH")
+  protected val REBUILD = carbonKeyWord("REBUILD")
+  protected val DEFERRED = carbonKeyWord("DEFERRED")
 
   protected val doubleQuotedString = "\"([^\"]+)\"".r
   protected val singleQuotedString = "'([^']+)'".r

http://git-wip-us.apache.org/repos/asf/carbondata/blob/747be9b1/integration/spark2/src/main/java/org/apache/carbondata/datamap/DataMapProperty.java
----------------------------------------------------------------------
diff --git a/integration/spark2/src/main/java/org/apache/carbondata/datamap/DataMapProperty.java b/integration/spark2/src/main/java/org/apache/carbondata/datamap/DataMapProperty.java
deleted file mode 100644
index 043acb1..0000000
--- a/integration/spark2/src/main/java/org/apache/carbondata/datamap/DataMapProperty.java
+++ /dev/null
@@ -1,33 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.carbondata.datamap;
-
-import org.apache.carbondata.common.annotations.InterfaceAudience;
-
-/**
- * Property that can be specified when creating DataMap
- */
-@InterfaceAudience.Internal
-public class DataMapProperty {
-
-  /**
-   * Used to specify the store location of the datamap
-   */
-  public static final String PARTITIONING = "partitioning";
-  public static final String PATH = "path";
-}

http://git-wip-us.apache.org/repos/asf/carbondata/blob/747be9b1/integration/spark2/src/main/java/org/apache/carbondata/datamap/IndexDataMapProvider.java
----------------------------------------------------------------------
diff --git a/integration/spark2/src/main/java/org/apache/carbondata/datamap/IndexDataMapProvider.java b/integration/spark2/src/main/java/org/apache/carbondata/datamap/IndexDataMapProvider.java
index a5124a0..0642e01 100644
--- a/integration/spark2/src/main/java/org/apache/carbondata/datamap/IndexDataMapProvider.java
+++ b/integration/spark2/src/main/java/org/apache/carbondata/datamap/IndexDataMapProvider.java
@@ -105,7 +105,7 @@ public class IndexDataMapProvider extends DataMapProvider {
 
   @Override
   public void rebuild() {
-    IndexDataMapRefreshRDD.rebuildDataMap(sparkSession, getMainTable(), getDataMapSchema());
+    IndexDataMapRebuildRDD.rebuildDataMap(sparkSession, getMainTable(), getDataMapSchema());
   }
 
   @Override

http://git-wip-us.apache.org/repos/asf/carbondata/blob/747be9b1/integration/spark2/src/main/java/org/apache/carbondata/datamap/PreAggregateDataMapProvider.java
----------------------------------------------------------------------
diff --git a/integration/spark2/src/main/java/org/apache/carbondata/datamap/PreAggregateDataMapProvider.java b/integration/spark2/src/main/java/org/apache/carbondata/datamap/PreAggregateDataMapProvider.java
index 746a361..37d49e5 100644
--- a/integration/spark2/src/main/java/org/apache/carbondata/datamap/PreAggregateDataMapProvider.java
+++ b/integration/spark2/src/main/java/org/apache/carbondata/datamap/PreAggregateDataMapProvider.java
@@ -17,11 +17,15 @@
 
 package org.apache.carbondata.datamap;
 
+import java.util.HashMap;
+import java.util.Map;
+
 import org.apache.carbondata.common.annotations.InterfaceAudience;
 import org.apache.carbondata.common.exceptions.sql.MalformedDataMapCommandException;
 import org.apache.carbondata.core.datamap.DataMapCatalog;
 import org.apache.carbondata.core.datamap.DataMapProvider;
 import org.apache.carbondata.core.datamap.dev.DataMapFactory;
+import org.apache.carbondata.core.metadata.schema.datamap.DataMapProperty;
 import org.apache.carbondata.core.metadata.schema.table.CarbonTable;
 import org.apache.carbondata.core.metadata.schema.table.DataMapSchema;
 
@@ -55,9 +59,11 @@ public class PreAggregateDataMapProvider extends DataMapProvider {
   private void validateDmProperty(DataMapSchema dataMapSchema)
       throws MalformedDataMapCommandException {
     if (!dataMapSchema.getProperties().isEmpty()) {
-      if (dataMapSchema.getProperties().size() > 2 || (
-              !dataMapSchema.getProperties().containsKey(DataMapProperty.PATH) &&
-                      !dataMapSchema.getProperties().containsKey(DataMapProperty.PARTITIONING))) {
+      Map<String, String> properties = new HashMap<>(dataMapSchema.getProperties());
+      properties.remove(DataMapProperty.DEFERRED_REBUILD);
+      properties.remove(DataMapProperty.PATH);
+      properties.remove(DataMapProperty.PARTITIONING);
+      if (properties.size() > 0) {
         throw new MalformedDataMapCommandException(
                 "Only 'path' and 'partitioning' dmproperties are allowed for this datamap");
       }
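
The validation above whitelists DMPROPERTIES by copying the user-supplied map, removing every allowed key, and rejecting the command if anything is left over. A small sketch of the same whitelist-by-removal idea, with purely illustrative key names rather than the actual DataMapProperty constants:

    object DmPropertyValidationSketch {
      def validate(userProps: Map[String, String]): Unit = {
        // Hypothetical whitelist; the real code removes DataMapProperty constants.
        val allowed = Set("path", "partitioning", "deferred.rebuild")
        val unknown = userProps.keySet.filterNot(allowed.contains)
        if (unknown.nonEmpty) {
          throw new IllegalArgumentException(
            s"Only ${allowed.mkString(", ")} dmproperties are allowed, found: ${unknown.mkString(", ")}")
        }
      }
    }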

http://git-wip-us.apache.org/repos/asf/carbondata/blob/747be9b1/integration/spark2/src/main/scala/org/apache/carbondata/datamap/IndexDataMapRebuildRDD.scala
----------------------------------------------------------------------
diff --git a/integration/spark2/src/main/scala/org/apache/carbondata/datamap/IndexDataMapRebuildRDD.scala b/integration/spark2/src/main/scala/org/apache/carbondata/datamap/IndexDataMapRebuildRDD.scala
new file mode 100644
index 0000000..5902783
--- /dev/null
+++ b/integration/spark2/src/main/scala/org/apache/carbondata/datamap/IndexDataMapRebuildRDD.scala
@@ -0,0 +1,318 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.carbondata.datamap
+
+import java.io.{File, IOException}
+import java.text.SimpleDateFormat
+import java.util
+
+import scala.collection.JavaConverters._
+
+import org.apache.hadoop.conf.Configuration
+import org.apache.hadoop.mapred.JobConf
+import org.apache.hadoop.mapreduce.{Job, TaskAttemptID, TaskType}
+import org.apache.hadoop.mapreduce.task.TaskAttemptContextImpl
+import org.apache.spark.{CarbonInputMetrics, Partition, TaskContext}
+import org.apache.spark.deploy.SparkHadoopUtil
+import org.apache.spark.sql.SparkSession
+
+import org.apache.carbondata.common.logging.LogServiceFactory
+import org.apache.carbondata.core.datamap.{DataMapRegistry, DataMapStoreManager, Segment}
+import org.apache.carbondata.core.datamap.dev.DataMapBuilder
+import org.apache.carbondata.core.datastore.impl.FileFactory
+import org.apache.carbondata.core.metadata.datatype.{DataType, DataTypes}
+import org.apache.carbondata.core.metadata.schema.table.{CarbonTable, DataMapSchema, TableInfo}
+import org.apache.carbondata.core.metadata.schema.table.column.CarbonColumn
+import org.apache.carbondata.core.statusmanager.SegmentStatusManager
+import org.apache.carbondata.core.util.TaskMetricsMap
+import org.apache.carbondata.core.util.path.CarbonTablePath
+import org.apache.carbondata.hadoop.{CarbonInputSplit, CarbonMultiBlockSplit, CarbonProjection, CarbonRecordReader}
+import org.apache.carbondata.hadoop.api.{CarbonInputFormat, CarbonTableInputFormat}
+import org.apache.carbondata.hadoop.readsupport.CarbonReadSupport
+import org.apache.carbondata.spark.{RefreshResult, RefreshResultImpl}
+import org.apache.carbondata.spark.rdd.{CarbonRDDWithTableInfo, CarbonSparkPartition}
+import org.apache.carbondata.spark.util.SparkDataTypeConverterImpl
+
+/**
+ * Helper object to rebuild the index DataMap
+ */
+object IndexDataMapRebuildRDD {
+
+  /**
+   * Rebuild the datamap for all existing data in the table
+   */
+  def rebuildDataMap(
+      sparkSession: SparkSession,
+      carbonTable: CarbonTable,
+      schema: DataMapSchema
+  ): Unit = {
+    val tableIdentifier = carbonTable.getAbsoluteTableIdentifier
+    val segmentStatusManager = new SegmentStatusManager(tableIdentifier)
+    val validAndInvalidSegments = segmentStatusManager.getValidAndInvalidSegments()
+    val validSegments = validAndInvalidSegments.getValidSegments
+    val indexedCarbonColumns = carbonTable.getIndexedColumns(schema)
+
+    // loop all segments to rebuild DataMap
+    validSegments.asScala.foreach { segment =>
+      // if the lucene datamap folder already exists, no need to build the lucene datamap again
+      refreshOneSegment(sparkSession, carbonTable, schema.getDataMapName,
+        indexedCarbonColumns, segment.getSegmentNo);
+    }
+  }
+
+  private def refreshOneSegment(
+      sparkSession: SparkSession,
+      carbonTable: CarbonTable,
+      dataMapName: String,
+      indexColumns: java.util.List[CarbonColumn],
+      segmentId: String): Unit = {
+
+    val dataMapStorePath =
+      CarbonTablePath.getSegmentPath(carbonTable.getTablePath, segmentId) +
+      File.separator +
+      dataMapName
+
+    if (!FileFactory.isFileExist(dataMapStorePath)) {
+      if (FileFactory.mkdirs(dataMapStorePath, FileFactory.getFileType(dataMapStorePath))) {
+        try {
+          val status = new IndexDataMapRebuildRDD[String, Boolean](
+            sparkSession,
+            new RefreshResultImpl(),
+            carbonTable.getTableInfo,
+            dataMapName,
+            indexColumns.asScala.toArray,
+            segmentId
+          ).collect()
+
+          status.find(_._2 == false).foreach { task =>
+            throw new Exception(
+              s"Task failed to rebuild datamap $dataMapName on segment_$segmentId")
+          }
+        } catch {
+          case ex: Throwable =>
+            // process failure
+            FileFactory.deleteAllCarbonFilesOfDir(FileFactory.getCarbonFile(dataMapStorePath))
+            throw new Exception(
+              s"Failed to rebuild datamap $dataMapName on segment_$segmentId", ex)
+        }
+      } else {
+        throw new IOException(s"Failed to create directory $dataMapStorePath")
+      }
+    }
+  }
+
+}
+
+class OriginalReadSupport(dataTypes: Array[DataType]) extends CarbonReadSupport[Array[Object]] {
+  override def initialize(carbonColumns: Array[CarbonColumn],
+      carbonTable: CarbonTable): Unit = {
+  }
+
+  override def readRow(data: Array[Object]): Array[Object] = {
+    dataTypes.zipWithIndex.foreach { case (dataType, i) =>
+      if (dataType == DataTypes.STRING) {
+        data(i) = data(i).toString
+      }
+    }
+    data
+  }
+
+  override def close(): Unit = {
+  }
+}
+
+class IndexDataMapRebuildRDD[K, V](
+    session: SparkSession,
+    result: RefreshResult[K, V],
+    @transient tableInfo: TableInfo,
+    dataMapName: String,
+    indexColumns: Array[CarbonColumn],
+    segmentId: String
+) extends CarbonRDDWithTableInfo[(K, V)](
+  session.sparkContext, Nil, tableInfo.serialize()) {
+
+  private val dataMapSchema = DataMapStoreManager.getInstance().getDataMapSchema(dataMapName)
+  private val queryId = sparkContext.getConf.get("queryId", System.nanoTime() + "")
+  private val jobTrackerId: String = {
+    val formatter = new SimpleDateFormat("yyyyMMddHHmm")
+    formatter.format(new util.Date())
+  }
+
+  override def internalCompute(split: Partition, context: TaskContext): Iterator[(K, V)] = {
+    val LOGGER = LogServiceFactory.getLogService(this.getClass.getName)
+    val dataMapFactory =
+      DataMapManager.get().getDataMapProvider(
+        CarbonTable.buildFromTableInfo(getTableInfo), dataMapSchema, session).getDataMapFactory
+    var status = false
+    val inputMetrics = new CarbonInputMetrics
+    TaskMetricsMap.getInstance().registerThreadCallback()
+    val inputSplit = split.asInstanceOf[CarbonSparkPartition].split.value
+    inputMetrics.initBytesReadCallback(context, inputSplit)
+
+    val attemptId = new TaskAttemptID(jobTrackerId, id, TaskType.MAP, split.index, 0)
+    val attemptContext = new TaskAttemptContextImpl(new Configuration(), attemptId)
+    val format = createInputFormat(attemptContext)
+
+    val model = format.createQueryModel(inputSplit, attemptContext)
+    // one query id per table
+    model.setQueryId(queryId)
+    model.setVectorReader(false)
+    model.setForcedDetailRawQuery(false)
+    model.setRequiredRowId(true)
+
+    var reader: CarbonRecordReader[Array[Object]] = null
+    var refresher: DataMapBuilder = null
+    try {
+      reader = new CarbonRecordReader(
+        model, new OriginalReadSupport(indexColumns.map(_.getDataType)), inputMetrics)
+      reader.initialize(inputSplit, attemptContext)
+
+      // we use task name as shard name to create the folder for this datamap
+      val shardName = CarbonTablePath.getShardName(inputSplit.getAllSplits.get(0).getBlockPath)
+      refresher = dataMapFactory.createBuilder(new Segment(segmentId), shardName)
+      refresher.initialize()
+
+      var blockletId = 0
+      var firstRow = true
+      while (reader.nextKeyValue()) {
+        val rowWithPosition = reader.getCurrentValue
+        val size = rowWithPosition.length
+        val pageId = rowWithPosition(size - 2).asInstanceOf[Int]
+        val rowId = rowWithPosition(size - 1).asInstanceOf[Int]
+
+        if (!firstRow && pageId == 0 && rowId == 0) {
+          // new blocklet started, increase blockletId
+          blockletId = blockletId + 1
+        } else {
+          firstRow = false
+        }
+
+        refresher.addRow(blockletId, pageId, rowId, rowWithPosition)
+      }
+
+      refresher.finish()
+
+      status = true
+    } finally {
+      if (reader != null) {
+        try {
+          reader.close()
+        } catch {
+          case ex: Throwable =>
+            LOGGER.error(ex, "Failed to close reader")
+        }
+      }
+
+      if (refresher != null) {
+        try {
+          refresher.close()
+        } catch {
+          case ex: Throwable =>
+            LOGGER.error(ex, "Failed to close index writer")
+        }
+      }
+    }
+
+    new Iterator[(K, V)] {
+
+      var finished = false
+
+      override def hasNext: Boolean = {
+        !finished
+      }
+
+      override def next(): (K, V) = {
+        finished = true
+        result.getKey(split.index.toString, status)
+      }
+    }
+  }
+
+
+  private def createInputFormat(
+      attemptContext: TaskAttemptContextImpl) = {
+    val format = new CarbonTableInputFormat[Object]
+    val tableInfo1 = getTableInfo
+    val conf = attemptContext.getConfiguration
+    CarbonInputFormat.setTableInfo(conf, tableInfo1)
+    CarbonInputFormat.setDatabaseName(conf, tableInfo1.getDatabaseName)
+    CarbonInputFormat.setTableName(conf, tableInfo1.getFactTable.getTableName)
+    CarbonInputFormat.setDataTypeConverter(conf, classOf[SparkDataTypeConverterImpl])
+
+    val identifier = tableInfo1.getOrCreateAbsoluteTableIdentifier()
+    CarbonInputFormat.setTablePath(
+      conf,
+      identifier.appendWithLocalPrefix(identifier.getTablePath))
+
+    CarbonInputFormat.setSegmentsToAccess(
+      conf,
+      Segment.toSegmentList(Array(segmentId), null))
+
+    CarbonInputFormat.setColumnProjection(
+      conf,
+      new CarbonProjection(indexColumns.map(_.getColName)))
+    format
+  }
+
+  override protected def getPartitions = {
+    if (!dataMapSchema.isIndexDataMap) {
+      throw new UnsupportedOperationException
+    }
+    val conf = new Configuration()
+    val jobConf = new JobConf(conf)
+    SparkHadoopUtil.get.addCredentials(jobConf)
+    val job = Job.getInstance(jobConf)
+    job.getConfiguration.set("query.id", queryId)
+
+    val format = new CarbonTableInputFormat[Object]
+
+    CarbonInputFormat.setSegmentsToAccess(
+      job.getConfiguration,
+      Segment.toSegmentList(Array(segmentId), null))
+
+    CarbonInputFormat.setTableInfo(
+      job.getConfiguration,
+      tableInfo)
+    CarbonInputFormat.setTablePath(
+      job.getConfiguration,
+      tableInfo.getOrCreateAbsoluteTableIdentifier().getTablePath)
+    CarbonInputFormat.setDatabaseName(
+      job.getConfiguration,
+      tableInfo.getDatabaseName)
+    CarbonInputFormat.setTableName(
+      job.getConfiguration,
+      tableInfo.getFactTable.getTableName)
+
+    format
+      .getSplits(job)
+      .asScala
+      .map(_.asInstanceOf[CarbonInputSplit])
+      .groupBy(_.taskId)
+      .map { group =>
+        new CarbonMultiBlockSplit(
+          group._2.asJava,
+          group._2.flatMap(_.getLocations).toArray)
+      }
+      .zipWithIndex
+      .map { split =>
+        new CarbonSparkPartition(id, split._2, split._1)
+      }
+      .toArray
+  }
+}
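
In internalCompute above, the record reader appends the page id and row id to every row (model.setRequiredRowId(true)), and the rebuild derives a running blocklet id from the point where both counters reset to zero. A stripped-down sketch of that boundary detection, assuming rows arrive as Array[Object] with the two positions already appended (names here are illustrative):

    object BlockletIdSketch {
      // A new blocklet starts whenever both the appended pageId and rowId
      // reset to 0, except on the very first row.
      def assignBlockletIds(rows: Iterator[Array[Object]]): Iterator[(Int, Array[Object])] = {
        var blockletId = 0
        var firstRow = true
        rows.map { row =>
          val pageId = row(row.length - 2).asInstanceOf[Int]
          val rowId = row(row.length - 1).asInstanceOf[Int]
          if (!firstRow && pageId == 0 && rowId == 0) {
            blockletId += 1
          } else {
            firstRow = false
          }
          (blockletId, row)
        }
      }
    }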

http://git-wip-us.apache.org/repos/asf/carbondata/blob/747be9b1/integration/spark2/src/main/scala/org/apache/carbondata/datamap/IndexDataMapRefreshRDD.scala
----------------------------------------------------------------------
diff --git a/integration/spark2/src/main/scala/org/apache/carbondata/datamap/IndexDataMapRefreshRDD.scala b/integration/spark2/src/main/scala/org/apache/carbondata/datamap/IndexDataMapRefreshRDD.scala
deleted file mode 100644
index c341c36..0000000
--- a/integration/spark2/src/main/scala/org/apache/carbondata/datamap/IndexDataMapRefreshRDD.scala
+++ /dev/null
@@ -1,317 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.carbondata.datamap
-
-import java.io.{File, IOException}
-import java.text.SimpleDateFormat
-import java.util
-
-import scala.collection.JavaConverters._
-
-import org.apache.hadoop.conf.Configuration
-import org.apache.hadoop.mapred.JobConf
-import org.apache.hadoop.mapreduce.{Job, TaskAttemptID, TaskType}
-import org.apache.hadoop.mapreduce.task.TaskAttemptContextImpl
-import org.apache.spark.{CarbonInputMetrics, Partition, TaskContext}
-import org.apache.spark.deploy.SparkHadoopUtil
-import org.apache.spark.sql.SparkSession
-
-import org.apache.carbondata.common.logging.LogServiceFactory
-import org.apache.carbondata.core.datamap.{DataMapStoreManager, Segment}
-import org.apache.carbondata.core.datamap.dev.DataMapRefresher
-import org.apache.carbondata.core.datastore.impl.FileFactory
-import org.apache.carbondata.core.metadata.datatype.{DataType, DataTypes}
-import org.apache.carbondata.core.metadata.schema.table.{CarbonTable, DataMapSchema, TableInfo}
-import org.apache.carbondata.core.metadata.schema.table.column.CarbonColumn
-import org.apache.carbondata.core.statusmanager.SegmentStatusManager
-import org.apache.carbondata.core.util.TaskMetricsMap
-import org.apache.carbondata.core.util.path.CarbonTablePath
-import org.apache.carbondata.hadoop.{CarbonInputSplit, CarbonMultiBlockSplit, CarbonProjection, CarbonRecordReader}
-import org.apache.carbondata.hadoop.api.{CarbonInputFormat, CarbonTableInputFormat}
-import org.apache.carbondata.hadoop.readsupport.CarbonReadSupport
-import org.apache.carbondata.spark.{RefreshResult, RefreshResultImpl}
-import org.apache.carbondata.spark.rdd.{CarbonRDDWithTableInfo, CarbonSparkPartition}
-import org.apache.carbondata.spark.util.SparkDataTypeConverterImpl
-
-/**
- * Helper object to rebuild the index DataMap
- */
-object IndexDataMapRefreshRDD {
-
-  /**
-   * Rebuild the datamap for all existing data in the table
-   */
-  def rebuildDataMap(
-      sparkSession: SparkSession,
-      carbonTable: CarbonTable,
-      schema: DataMapSchema
-  ): Unit = {
-    val tableIdentifier = carbonTable.getAbsoluteTableIdentifier
-    val segmentStatusManager = new SegmentStatusManager(tableIdentifier)
-    val validAndInvalidSegments = segmentStatusManager.getValidAndInvalidSegments()
-    val validSegments = validAndInvalidSegments.getValidSegments
-    val indexedCarbonColumns = carbonTable.getIndexedColumns(schema)
-
-    // loop all segments to rebuild DataMap
-    validSegments.asScala.foreach { segment =>
-      // if lucene datamap folder is exists, not require to build lucene datamap again
-      refreshOneSegment(sparkSession, carbonTable, schema.getDataMapName,
-        indexedCarbonColumns, segment.getSegmentNo);
-    }
-  }
-
-  private def refreshOneSegment(
-      sparkSession: SparkSession,
-      carbonTable: CarbonTable,
-      dataMapName: String,
-      indexColumns: java.util.List[CarbonColumn],
-      segmentId: String): Unit = {
-
-    val dataMapStorePath =
-      CarbonTablePath.getSegmentPath(carbonTable.getTablePath, segmentId) +
-      File.separator +
-      dataMapName
-
-    if (!FileFactory.isFileExist(dataMapStorePath)) {
-      if (FileFactory.mkdirs(dataMapStorePath, FileFactory.getFileType(dataMapStorePath))) {
-        try {
-          val status = new IndexDataMapRefreshRDD[String, Boolean](
-            sparkSession,
-            new RefreshResultImpl(),
-            carbonTable.getTableInfo,
-            dataMapName,
-            indexColumns.asScala.toArray,
-            segmentId
-          ).collect()
-
-          status.find(_._2 == false).foreach { task =>
-            throw new Exception(
-              s"Task Failed to refresh datamap $dataMapName on segment_$segmentId")
-          }
-        } catch {
-          case ex: Throwable =>
-            // process failure
-            FileFactory.deleteAllCarbonFilesOfDir(FileFactory.getCarbonFile(dataMapStorePath))
-            throw new Exception(
-              s"Failed to refresh datamap $dataMapName on segment_$segmentId", ex)
-        }
-      } else {
-        throw new IOException(s"Failed to create directory $dataMapStorePath")
-      }
-    }
-  }
-
-}
-
-class OriginalReadSupport(dataTypes: Array[DataType]) extends CarbonReadSupport[Array[Object]] {
-  override def initialize(carbonColumns: Array[CarbonColumn],
-      carbonTable: CarbonTable): Unit = {
-  }
-
-  override def readRow(data: Array[Object]): Array[Object] = {
-    dataTypes.zipWithIndex.foreach { case (dataType, i) =>
-      if (dataType == DataTypes.STRING) {
-        data(i) = data(i).toString
-      }
-    }
-    data
-  }
-
-  override def close(): Unit = {
-  }
-}
-
-class IndexDataMapRefreshRDD[K, V](
-    session: SparkSession,
-    result: RefreshResult[K, V],
-    @transient tableInfo: TableInfo,
-    dataMapName: String,
-    indexColumns: Array[CarbonColumn],
-    segmentId: String
-) extends CarbonRDDWithTableInfo[(K, V)](
-  session.sparkContext, Nil, tableInfo.serialize()) {
-
-  private val dataMapSchema = DataMapStoreManager.getInstance().getDataMapSchema(dataMapName)
-  private val queryId = sparkContext.getConf.get("queryId", System.nanoTime() + "")
-  private val jobTrackerId: String = {
-    val formatter = new SimpleDateFormat("yyyyMMddHHmm")
-    formatter.format(new util.Date())
-  }
-
-  override def internalCompute(split: Partition, context: TaskContext): Iterator[(K, V)] = {
-    val LOGGER = LogServiceFactory.getLogService(this.getClass.getName)
-    val dataMapFactory =
-      DataMapManager.get().getDataMapProvider(
-        CarbonTable.buildFromTableInfo(getTableInfo), dataMapSchema, session).getDataMapFactory
-    var status = false
-    val inputMetrics = new CarbonInputMetrics
-    TaskMetricsMap.getInstance().registerThreadCallback()
-    val inputSplit = split.asInstanceOf[CarbonSparkPartition].split.value
-    inputMetrics.initBytesReadCallback(context, inputSplit)
-
-    val attemptId = new TaskAttemptID(jobTrackerId, id, TaskType.MAP, split.index, 0)
-    val attemptContext = new TaskAttemptContextImpl(new Configuration(), attemptId)
-    val format = createInputFormat(attemptContext)
-
-    val model = format.createQueryModel(inputSplit, attemptContext)
-    // one query id per table
-    model.setQueryId(queryId)
-    model.setVectorReader(false)
-    model.setForcedDetailRawQuery(false)
-    model.setRequiredRowId(true)
-
-    var reader: CarbonRecordReader[Array[Object]] = null
-    var refresher: DataMapRefresher = null
-    try {
-      reader = new CarbonRecordReader(
-        model, new OriginalReadSupport(indexColumns.map(_.getDataType)), inputMetrics)
-      reader.initialize(inputSplit, attemptContext)
-
-      // we use task name as shard name to create the folder for this datamap
-      val shardName = CarbonTablePath.getShardName(inputSplit.getAllSplits.get(0).getBlockPath)
-      refresher = dataMapFactory.createRefresher(new Segment(segmentId), shardName)
-      refresher.initialize()
-
-      var blockletId = 0
-      var firstRow = true
-      while (reader.nextKeyValue()) {
-        val rowWithPosition = reader.getCurrentValue
-        val size = rowWithPosition.length
-        val pageId = rowWithPosition(size - 2).asInstanceOf[Int]
-        val rowId = rowWithPosition(size - 1).asInstanceOf[Int]
-
-        if (!firstRow && pageId == 0 && rowId == 0) {
-          // new blocklet started, increase blockletId
-          blockletId = blockletId + 1
-        } else {
-          firstRow = false
-        }
-
-        refresher.addRow(blockletId, pageId, rowId, rowWithPosition)
-      }
-
-      refresher.finish()
-
-      status = true
-    } finally {
-      if (reader != null) {
-        try {
-          reader.close()
-        } catch {
-          case ex: Throwable =>
-            LOGGER.error(ex, "Failed to close reader")
-        }
-      }
-
-      if (refresher != null) {
-        try {
-          refresher.close()
-        } catch {
-          case ex: Throwable =>
-            LOGGER.error(ex, "Failed to close index writer")
-        }
-      }
-    }
-
-    new Iterator[(K, V)] {
-
-      var finished = false
-
-      override def hasNext: Boolean = {
-        !finished
-      }
-
-      override def next(): (K, V) = {
-        finished = true
-        result.getKey(split.index.toString, status)
-      }
-    }
-  }
-
-
-  private def createInputFormat(
-      attemptContext: TaskAttemptContextImpl) = {
-    val format = new CarbonTableInputFormat[Object]
-    val tableInfo1 = getTableInfo
-    val conf = attemptContext.getConfiguration
-    CarbonInputFormat.setTableInfo(conf, tableInfo1)
-    CarbonInputFormat.setDatabaseName(conf, tableInfo1.getDatabaseName)
-    CarbonInputFormat.setTableName(conf, tableInfo1.getFactTable.getTableName)
-    CarbonInputFormat.setDataTypeConverter(conf, classOf[SparkDataTypeConverterImpl])
-
-    val identifier = tableInfo1.getOrCreateAbsoluteTableIdentifier()
-    CarbonInputFormat.setTablePath(
-      conf,
-      identifier.appendWithLocalPrefix(identifier.getTablePath))
-
-    CarbonInputFormat.setSegmentsToAccess(
-      conf,
-      Segment.toSegmentList(Array(segmentId), null))
-
-    CarbonInputFormat.setColumnProjection(
-      conf,
-      new CarbonProjection(indexColumns.map(_.getColName)))
-    format
-  }
-
-  override protected def getPartitions = {
-    if (!dataMapSchema.isIndexDataMap) {
-      throw new UnsupportedOperationException
-    }
-    val conf = new Configuration()
-    val jobConf = new JobConf(conf)
-    SparkHadoopUtil.get.addCredentials(jobConf)
-    val job = Job.getInstance(jobConf)
-    job.getConfiguration.set("query.id", queryId)
-
-    val format = new CarbonTableInputFormat[Object]
-
-    CarbonInputFormat.setSegmentsToAccess(
-      job.getConfiguration,
-      Segment.toSegmentList(Array(segmentId), null))
-
-    CarbonInputFormat.setTableInfo(
-      job.getConfiguration,
-      tableInfo)
-    CarbonInputFormat.setTablePath(
-      job.getConfiguration,
-      tableInfo.getOrCreateAbsoluteTableIdentifier().getTablePath)
-    CarbonInputFormat.setDatabaseName(
-      job.getConfiguration,
-      tableInfo.getDatabaseName)
-    CarbonInputFormat.setTableName(
-      job.getConfiguration,
-      tableInfo.getFactTable.getTableName)
-
-    format
-      .getSplits(job)
-      .asScala
-      .map(_.asInstanceOf[CarbonInputSplit])
-      .groupBy(_.taskId)
-      .map { group =>
-        new CarbonMultiBlockSplit(
-          group._2.asJava,
-          group._2.flatMap(_.getLocations).toArray)
-      }
-      .zipWithIndex
-      .map { split =>
-        new CarbonSparkPartition(id, split._2, split._1)
-      }
-      .toArray
-  }
-}

http://git-wip-us.apache.org/repos/asf/carbondata/blob/747be9b1/integration/spark2/src/main/scala/org/apache/carbondata/spark/rdd/CarbonDataRDDFactory.scala
----------------------------------------------------------------------
diff --git a/integration/spark2/src/main/scala/org/apache/carbondata/spark/rdd/CarbonDataRDDFactory.scala b/integration/spark2/src/main/scala/org/apache/carbondata/spark/rdd/CarbonDataRDDFactory.scala
index 42c9c25..bdbaef5 100644
--- a/integration/spark2/src/main/scala/org/apache/carbondata/spark/rdd/CarbonDataRDDFactory.scala
+++ b/integration/spark2/src/main/scala/org/apache/carbondata/spark/rdd/CarbonDataRDDFactory.scala
@@ -848,11 +848,11 @@ object CarbonDataRDDFactory {
       val errorMessage = s"Dataload failed due to failure in table status updation for" +
                          s" ${carbonLoadModel.getTableName}"
       LOGGER.audit("Data load is failed for " +
-                   s"${ carbonLoadModel.getDatabaseName }.${ carbonLoadModel.getTableName }")
+                   s"${carbonLoadModel.getDatabaseName}.${carbonLoadModel.getTableName}")
       LOGGER.error("Dataload failed due to failure in table status updation.")
       throw new Exception(errorMessage)
     } else {
-      DataMapStatusManager.disableDataMapsOfTable(carbonTable)
+      DataMapStatusManager.disableAllLazyDataMaps(carbonTable)
     }
     done
   }

http://git-wip-us.apache.org/repos/asf/carbondata/blob/747be9b1/integration/spark2/src/main/scala/org/apache/spark/sql/CarbonSession.scala
----------------------------------------------------------------------
diff --git a/integration/spark2/src/main/scala/org/apache/spark/sql/CarbonSession.scala b/integration/spark2/src/main/scala/org/apache/spark/sql/CarbonSession.scala
index 5fac5a8..497f95a 100644
--- a/integration/spark2/src/main/scala/org/apache/spark/sql/CarbonSession.scala
+++ b/integration/spark2/src/main/scala/org/apache/spark/sql/CarbonSession.scala
@@ -36,6 +36,7 @@ import org.apache.spark.sql.optimizer.CarbonFilters
 import org.apache.spark.sql.profiler.{Profiler, SQLStart}
 import org.apache.spark.util.{CarbonReflectionUtils, Utils}
 
+import org.apache.carbondata.common.annotations.InterfaceAudience
 import org.apache.carbondata.common.logging.LogServiceFactory
 import org.apache.carbondata.core.constants.CarbonCommonConstants
 import org.apache.carbondata.core.scan.expression.LiteralExpression
@@ -110,6 +111,16 @@ class CarbonSession(@transient val sc: SparkContext,
     )
   }
 
+  /**
+   * Return true if the specified SQL statement will hit the datamap.
+   * This API is for test purposes only.
+   */
+  @InterfaceAudience.Developer(Array("DataMap"))
+  def isDataMapHit(sqlStatement: String, dataMapName: String): Boolean = {
+    val message = sql(s"EXPLAIN $sqlStatement").collect()
+    message(0).getString(0).contains(dataMapName)
+  }
+
   def isSearchModeEnabled: Boolean = carbonStore != null
 
   /**

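
The new isDataMapHit helper simply runs EXPLAIN on the given statement and checks whether the datamap name appears in the resulting plan text. A hedged usage sketch, assuming an existing CarbonSession named carbon and a datamap dm created on an indexed column of table t1 (all three names are illustrative, not part of the patch):

    // Returns true only if the EXPLAIN output mentions the datamap name.
    val hit = carbon.isDataMapHit("SELECT * FROM t1 WHERE city = 'city_1'", "dm")
    assert(hit, "expected the filter on the indexed column to be pruned by datamap dm")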
http://git-wip-us.apache.org/repos/asf/carbondata/blob/747be9b1/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/datamap/CarbonCreateDataMapCommand.scala
----------------------------------------------------------------------
diff --git a/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/datamap/CarbonCreateDataMapCommand.scala b/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/datamap/CarbonCreateDataMapCommand.scala
index 34a4013..25589d4 100644
--- a/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/datamap/CarbonCreateDataMapCommand.scala
+++ b/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/datamap/CarbonCreateDataMapCommand.scala
@@ -16,6 +16,8 @@
  */
 package org.apache.spark.sql.execution.command.datamap
 
+import java.util
+
 import scala.collection.JavaConverters._
 import scala.collection.mutable
 
@@ -27,7 +29,7 @@ import org.apache.carbondata.common.exceptions.sql.{MalformedCarbonCommandExcept
 import org.apache.carbondata.common.logging.LogServiceFactory
 import org.apache.carbondata.core.datamap.{DataMapProvider, DataMapStoreManager}
 import org.apache.carbondata.core.datamap.status.DataMapStatusManager
-import org.apache.carbondata.core.metadata.schema.datamap.DataMapClassProvider
+import org.apache.carbondata.core.metadata.schema.datamap.{DataMapClassProvider, DataMapProperty}
 import org.apache.carbondata.core.metadata.schema.table.{CarbonTable, DataMapSchema}
 import org.apache.carbondata.datamap.{DataMapManager, IndexDataMapProvider}
 
@@ -41,7 +43,8 @@ case class CarbonCreateDataMapCommand(
     dmProviderName: String,
     dmProperties: Map[String, String],
     queryString: Option[String],
-    ifNotExistsSet: Boolean = false)
+    ifNotExistsSet: Boolean = false,
+    deferredRebuild: Boolean = false)
   extends AtomicRunnableCommand {
 
   private var dataMapProvider: DataMapProvider = _
@@ -78,9 +81,16 @@ case class CarbonCreateDataMapCommand(
     }
 
     dataMapSchema = new DataMapSchema(dataMapName, dmProviderName)
-    dataMapSchema.setProperties(new java.util.HashMap[String, String](
-      dmProperties.map(x => (x._1.trim, x._2.trim)).asJava))
 
+    val property = dmProperties.map(x => (x._1.trim, x._2.trim)).asJava
+    val javaMap = new java.util.HashMap[String, String](property)
+    javaMap.put(DataMapProperty.DEFERRED_REBUILD, deferredRebuild.toString)
+    dataMapSchema.setProperties(javaMap)
+
+    if (dataMapSchema.isIndexDataMap && mainTable == null) {
+      throw new MalformedDataMapCommandException(
+        "For this datamap, main table is required. Use `CREATE DATAMAP ... ON TABLE ...` ")
+    }
     dataMapProvider = DataMapManager.get.getDataMapProvider(mainTable, dataMapSchema, sparkSession)
 
     // If it is index datamap, check whether the column has datamap created already
@@ -101,6 +111,10 @@ case class CarbonCreateDataMapCommand(
         dataMapProvider.initMeta(queryString.orNull)
         DataMapStatusManager.disableDataMap(dataMapName)
       case _ =>
+        if (deferredRebuild) {
+          throw new MalformedDataMapCommandException(
+            "DEFERRED REBUILD is not supported on this DataMap")
+        }
         dataMapProvider.initMeta(queryString.orNull)
     }
     val LOGGER = LogServiceFactory.getLogService(this.getClass.getCanonicalName)
@@ -111,10 +125,11 @@ case class CarbonCreateDataMapCommand(
   override def processData(sparkSession: SparkSession): Seq[Row] = {
     if (dataMapProvider != null) {
       dataMapProvider.initData()
-      if (mainTable != null &&
-          mainTable.isAutoRefreshDataMap &&
-          !dataMapSchema.isIndexDataMap) {
+      if (mainTable != null && !deferredRebuild) {
         dataMapProvider.rebuild()
+        if (dataMapSchema.isIndexDataMap) {
+          DataMapStatusManager.enableDataMap(dataMapName)
+        }
       }
     }
     Seq.empty

http://git-wip-us.apache.org/repos/asf/carbondata/blob/747be9b1/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/datamap/CarbonDataMapRebuildCommand.scala
----------------------------------------------------------------------
diff --git a/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/datamap/CarbonDataMapRebuildCommand.scala b/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/datamap/CarbonDataMapRebuildCommand.scala
new file mode 100644
index 0000000..6493c83
--- /dev/null
+++ b/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/datamap/CarbonDataMapRebuildCommand.scala
@@ -0,0 +1,56 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.spark.sql.execution.command.datamap
+
+import org.apache.spark.sql.{CarbonEnv, Row, SparkSession}
+import org.apache.spark.sql.catalyst.TableIdentifier
+import org.apache.spark.sql.execution.command.DataCommand
+
+import org.apache.carbondata.core.datamap.{DataMapRegistry, DataMapStoreManager}
+import org.apache.carbondata.core.datamap.status.DataMapStatusManager
+import org.apache.carbondata.datamap.{DataMapManager, IndexDataMapRebuildRDD}
+
+/**
+ * Rebuild the datamap by syncing it with the main table data. After the sync with the
+ * parent table succeeds, the datamap is enabled.
+ */
+case class CarbonDataMapRebuildCommand(
+    dataMapName: String,
+    tableIdentifier: Option[TableIdentifier]) extends DataCommand {
+
+  override def processData(sparkSession: SparkSession): Seq[Row] = {
+    val schema = DataMapStoreManager.getInstance().getDataMapSchema(dataMapName)
+
+    val table = tableIdentifier match {
+      case Some(identifier) =>
+        CarbonEnv.getCarbonTable(identifier)(sparkSession)
+      case _ =>
+        CarbonEnv.getCarbonTable(
+          Option(schema.getRelationIdentifier.getDatabaseName),
+          schema.getRelationIdentifier.getTableName
+        )(sparkSession)
+    }
+    val provider = DataMapManager.get().getDataMapProvider(table, schema, sparkSession)
+    provider.rebuild()
+
+    // After a successful rebuild, enable the datamap.
+    DataMapStatusManager.enableDataMap(dataMapName)
+    Seq.empty
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/carbondata/blob/747be9b1/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/datamap/CarbonDataMapRefreshCommand.scala
----------------------------------------------------------------------
diff --git a/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/datamap/CarbonDataMapRefreshCommand.scala b/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/datamap/CarbonDataMapRefreshCommand.scala
deleted file mode 100644
index 4f3b7bc..0000000
--- a/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/datamap/CarbonDataMapRefreshCommand.scala
+++ /dev/null
@@ -1,56 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.spark.sql.execution.command.datamap
-
-import org.apache.spark.sql.{CarbonEnv, Row, SparkSession}
-import org.apache.spark.sql.catalyst.TableIdentifier
-import org.apache.spark.sql.execution.command.DataCommand
-
-import org.apache.carbondata.core.datamap.{DataMapRegistry, DataMapStoreManager}
-import org.apache.carbondata.core.datamap.status.DataMapStatusManager
-import org.apache.carbondata.datamap.{DataMapManager, IndexDataMapRefreshRDD}
-
-/**
- * Refresh the datamaps through sync with main table data. After sync with parent table's it enables
- * the datamap.
- */
-case class CarbonDataMapRefreshCommand(
-    dataMapName: String,
-    tableIdentifier: Option[TableIdentifier]) extends DataCommand {
-
-  override def processData(sparkSession: SparkSession): Seq[Row] = {
-    val schema = DataMapStoreManager.getInstance().getDataMapSchema(dataMapName)
-
-    val table = tableIdentifier match {
-      case Some(identifier) =>
-        CarbonEnv.getCarbonTable(identifier)(sparkSession)
-      case _ =>
-        CarbonEnv.getCarbonTable(
-          Option(schema.getRelationIdentifier.getDatabaseName),
-          schema.getRelationIdentifier.getTableName
-        )(sparkSession)
-    }
-    val provider = DataMapManager.get().getDataMapProvider(table, schema, sparkSession)
-    provider.rebuild()
-
-    // After sync success enable the datamap.
-    DataMapStatusManager.enableDataMap(dataMapName)
-    Seq.empty
-  }
-
-}

http://git-wip-us.apache.org/repos/asf/carbondata/blob/747be9b1/integration/spark2/src/main/scala/org/apache/spark/sql/execution/datasources/SparkCarbonFileFormat.scala
----------------------------------------------------------------------
diff --git a/integration/spark2/src/main/scala/org/apache/spark/sql/execution/datasources/SparkCarbonFileFormat.scala b/integration/spark2/src/main/scala/org/apache/spark/sql/execution/datasources/SparkCarbonFileFormat.scala
index 1bb7d7c..4f60297 100644
--- a/integration/spark2/src/main/scala/org/apache/spark/sql/execution/datasources/SparkCarbonFileFormat.scala
+++ b/integration/spark2/src/main/scala/org/apache/spark/sql/execution/datasources/SparkCarbonFileFormat.scala
@@ -231,8 +231,9 @@ class SparkCarbonFileFormat extends FileFormat
 
         val tab = model.getTable
         DataMapStoreManager.getInstance().clearDataMaps(identifier)
-        val dataMapExprWrapper = DataMapChooser.get
-          .choose(tab, model.getFilterExpressionResolverTree)
+
+        val dataMapExprWrapper = new DataMapChooser(tab).choose(
+          model.getFilterExpressionResolverTree)
 
         // TODO : handle the partition for CarbonFileLevelFormat
         val prunedBlocklets = dataMapExprWrapper.prune(segments, null)

http://git-wip-us.apache.org/repos/asf/carbondata/blob/747be9b1/integration/spark2/src/main/scala/org/apache/spark/sql/parser/CarbonSpark2SqlParser.scala
----------------------------------------------------------------------
diff --git a/integration/spark2/src/main/scala/org/apache/spark/sql/parser/CarbonSpark2SqlParser.scala b/integration/spark2/src/main/scala/org/apache/spark/sql/parser/CarbonSpark2SqlParser.scala
index 62da7ed..9dd8105 100644
--- a/integration/spark2/src/main/scala/org/apache/spark/sql/parser/CarbonSpark2SqlParser.scala
+++ b/integration/spark2/src/main/scala/org/apache/spark/sql/parser/CarbonSpark2SqlParser.scala
@@ -25,7 +25,7 @@ import org.apache.spark.sql.catalyst.{CarbonDDLSqlParser, TableIdentifier}
 import org.apache.spark.sql.catalyst.CarbonTableIdentifierImplicit._
 import org.apache.spark.sql.catalyst.plans.logical._
 import org.apache.spark.sql.execution.command._
-import org.apache.spark.sql.execution.command.datamap.{CarbonCreateDataMapCommand, CarbonDataMapRefreshCommand, CarbonDataMapShowCommand, CarbonDropDataMapCommand}
+import org.apache.spark.sql.execution.command.datamap.{CarbonCreateDataMapCommand, CarbonDataMapRebuildCommand, CarbonDataMapShowCommand, CarbonDropDataMapCommand}
 import org.apache.spark.sql.execution.command.management._
 import org.apache.spark.sql.execution.command.partition.{CarbonAlterTableDropPartitionCommand, CarbonAlterTableSplitPartitionCommand}
 import org.apache.spark.sql.execution.command.schema.{CarbonAlterTableAddColumnCommand, CarbonAlterTableDataTypeChangeCommand, CarbonAlterTableDropColumnCommand}
@@ -147,19 +147,23 @@ class CarbonSpark2SqlParser extends CarbonDDLSqlParser {
 
   /**
    * The syntax of datamap creation is as follows.
-   * CREATE DATAMAP IF NOT EXISTS datamapName ON TABLE tableName USING 'DataMapProviderName'
+   * CREATE DATAMAP IF NOT EXISTS datamapName [ON TABLE tableName]
+   * USING 'DataMapProviderName'
+   * [WITH DEFERRED REBUILD]
    * DMPROPERTIES('KEY'='VALUE') AS SELECT COUNT(COL1) FROM tableName
    */
   protected lazy val createDataMap: Parser[LogicalPlan] =
     CREATE ~> DATAMAP ~> opt(IF ~> NOT ~> EXISTS) ~ ident ~
     opt(ontable) ~
-    (USING ~> stringLit) ~ (DMPROPERTIES ~> "(" ~> repsep(loadOptions, ",") <~ ")").? ~
+    (USING ~> stringLit) ~
+    opt(WITH ~> DEFERRED ~> REBUILD) ~
+    (DMPROPERTIES ~> "(" ~> repsep(loadOptions, ",") <~ ")").? ~
     (AS ~> restInput).? <~ opt(";") ^^ {
-      case ifnotexists ~ dmname ~ tableIdent ~ dmProviderName ~ dmprops ~ query =>
+      case ifnotexists ~ dmname ~ tableIdent ~ dmProviderName ~ deferred ~ dmprops ~ query =>
 
         val map = dmprops.getOrElse(List[(String, String)]()).toMap[String, String]
         CarbonCreateDataMapCommand(dmname, tableIdent, dmProviderName, map, query,
-          ifnotexists.isDefined)
+          ifnotexists.isDefined, deferred.isDefined)
     }
 
   protected lazy val ontable: Parser[TableIdentifier] =
@@ -190,12 +194,12 @@ class CarbonSpark2SqlParser extends CarbonDDLSqlParser {
 
   /**
    * The syntax of rebuild datamap is used to rebuild an existing datamap on the table
-   * REFRESH DATAMAP datamapname [ON TABLE] tableName
+   * REBUILD DATAMAP datamapname [ON TABLE] tableName
    */
   protected lazy val refreshDataMap: Parser[LogicalPlan] =
-    REFRESH ~> DATAMAP ~> ident ~ opt(ontable) <~ opt(";") ^^ {
+    REBUILD ~> DATAMAP ~> ident ~ opt(ontable) <~ opt(";") ^^ {
       case datamap ~ tableIdent =>
-        CarbonDataMapRefreshCommand(datamap, tableIdent)
+        CarbonDataMapRebuildCommand(datamap, tableIdent)
     }
 
   protected lazy val deleteRecords: Parser[LogicalPlan] =

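
Taken together, these parser changes let an index datamap defer its first build and trigger it later with the new REBUILD keyword. A hedged end-to-end example in the same sql(...) style the test suites use (table, datamap and column names are illustrative only):

    // Illustrative identifiers: t1, dm, city and id are not part of the patch.
    sql(
      s"""
         | CREATE DATAMAP dm ON TABLE t1
         | USING 'bloomfilter'
         | WITH DEFERRED REBUILD
         | DMPROPERTIES('INDEX_COLUMNS'='city,id')
       """.stripMargin)

    // The datamap stays disabled until it is explicitly rebuilt.
    sql(s"REBUILD DATAMAP dm ON TABLE t1")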
http://git-wip-us.apache.org/repos/asf/carbondata/blob/747be9b1/integration/spark2/src/test/scala/org/apache/carbondata/datamap/bloom/BloomCoarseGrainDataMapSuite.scala
----------------------------------------------------------------------
diff --git a/integration/spark2/src/test/scala/org/apache/carbondata/datamap/bloom/BloomCoarseGrainDataMapSuite.scala b/integration/spark2/src/test/scala/org/apache/carbondata/datamap/bloom/BloomCoarseGrainDataMapSuite.scala
index a8e7b6c..7df3901 100644
--- a/integration/spark2/src/test/scala/org/apache/carbondata/datamap/bloom/BloomCoarseGrainDataMapSuite.scala
+++ b/integration/spark2/src/test/scala/org/apache/carbondata/datamap/bloom/BloomCoarseGrainDataMapSuite.scala
@@ -22,44 +22,56 @@ import java.util.UUID
 
 import scala.util.Random
 
+import org.apache.spark.sql.{CarbonSession, DataFrame}
 import org.apache.spark.sql.test.util.QueryTest
-import org.scalatest.BeforeAndAfterAll
+import org.scalatest.{BeforeAndAfterAll, BeforeAndAfterEach}
 
+import org.apache.carbondata.core.datamap.status.DataMapStatusManager
 import org.apache.carbondata.core.util.CarbonProperties
 
-class BloomCoarseGrainDataMapSuite extends QueryTest with BeforeAndAfterAll {
-  val inputFile = s"$resourcesPath/bloom_datamap_input.csv"
+class BloomCoarseGrainDataMapSuite extends QueryTest with BeforeAndAfterAll with BeforeAndAfterEach {
+  val bigFile = s"$resourcesPath/bloom_datamap_input_big.csv"
+  val smallFile = s"$resourcesPath/bloom_datamap_input_small.csv"
   val normalTable = "carbon_normal"
   val bloomDMSampleTable = "carbon_bloom"
   val dataMapName = "bloom_dm"
-  val lineNum = 500000
 
   override protected def beforeAll(): Unit = {
     new File(CarbonProperties.getInstance().getSystemFolderLocation).delete()
-    createFile(inputFile, line = lineNum, start = 0)
+    createFile(bigFile, line = 500000)
+    createFile(smallFile)
     sql(s"DROP TABLE IF EXISTS $normalTable")
     sql(s"DROP TABLE IF EXISTS $bloomDMSampleTable")
   }
 
-  private def checkQuery = {
+  override def afterEach(): Unit = {
+    sql(s"DROP TABLE IF EXISTS $normalTable")
+    sql(s"DROP TABLE IF EXISTS $bloomDMSampleTable")
+  }
+
+  private def checkSqlHitDataMap(sqlText: String, dataMapName: String, shouldHit: Boolean): DataFrame = {
+    if (shouldHit) {
+      assert(sqlContext.sparkSession.asInstanceOf[CarbonSession].isDataMapHit(sqlText, dataMapName))
+    } else {
+      assert(!sqlContext.sparkSession.asInstanceOf[CarbonSession].isDataMapHit(sqlText, dataMapName))
+    }
+    sql(sqlText)
+  }
+
+  private def checkQuery(dataMapName: String, shouldHit: Boolean = true) = {
     checkAnswer(
-      sql(s"select * from $bloomDMSampleTable where id = 1"),
+      checkSqlHitDataMap(s"select * from $bloomDMSampleTable where id = 1", dataMapName, shouldHit),
       sql(s"select * from $normalTable where id = 1"))
     checkAnswer(
-      sql(s"select * from $bloomDMSampleTable where id = 999"),
+      checkSqlHitDataMap(s"select * from $bloomDMSampleTable where id = 999", dataMapName, shouldHit),
       sql(s"select * from $normalTable where id = 999"))
     checkAnswer(
-      sql(s"select * from $bloomDMSampleTable where city = 'city_1'"),
+      checkSqlHitDataMap(s"select * from $bloomDMSampleTable where city = 'city_1'", dataMapName, shouldHit),
       sql(s"select * from $normalTable where city = 'city_1'"))
     checkAnswer(
-      sql(s"select * from $bloomDMSampleTable where city = 'city_999'"),
+      checkSqlHitDataMap(s"select * from $bloomDMSampleTable where city = 'city_999'", dataMapName, shouldHit),
       sql(s"select * from $normalTable where city = 'city_999'"))
-    checkAnswer(
-      sql(s"select count(distinct id), count(distinct name), count(distinct city)," +
-          s" count(distinct s1), count(distinct s2) from $bloomDMSampleTable"),
-      sql(s"select count(distinct id), count(distinct name), count(distinct city)," +
-          s" count(distinct s1), count(distinct s2) from $normalTable"))
-    checkAnswer(
+     checkAnswer(
       sql(s"select min(id), max(id), min(name), max(name), min(city), max(city)" +
           s" from $bloomDMSampleTable"),
       sql(s"select min(id), max(id), min(name), max(name), min(city), max(city)" +
@@ -86,28 +98,34 @@ class BloomCoarseGrainDataMapSuite extends QueryTest with BeforeAndAfterAll {
          | DMProperties('INDEX_COLUMNS'='city,id', 'BLOOM_SIZE'='640000')
       """.stripMargin)
 
+    var map = DataMapStatusManager.readDataMapStatusMap()
+    assert(map.get(dataMapName).isEnabled)
+
     // load two segments
     (1 to 2).foreach { i =>
       sql(
         s"""
-           | LOAD DATA LOCAL INPATH '$inputFile' INTO TABLE $normalTable
+           | LOAD DATA LOCAL INPATH '$bigFile' INTO TABLE $normalTable
            | OPTIONS('header'='false')
          """.stripMargin)
       sql(
         s"""
-           | LOAD DATA LOCAL INPATH '$inputFile' INTO TABLE $bloomDMSampleTable
+           | LOAD DATA LOCAL INPATH '$bigFile' INTO TABLE $bloomDMSampleTable
            | OPTIONS('header'='false')
          """.stripMargin)
     }
 
+    map = DataMapStatusManager.readDataMapStatusMap()
+    assert(map.get(dataMapName).isEnabled)
+
     sql(s"SHOW DATAMAP ON TABLE $bloomDMSampleTable").show(false)
     checkExistence(sql(s"SHOW DATAMAP ON TABLE $bloomDMSampleTable"), true, dataMapName)
-    checkQuery
+    checkQuery(dataMapName)
     sql(s"DROP TABLE IF EXISTS $normalTable")
     sql(s"DROP TABLE IF EXISTS $bloomDMSampleTable")
   }
 
-  test("test create bloom datamap and refresh datamap") {
+  test("test create bloom datamap and REBUILD DATAMAP") {
     sql(
       s"""
          | CREATE TABLE $normalTable(id INT, name STRING, city STRING, age INT,
@@ -125,12 +143,12 @@ class BloomCoarseGrainDataMapSuite extends QueryTest with BeforeAndAfterAll {
     (1 to 2).foreach { i =>
       sql(
         s"""
-           | LOAD DATA LOCAL INPATH '$inputFile' INTO TABLE $normalTable
+           | LOAD DATA LOCAL INPATH '$bigFile' INTO TABLE $normalTable
            | OPTIONS('header'='false')
          """.stripMargin)
       sql(
         s"""
-           | LOAD DATA LOCAL INPATH '$inputFile' INTO TABLE $bloomDMSampleTable
+           | LOAD DATA LOCAL INPATH '$bigFile' INTO TABLE $bloomDMSampleTable
            | OPTIONS('header'='false')
          """.stripMargin)
     }
@@ -142,18 +160,135 @@ class BloomCoarseGrainDataMapSuite extends QueryTest with BeforeAndAfterAll {
          | DMProperties('INDEX_COLUMNS'='city,id', 'BLOOM_SIZE'='640000')
       """.stripMargin)
 
-    sql(s"REFRESH DATAMAP $dataMapName ON TABLE $bloomDMSampleTable")
     sql(s"SHOW DATAMAP ON TABLE $bloomDMSampleTable").show(false)
     checkExistence(sql(s"SHOW DATAMAP ON TABLE $bloomDMSampleTable"), true, dataMapName)
-    checkQuery
+    checkQuery(dataMapName)
+    sql(s"DROP TABLE IF EXISTS $normalTable")
+    sql(s"DROP TABLE IF EXISTS $bloomDMSampleTable")
+  }
+
+  test("test create bloom datamap with DEFERRED REBUILD, query hit datamap") {
+    sql(
+      s"""
+         | CREATE TABLE $normalTable(id INT, name STRING, city STRING, age INT,
+         | s1 STRING, s2 STRING, s3 STRING, s4 STRING, s5 STRING, s6 STRING, s7 STRING, s8 STRING)
+         | STORED BY 'carbondata' TBLPROPERTIES('table_blocksize'='128')
+         |  """.stripMargin)
+    sql(
+      s"""
+         | CREATE TABLE $bloomDMSampleTable(id INT, name STRING, city STRING, age INT,
+         | s1 STRING, s2 STRING, s3 STRING, s4 STRING, s5 STRING, s6 STRING, s7 STRING, s8 STRING)
+         | STORED BY 'carbondata' TBLPROPERTIES('table_blocksize'='128')
+         |  """.stripMargin)
+
+    sql(
+      s"""
+         | LOAD DATA LOCAL INPATH '$smallFile' INTO TABLE $normalTable
+         | OPTIONS('header'='false')
+       """.stripMargin)
+    sql(
+      s"""
+         | LOAD DATA LOCAL INPATH '$smallFile' INTO TABLE $bloomDMSampleTable
+         | OPTIONS('header'='false')
+       """.stripMargin)
+
+    sql(
+      s"""
+         | CREATE DATAMAP $dataMapName ON TABLE $bloomDMSampleTable
+         | USING 'bloomfilter'
+         | WITH DEFERRED REBUILD
+         | DMProperties('INDEX_COLUMNS'='city,id', 'BLOOM_SIZE'='640000')
+      """.stripMargin)
+
+    var map = DataMapStatusManager.readDataMapStatusMap()
+    assert(!map.get(dataMapName).isEnabled)
+
+    sql(
+      s"""
+         | LOAD DATA LOCAL INPATH '$smallFile' INTO TABLE $normalTable
+         | OPTIONS('header'='false')
+         """.stripMargin)
+    sql(
+      s"""
+         | LOAD DATA LOCAL INPATH '$smallFile' INTO TABLE $bloomDMSampleTable
+         | OPTIONS('header'='false')
+         """.stripMargin)
+
+    map = DataMapStatusManager.readDataMapStatusMap()
+    assert(!map.get(dataMapName).isEnabled)
+
+    // once we rebuild, it should be enabled
+    sql(s"REBUILD DATAMAP $dataMapName ON TABLE $bloomDMSampleTable")
+    map = DataMapStatusManager.readDataMapStatusMap()
+    assert(map.get(dataMapName).isEnabled)
+
+    sql(s"SHOW DATAMAP ON TABLE $bloomDMSampleTable").show(false)
+    checkExistence(sql(s"SHOW DATAMAP ON TABLE $bloomDMSampleTable"), true, dataMapName)
+    checkQuery(dataMapName)
+
+    // once we load again, datamap should be disabled, since it is lazy
+    sql(
+      s"""
+         | LOAD DATA LOCAL INPATH '$smallFile' INTO TABLE $normalTable
+         | OPTIONS('header'='false')
+         """.stripMargin)
+    sql(
+      s"""
+         | LOAD DATA LOCAL INPATH '$smallFile' INTO TABLE $bloomDMSampleTable
+         | OPTIONS('header'='false')
+         """.stripMargin)
+    map = DataMapStatusManager.readDataMapStatusMap()
+    assert(!map.get(dataMapName).isEnabled)
+    checkQuery(dataMapName, shouldHit = false)
+
     sql(s"DROP TABLE IF EXISTS $normalTable")
     sql(s"DROP TABLE IF EXISTS $bloomDMSampleTable")
   }
 
-  // todo: will add more tests on bloom datamap, such as exception, delete datamap, show profiler
+  test("test create bloom datamap with DEFERRED REBUILD, query not hit datamap") {
+    sql(
+      s"""
+         | CREATE TABLE $normalTable(id INT, name STRING, city STRING, age INT,
+         | s1 STRING, s2 STRING, s3 STRING, s4 STRING, s5 STRING, s6 STRING, s7 STRING, s8 STRING)
+         | STORED BY 'carbondata' TBLPROPERTIES('table_blocksize'='128')
+         |  """.stripMargin)
+    sql(
+      s"""
+         | CREATE TABLE $bloomDMSampleTable(id INT, name STRING, city STRING, age INT,
+         | s1 STRING, s2 STRING, s3 STRING, s4 STRING, s5 STRING, s6 STRING, s7 STRING, s8 STRING)
+         | STORED BY 'carbondata' TBLPROPERTIES('table_blocksize'='128')
+         |  """.stripMargin)
+
+    sql(
+      s"""
+         | LOAD DATA LOCAL INPATH '$smallFile' INTO TABLE $normalTable
+         | OPTIONS('header'='false')
+       """.stripMargin)
+    sql(
+      s"""
+         | LOAD DATA LOCAL INPATH '$smallFile' INTO TABLE $bloomDMSampleTable
+         | OPTIONS('header'='false')
+       """.stripMargin)
+
+    sql(
+      s"""
+         | CREATE DATAMAP $dataMapName ON TABLE $bloomDMSampleTable
+         | USING 'bloomfilter'
+         | WITH DEFERRED REBUILD
+         | DMProperties('INDEX_COLUMNS'='city,id', 'BLOOM_SIZE'='640000')
+      """.stripMargin)
+
+    checkExistence(sql(s"SHOW DATAMAP ON TABLE $bloomDMSampleTable"), true, dataMapName)
+
+    // datamap is not loaded, so it should not hit
+    checkQuery(dataMapName, shouldHit = false)
+    sql(s"DROP TABLE IF EXISTS $normalTable")
+    sql(s"DROP TABLE IF EXISTS $bloomDMSampleTable")
+  }
 
   override protected def afterAll(): Unit = {
-    deleteFile(inputFile)
+    deleteFile(bigFile)
+    deleteFile(smallFile)
     sql(s"DROP TABLE IF EXISTS $normalTable")
     sql(s"DROP TABLE IF EXISTS $bloomDMSampleTable")
   }

http://git-wip-us.apache.org/repos/asf/carbondata/blob/747be9b1/processing/src/main/java/org/apache/carbondata/processing/datamap/DataMapWriterListener.java
----------------------------------------------------------------------
diff --git a/processing/src/main/java/org/apache/carbondata/processing/datamap/DataMapWriterListener.java b/processing/src/main/java/org/apache/carbondata/processing/datamap/DataMapWriterListener.java
index 9c3d5d6..3dc34d3 100644
--- a/processing/src/main/java/org/apache/carbondata/processing/datamap/DataMapWriterListener.java
+++ b/processing/src/main/java/org/apache/carbondata/processing/datamap/DataMapWriterListener.java
@@ -61,8 +61,12 @@ public class DataMapWriterListener {
     }
     if (tableIndices != null) {
       for (TableDataMap tableDataMap : tableIndices) {
-        DataMapFactory factory = tableDataMap.getDataMapFactory();
-        register(factory, segmentId, taskNo);
+        // register it only if it is not a lazy datamap; for lazy datamaps, the user
+        // will rebuild the datamap manually
+        if (!tableDataMap.getDataMapSchema().isLazy()) {
+          DataMapFactory factory = tableDataMap.getDataMapFactory();
+          register(factory, segmentId, taskNo);
+        }
       }
     }
   }
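
For context, with this change a data load no longer registers a DataMap writer for a lazy (WITH DEFERRED REBUILD) datamap; its index is produced only when the user rebuilds it explicitly. A minimal sketch of that lifecycle, reusing the SQL and variable names from the new BloomCoarseGrainDataMapSuite tests above (illustrative only, not part of this commit):

    // create a lazy bloomfilter datamap; it stays disabled after subsequent loads
    sql(
      s"""
         | CREATE DATAMAP $dataMapName ON TABLE $bloomDMSampleTable
         | USING 'bloomfilter'
         | WITH DEFERRED REBUILD
         | DMProperties('INDEX_COLUMNS'='city,id', 'BLOOM_SIZE'='640000')
       """.stripMargin)
    sql(s"LOAD DATA LOCAL INPATH '$smallFile' INTO TABLE $bloomDMSampleTable OPTIONS('header'='false')")
    // DataMapWriterListener skips registration during the load because the schema is lazy
    sql(s"REBUILD DATAMAP $dataMapName ON TABLE $bloomDMSampleTable")
    // only after REBUILD does DataMapStatusManager report the datamap as enabled and queries can hit it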

http://git-wip-us.apache.org/repos/asf/carbondata/blob/747be9b1/store/search/src/main/java/org/apache/carbondata/store/worker/SearchRequestHandler.java
----------------------------------------------------------------------
diff --git a/store/search/src/main/java/org/apache/carbondata/store/worker/SearchRequestHandler.java b/store/search/src/main/java/org/apache/carbondata/store/worker/SearchRequestHandler.java
index 957e9f8..35acb17 100644
--- a/store/search/src/main/java/org/apache/carbondata/store/worker/SearchRequestHandler.java
+++ b/store/search/src/main/java/org/apache/carbondata/store/worker/SearchRequestHandler.java
@@ -21,12 +21,11 @@ import java.io.IOException;
 import java.util.Iterator;
 import java.util.LinkedList;
 import java.util.List;
+import java.util.Objects;
 
 import org.apache.carbondata.common.annotations.InterfaceAudience;
 import org.apache.carbondata.common.logging.LogService;
 import org.apache.carbondata.common.logging.LogServiceFactory;
-import org.apache.carbondata.core.datamap.DataMapChooser;
-import org.apache.carbondata.core.datamap.DataMapLevel;
 import org.apache.carbondata.core.datamap.Segment;
 import org.apache.carbondata.core.datamap.dev.expr.DataMapExprWrapper;
 import org.apache.carbondata.core.datastore.block.TableBlockInfo;
@@ -104,8 +103,10 @@ public class SearchRequestHandler {
     LOG.info(String.format("[SearchId:%d] %s, number of block: %d",
         request.searchId(), queryModel.toString(), mbSplit.getAllSplits().size()));
 
-    // If there is FGDataMap, prune the split by applying FGDataMap
-    queryModel = tryPruneByFGDataMap(request.searchId(), table, queryModel, mbSplit);
+    // If there is a DataMap selected in the Master, prune the split by it
+    if (request.dataMap() != null) {
+      queryModel = prune(request.searchId(), table, queryModel, mbSplit, request.dataMap().get());
+    }
 
     // In search mode, reader will read multiple blocks by using a thread pool
     CarbonRecordReader<CarbonRow> reader =
@@ -135,35 +136,32 @@ public class SearchRequestHandler {
    * If there is FGDataMap defined for this table and filter condition in the query,
    * prune the splits by the DataMap and set the pruned split into the QueryModel and return
    */
-  private QueryModel tryPruneByFGDataMap(int queryId,
-      CarbonTable table, QueryModel queryModel, CarbonMultiBlockSplit mbSplit) throws IOException {
-    DataMapExprWrapper wrapper =
-        DataMapChooser.get().choose(table, queryModel.getFilterExpressionResolverTree());
-
-    if (wrapper.getDataMapLevel() == DataMapLevel.FG) {
-      List<Segment> segments = new LinkedList<>();
-      for (CarbonInputSplit split : mbSplit.getAllSplits()) {
-        segments.add(Segment.toSegment(
-            split.getSegmentId(), new LatestFilesReadCommittedScope(table.getTablePath())));
-      }
-      List<ExtendedBlocklet> prunnedBlocklets = wrapper.prune(segments, null);
+  private QueryModel prune(int queryId, CarbonTable table, QueryModel queryModel,
+      CarbonMultiBlockSplit mbSplit, DataMapExprWrapper datamap) throws IOException {
+    Objects.requireNonNull(datamap);
+    List<Segment> segments = new LinkedList<>();
+    for (CarbonInputSplit split : mbSplit.getAllSplits()) {
+      segments.add(
+          Segment.toSegment(split.getSegmentId(),
+              new LatestFilesReadCommittedScope(table.getTablePath())));
+    }
+    List<ExtendedBlocklet> prunnedBlocklets = datamap.prune(segments, null);
 
-      List<String> pathToRead = new LinkedList<>();
-      for (ExtendedBlocklet prunnedBlocklet : prunnedBlocklets) {
-        pathToRead.add(prunnedBlocklet.getPath());
-      }
+    List<String> pathToRead = new LinkedList<>();
+    for (ExtendedBlocklet prunnedBlocklet : prunnedBlocklets) {
+      pathToRead.add(prunnedBlocklet.getPath());
+    }
 
-      List<TableBlockInfo> blocks = queryModel.getTableBlockInfos();
-      List<TableBlockInfo> blockToRead = new LinkedList<>();
-      for (TableBlockInfo block : blocks) {
-        if (pathToRead.contains(block.getFilePath())) {
-          blockToRead.add(block);
-        }
+    List<TableBlockInfo> blocks = queryModel.getTableBlockInfos();
+    List<TableBlockInfo> blockToRead = new LinkedList<>();
+    for (TableBlockInfo block : blocks) {
+      if (pathToRead.contains(block.getFilePath())) {
+        blockToRead.add(block);
       }
-      LOG.info(String.format("[SearchId:%d] pruned using FG DataMap, pruned blocks: %d",
-          queryId, blockToRead.size()));
-      queryModel.setTableBlockInfos(blockToRead);
     }
+    LOG.info(String.format("[SearchId:%d] pruned using FG DataMap, pruned blocks: %d", queryId,
+        blockToRead.size()));
+    queryModel.setTableBlockInfos(blockToRead);
     return queryModel;
   }
 

http://git-wip-us.apache.org/repos/asf/carbondata/blob/747be9b1/store/search/src/main/scala/org/apache/spark/rpc/Master.scala
----------------------------------------------------------------------
diff --git a/store/search/src/main/scala/org/apache/spark/rpc/Master.scala b/store/search/src/main/scala/org/apache/spark/rpc/Master.scala
index 5b31a49..2e9a532 100644
--- a/store/search/src/main/scala/org/apache/spark/rpc/Master.scala
+++ b/store/search/src/main/scala/org/apache/spark/rpc/Master.scala
@@ -38,12 +38,15 @@ import org.apache.spark.util.ThreadUtils
 
 import org.apache.carbondata.common.annotations.InterfaceAudience
 import org.apache.carbondata.common.logging.LogServiceFactory
+import org.apache.carbondata.core.datamap.DataMapChooser
+import org.apache.carbondata.core.datamap.dev.expr.DataMapExprWrapper
 import org.apache.carbondata.core.datastore.block.Distributable
 import org.apache.carbondata.core.datastore.row.CarbonRow
 import org.apache.carbondata.core.metadata.schema.table.CarbonTable
 import org.apache.carbondata.core.scan.expression.Expression
 import org.apache.carbondata.core.util.CarbonProperties
 import org.apache.carbondata.hadoop.CarbonMultiBlockSplit
+import org.apache.carbondata.hadoop.api.CarbonInputFormat
 import org.apache.carbondata.hadoop.util.CarbonInputFormatUtil
 import org.apache.carbondata.processing.util.CarbonLoaderUtil
 import org.apache.carbondata.store.worker.Status
@@ -212,11 +215,13 @@ class Master(sparkConf: SparkConf) {
     // prune data and get a mapping of worker hostname to list of blocks,
     // then add these blocks to the SearchRequest and fire the RPC call
     val nodeBlockMapping: JMap[String, JList[Distributable]] = pruneBlock(table, columns, filter)
+    val fgDataMap = chooseFGDataMap(table, filter)
     val tuple = nodeBlockMapping.asScala.map { case (splitAddress, blocks) =>
       // Build a SearchRequest
       val split = new SerializableWritable[CarbonMultiBlockSplit](
         new CarbonMultiBlockSplit(blocks, splitAddress))
-      val request = SearchRequest(queryId, split, table.getTableInfo, columns, filter, localLimit)
+      val request =
+        SearchRequest(queryId, split, table.getTableInfo, columns, filter, localLimit, fgDataMap)
 
       // Find an Endpoint and send the request to it
       // This RPC is non-blocking so that we do not need to wait before sending to the next worker
@@ -249,6 +254,14 @@ class Master(sparkConf: SparkConf) {
     output.toArray
   }
 
+  private def chooseFGDataMap(
+      table: CarbonTable,
+      filter: Expression): Option[DataMapExprWrapper] = {
+    val chooser = new DataMapChooser(table)
+    val filterInterface = table.resolveFilter(filter)
+    Option(chooser.chooseFGDataMap(filterInterface))
+  }
+
   /**
    * Prune data by using CarbonInputFormat.getSplit
    * Return a mapping of host address to list of block
@@ -261,6 +274,9 @@ class Master(sparkConf: SparkConf) {
     val job = new Job(jobConf)
     val format = CarbonInputFormatUtil.createCarbonTableInputFormat(
       job, table, columns, filter, null, null)
+
+    // We will do FG pruning on the reader side, so don't do it here
+    CarbonInputFormat.setFgDataMapPruning(job.getConfiguration, false)
     val splits = format.getSplits(job)
     val distributables = splits.asScala.map { split =>
       split.asInstanceOf[Distributable]

http://git-wip-us.apache.org/repos/asf/carbondata/blob/747be9b1/store/search/src/main/scala/org/apache/spark/search/Searcher.scala
----------------------------------------------------------------------
diff --git a/store/search/src/main/scala/org/apache/spark/search/Searcher.scala b/store/search/src/main/scala/org/apache/spark/search/Searcher.scala
index e467fd3..1532284 100644
--- a/store/search/src/main/scala/org/apache/spark/search/Searcher.scala
+++ b/store/search/src/main/scala/org/apache/spark/search/Searcher.scala
@@ -21,6 +21,7 @@ import org.apache.spark.SerializableWritable
 import org.apache.spark.rpc.{RpcCallContext, RpcEndpoint, RpcEnv}
 
 import org.apache.carbondata.common.logging.LogServiceFactory
+import org.apache.carbondata.core.datamap.dev.expr.DataMapExprWrapper
 import org.apache.carbondata.core.metadata.schema.table.TableInfo
 import org.apache.carbondata.core.scan.expression.Expression
 import org.apache.carbondata.hadoop.CarbonMultiBlockSplit
@@ -37,11 +38,11 @@ class Searcher(override val rpcEnv: RpcEnv) extends RpcEndpoint {
   }
 
   override def receiveAndReply(context: RpcCallContext): PartialFunction[Any, Unit] = {
-    case req@SearchRequest(_, _, _, _, _, _) =>
+    case req: SearchRequest =>
       val response = new SearchRequestHandler().handleSearch(req)
       context.reply(response)
 
-    case req@ShutdownRequest(_) =>
+    case req: ShutdownRequest =>
       val response = new SearchRequestHandler().handleShutdown(req)
       context.reply(response)
 
@@ -59,7 +60,8 @@ case class SearchRequest(
     tableInfo: TableInfo,
     projectColumns: Array[String],
     filterExpression: Expression,
-    limit: Long)
+    limit: Long,
+    dataMap: Option[DataMapExprWrapper])
 
 // Search result sent from worker to master
 case class SearchResult(

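Taken together, the SearchRequestHandler, Master and Searcher changes above move FG-datamap selection to the master in search mode: the master chooses the datamap, disables FG pruning in the input format, and ships the wrapper to every worker inside the SearchRequest; each worker then prunes its blocks with it. A rough sketch of that flow using only names from these diffs (the calls are condensed from different methods, not a verbatim excerpt):

    // Master side (Master.scala)
    val fgDataMap: Option[DataMapExprWrapper] = chooseFGDataMap(table, filter)
    // FG pruning is done on the worker/reader side, so it is switched off during block pruning
    CarbonInputFormat.setFgDataMapPruning(job.getConfiguration, false)
    val request = SearchRequest(queryId, split, table.getTableInfo, columns, filter, localLimit, fgDataMap)

    // Worker side (SearchRequestHandler.handleSearch, Java):
    //   if (request.dataMap() != null) {
    //     queryModel = prune(request.searchId(), table, queryModel, mbSplit, request.dataMap().get());
    //   }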

[42/50] [abbrv] carbondata git commit: [CARBONDATA-2475] Support Modular Core for Materialized View DataMap for query matching and rewriting

Posted by ma...@apache.org.
[CARBONDATA-2475] Support Modular Core for Materialized View DataMap for query matching and rewriting

Integrate MV DataMap to Carbon

This closes #2302


Project: http://git-wip-us.apache.org/repos/asf/carbondata/repo
Commit: http://git-wip-us.apache.org/repos/asf/carbondata/commit/2881c6bb
Tree: http://git-wip-us.apache.org/repos/asf/carbondata/tree/2881c6bb
Diff: http://git-wip-us.apache.org/repos/asf/carbondata/diff/2881c6bb

Branch: refs/heads/spark-2.3
Commit: 2881c6bbc17f34c0e17b6483130e70311e41c653
Parents: bf73e9f
Author: ravipesala <ra...@gmail.com>
Authored: Sat May 12 10:41:01 2018 +0530
Committer: Jacky Li <ja...@qq.com>
Committed: Sun May 13 17:08:56 2018 +0800

----------------------------------------------------------------------
 .../datamap/status/DataMapStatusManager.java    |   16 +
 .../mv/rewrite/MVCreateTestCase.scala           |   16 +
 .../mv/rewrite/MVSampleTestCase.scala           |   16 +
 .../carbondata/mv/rewrite/MVTPCDSTestCase.scala |   16 +
 .../carbondata/mv/rewrite/MVTpchTestCase.scala  |   16 +
 .../carbondata/mv/rewrite/Tpcds_1_4_Suite.scala |   80 --
 .../mv/plans/LogicalToModularPlanSuite.scala    |    8 +-
 .../carbondata/mv/plans/ModularToSQLSuite.scala |    5 +-
 .../src/test/resources/data_big.csv             |   91 ++
 .../src/test/resources/tpch/customers.csv       |  500 +++++++++
 .../src/test/resources/tpch/lineitem.csv        | 1000 ++++++++++++++++++
 .../src/test/resources/tpch/nation.csv          |   25 +
 .../src/test/resources/tpch/orders.csv          | 1000 ++++++++++++++++++
 .../src/test/resources/tpch/region.csv          |    5 +
 .../src/test/resources/tpch/supplier.csv        | 1000 ++++++++++++++++++
 .../apache/spark/sql/hive/CarbonAnalyzer.scala  |   19 +-
 pom.xml                                         |    1 +
 17 files changed, 3727 insertions(+), 87 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/carbondata/blob/2881c6bb/core/src/main/java/org/apache/carbondata/core/datamap/status/DataMapStatusManager.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/datamap/status/DataMapStatusManager.java b/core/src/main/java/org/apache/carbondata/core/datamap/status/DataMapStatusManager.java
index b540146..d0ff589 100644
--- a/core/src/main/java/org/apache/carbondata/core/datamap/status/DataMapStatusManager.java
+++ b/core/src/main/java/org/apache/carbondata/core/datamap/status/DataMapStatusManager.java
@@ -53,6 +53,22 @@ public class DataMapStatusManager {
     return storageProvider.getDataMapStatusDetails();
   }
 
+  /**
+   * Get enabled datamap status details
+   * @return the status details of the datamaps whose status is ENABLED
+   * @throws IOException
+   */
+  public static DataMapStatusDetail[] getEnabledDataMapStatusDetails() throws IOException {
+    DataMapStatusDetail[] dataMapStatusDetails = storageProvider.getDataMapStatusDetails();
+    List<DataMapStatusDetail> statusDetailList = new ArrayList<>();
+    for (DataMapStatusDetail statusDetail : dataMapStatusDetails) {
+      if (statusDetail.getStatus() == DataMapStatus.ENABLED) {
+        statusDetailList.add(statusDetail);
+      }
+    }
+    return statusDetailList.toArray(new DataMapStatusDetail[statusDetailList.size()]);
+  }
+
   public static Map<String, DataMapStatusDetail> readDataMapStatusMap() throws IOException {
     DataMapStatusDetail[] details = storageProvider.getDataMapStatusDetails();
     Map<String, DataMapStatusDetail> map = new HashMap<>(details.length);

http://git-wip-us.apache.org/repos/asf/carbondata/blob/2881c6bb/datamap/mv/core/src/test/scala/org/apache/carbondata/mv/rewrite/MVCreateTestCase.scala
----------------------------------------------------------------------
diff --git a/datamap/mv/core/src/test/scala/org/apache/carbondata/mv/rewrite/MVCreateTestCase.scala b/datamap/mv/core/src/test/scala/org/apache/carbondata/mv/rewrite/MVCreateTestCase.scala
index 184fdc1..4b636db 100644
--- a/datamap/mv/core/src/test/scala/org/apache/carbondata/mv/rewrite/MVCreateTestCase.scala
+++ b/datamap/mv/core/src/test/scala/org/apache/carbondata/mv/rewrite/MVCreateTestCase.scala
@@ -1,3 +1,19 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
 package org.apache.carbondata.mv.rewrite
 
 import java.io.File

http://git-wip-us.apache.org/repos/asf/carbondata/blob/2881c6bb/datamap/mv/core/src/test/scala/org/apache/carbondata/mv/rewrite/MVSampleTestCase.scala
----------------------------------------------------------------------
diff --git a/datamap/mv/core/src/test/scala/org/apache/carbondata/mv/rewrite/MVSampleTestCase.scala b/datamap/mv/core/src/test/scala/org/apache/carbondata/mv/rewrite/MVSampleTestCase.scala
index f8eb11f..6068ef5 100644
--- a/datamap/mv/core/src/test/scala/org/apache/carbondata/mv/rewrite/MVSampleTestCase.scala
+++ b/datamap/mv/core/src/test/scala/org/apache/carbondata/mv/rewrite/MVSampleTestCase.scala
@@ -1,3 +1,19 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
 package org.apache.carbondata.mv.rewrite
 
 import java.io.File

http://git-wip-us.apache.org/repos/asf/carbondata/blob/2881c6bb/datamap/mv/core/src/test/scala/org/apache/carbondata/mv/rewrite/MVTPCDSTestCase.scala
----------------------------------------------------------------------
diff --git a/datamap/mv/core/src/test/scala/org/apache/carbondata/mv/rewrite/MVTPCDSTestCase.scala b/datamap/mv/core/src/test/scala/org/apache/carbondata/mv/rewrite/MVTPCDSTestCase.scala
index 473b338..d7a19b8 100644
--- a/datamap/mv/core/src/test/scala/org/apache/carbondata/mv/rewrite/MVTPCDSTestCase.scala
+++ b/datamap/mv/core/src/test/scala/org/apache/carbondata/mv/rewrite/MVTPCDSTestCase.scala
@@ -1,3 +1,19 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
 package org.apache.carbondata.mv.rewrite
 
 import java.io.File

http://git-wip-us.apache.org/repos/asf/carbondata/blob/2881c6bb/datamap/mv/core/src/test/scala/org/apache/carbondata/mv/rewrite/MVTpchTestCase.scala
----------------------------------------------------------------------
diff --git a/datamap/mv/core/src/test/scala/org/apache/carbondata/mv/rewrite/MVTpchTestCase.scala b/datamap/mv/core/src/test/scala/org/apache/carbondata/mv/rewrite/MVTpchTestCase.scala
index 89813b5..ff5bdac 100644
--- a/datamap/mv/core/src/test/scala/org/apache/carbondata/mv/rewrite/MVTpchTestCase.scala
+++ b/datamap/mv/core/src/test/scala/org/apache/carbondata/mv/rewrite/MVTpchTestCase.scala
@@ -1,3 +1,19 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
 package org.apache.carbondata.mv.rewrite
 
 import java.io.File

http://git-wip-us.apache.org/repos/asf/carbondata/blob/2881c6bb/datamap/mv/core/src/test/scala/org/apache/carbondata/mv/rewrite/Tpcds_1_4_Suite.scala
----------------------------------------------------------------------
diff --git a/datamap/mv/core/src/test/scala/org/apache/carbondata/mv/rewrite/Tpcds_1_4_Suite.scala b/datamap/mv/core/src/test/scala/org/apache/carbondata/mv/rewrite/Tpcds_1_4_Suite.scala
deleted file mode 100644
index 7fac508..0000000
--- a/datamap/mv/core/src/test/scala/org/apache/carbondata/mv/rewrite/Tpcds_1_4_Suite.scala
+++ /dev/null
@@ -1,80 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.carbondata.mv.rewrite
-
-import org.apache.spark.sql.SparkSession
-import org.apache.spark.sql.test.util.PlanTest
-import org.scalatest.BeforeAndAfter
-
-import org.apache.carbondata.mv.testutil.Tpcds_1_4_Tables._
-
-class Tpcds_1_4_Suite extends PlanTest with BeforeAndAfter {
-
-//  test("test using tpc-ds queries") {
-//
-//    tpcds1_4Tables.foreach { create_table =>
-//      hiveClient.runSqlHive(create_table)
-//    }
-    
-//    val dest = "case_30"
-//    val dest = "case_32"
-    val dest = "case_3"
-    
-//    tpcds_1_4_testCases.foreach { testcase =>
-//      if (testcase._1 == dest) {
-//        val mqoSession = new MQOSession(testHive.sparkSession)
-//        val summaryDF = testHive.sparkSession.sql(testcase._2)
-//        mqoSession.sharedState.registerSummaryDataset(summaryDF)
-//
-//        Try(mqoSession.rewrite(testcase._3).withSummaryData) match {
-//          case Success(rewrittenPlan) =>
-//            println(s"""\n\n===== REWRITTEN MODULAR PLAN for ${testcase._1} =====\n\n$rewrittenPlan \n""")
-//
-//            Try(rewrittenPlan.asCompactSQL) match {
-//              case Success(s) =>
-//                println(s"\n\n===== CONVERTED SQL for ${testcase._1} =====\n\n${s}\n")
-//                if (!s.trim.equals(testcase._4)) {
-//                  println(
-//                      s"""
-//                      |=== FAIL: SQLs do not match ===
-//                      |${sideBySide(s, testcase._4).mkString("\n")}
-//                      """.stripMargin)
-//                      }
-//
-//              case Failure(e) => println(s"""\n\n===== CONVERTED SQL for ${testcase._1} failed =====\n\n${e.toString}""")
-//            }
-//
-//          case Failure(e) => println(s"""\n\n==== MODULARIZE the logical query plan for ${testcase._1} failed =====\n\n${e.toString}""")
-//        }
-        
-//        val rewrittenSQL = rewrittenPlan.asCompactSQL
-//        val rewrittenSQL = mqoSession.rewrite(testcase._3).toCompactSQL
-
-//        if (!rewrittenSQL.equals(testcase._4)) {
-//          fail(
-//              s"""
-//              |=== FAIL: SQLs do not match ===
-//              |${sideBySide(rewrittenSQL, testcase._4).mkString("\n")}
-//              """.stripMargin)
-//              }
-//        }
-//
-//    }
-//
-//  }
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/carbondata/blob/2881c6bb/datamap/mv/plan/src/test/scala/org/apache/carbondata/mv/plans/LogicalToModularPlanSuite.scala
----------------------------------------------------------------------
diff --git a/datamap/mv/plan/src/test/scala/org/apache/carbondata/mv/plans/LogicalToModularPlanSuite.scala b/datamap/mv/plan/src/test/scala/org/apache/carbondata/mv/plans/LogicalToModularPlanSuite.scala
index e5b6ca5..176c5d2 100644
--- a/datamap/mv/plan/src/test/scala/org/apache/carbondata/mv/plans/LogicalToModularPlanSuite.scala
+++ b/datamap/mv/plan/src/test/scala/org/apache/carbondata/mv/plans/LogicalToModularPlanSuite.scala
@@ -122,7 +122,7 @@ class LogicalToModularPlanSuite extends ModularPlanTest {
     comparePlans(modularized, correctAnswer)
   }
   
-  test("joins: conjunctive predicates #1 with alias") {
+  ignore("joins: conjunctive predicates #1 with alias") {
     val left = testRelation0.where('a === 1).subquery('x)
     val right = testRelation1.subquery('y)
     val originalQuery =
@@ -136,7 +136,7 @@ class LogicalToModularPlanSuite extends ModularPlanTest {
     comparePlans(modularized, correctAnswer)
   }
   
-  test("joins: conjunctive predicates #2 with alias") {
+  ignore("joins: conjunctive predicates #2 with alias") {
     val lleft = testRelation0.where('a >= 3).subquery('z)
     val left = testRelation0.where('a === 1).subquery('x)
     val right = testRelation0.subquery('y)
@@ -154,7 +154,7 @@ class LogicalToModularPlanSuite extends ModularPlanTest {
     comparePlans(modularized, correctAnswer)
   }
   
-  test("SPJGH query") {
+  ignore("SPJGH query") {
     val left = testRelation0.where('b >= 1).subquery('x)
     val right = testRelation2.where('d >= 2).subquery('y)
     
@@ -172,7 +172,7 @@ class LogicalToModularPlanSuite extends ModularPlanTest {
     comparePlans(modularized, correctAnswer)
   }
   
-  test("non-SPJGH query") {
+  ignore("non-SPJGH query") {
     val mqoAnswer = try testRelation0.where('b > 2).select('a).orderBy('a.asc).analyze.modularize catch {
       case e: Exception =>
         s"""

http://git-wip-us.apache.org/repos/asf/carbondata/blob/2881c6bb/datamap/mv/plan/src/test/scala/org/apache/carbondata/mv/plans/ModularToSQLSuite.scala
----------------------------------------------------------------------
diff --git a/datamap/mv/plan/src/test/scala/org/apache/carbondata/mv/plans/ModularToSQLSuite.scala b/datamap/mv/plan/src/test/scala/org/apache/carbondata/mv/plans/ModularToSQLSuite.scala
index 26f68fe..7cd3d73 100644
--- a/datamap/mv/plan/src/test/scala/org/apache/carbondata/mv/plans/ModularToSQLSuite.scala
+++ b/datamap/mv/plan/src/test/scala/org/apache/carbondata/mv/plans/ModularToSQLSuite.scala
@@ -17,7 +17,8 @@
 
 package org.apache.carbondata.mv.plans
 
-import org.scalatest.BeforeAndAfterAll
+import org.apache.spark.sql.SparkSession
+import org.scalatest.{BeforeAndAfter, BeforeAndAfterAll}
 
 import org.apache.carbondata.mv.dsl._
 import org.apache.carbondata.mv.testutil.ModularPlanTest
@@ -128,7 +129,7 @@ class ModularToSQLSuite extends ModularPlanTest with BeforeAndAfterAll {
     sql(s"drop table if exists item")
   }
 
-  test("convert modular plans to sqls") {
+  ignore("convert modular plans to sqls") {
     testSQLBatch.foreach { query =>
       testPlan(query)
     }

http://git-wip-us.apache.org/repos/asf/carbondata/blob/2881c6bb/integration/spark-common-test/src/test/resources/data_big.csv
----------------------------------------------------------------------
diff --git a/integration/spark-common-test/src/test/resources/data_big.csv b/integration/spark-common-test/src/test/resources/data_big.csv
new file mode 100644
index 0000000..c02b358
--- /dev/null
+++ b/integration/spark-common-test/src/test/resources/data_big.csv
@@ -0,0 +1,91 @@
+empno,empname,designation,doj,workgroupcategory,workgroupcategoryname,deptno,deptname,projectcode,projectjoindate,projectenddate,attendance,utilization,salary
+11,arvind,SE,17-01-2007,1,developer,10,network,928478,17-02-2007,29-11-2016,96,96,5040
+12,krithin,SSE,29-05-2008,1,developer,11,protocol,928378,29-06-2008,30-12-2016,85,95,7124
+13,madhan,TPL,07-07-2009,2,tester,10,network,928478,07-08-2009,30-12-2016,88,99,9054
+14,anandh,SA,29-12-2010,3,manager,11,protocol,928278,29-01-2011,29-06-2016,77,92,11248
+15,ayushi,SSA,09-11-2011,1,developer,12,security,928375,09-12-2011,29-05-2016,99,91,13245
+16,pramod,SE,14-10-2012,1,developer,13,configManagement,928478,14-11-2012,29-12-2016,86,93,5040
+17,gawrav,PL,22-09-2013,2,tester,12,security,928778,22-10-2013,15-11-2016,78,97,9574
+18,sibi,TL,15-08-2014,2,tester,14,Learning,928176,15-09-2014,29-05-2016,84,98,7245
+19,shivani,PL,12-05-2015,1,developer,10,network,928977,12-06-2015,12-11-2016,88,91,11254
+20,bill,PM,01-12-2015,3,manager,14,Learning,928479,01-01-2016,30-11-2016,75,94,13547
+11,arvind,SE,17-01-2007,1,developer,10,network,928478,17-02-2007,29-11-2016,96,96,5040
+12,krithin,SSE,29-05-2008,1,developer,11,protocol,928378,29-06-2008,30-12-2016,85,95,7124
+13,madhan,TPL,07-07-2009,2,tester,10,network,928478,07-08-2009,30-12-2016,88,99,9054
+14,anandh,SA,29-12-2010,3,manager,11,protocol,928278,29-01-2011,29-06-2016,77,92,11248
+15,ayushi,SSA,09-11-2011,1,developer,12,security,928375,09-12-2011,29-05-2016,99,91,13245
+16,pramod,SE,14-10-2012,1,developer,13,configManagement,928478,14-11-2012,29-12-2016,86,93,5040
+17,gawrav,PL,22-09-2013,2,tester,12,security,928778,22-10-2013,15-11-2016,78,97,9574
+18,sibi,TL,15-08-2014,2,tester,14,Learning,928176,15-09-2014,29-05-2016,84,98,7245
+19,shivani,PL,12-05-2015,1,developer,10,network,928977,12-06-2015,12-11-2016,88,91,11254
+20,bill,PM,01-12-2015,3,manager,14,Learning,928479,01-01-2016,30-11-2016,75,94,13547
+11,arvind,SE,17-01-2007,1,developer,10,network,928478,17-02-2007,29-11-2016,96,96,5040
+12,krithin,SSE,29-05-2008,1,developer,11,protocol,928378,29-06-2008,30-12-2016,85,95,7124
+13,madhan,TPL,07-07-2009,2,tester,10,network,928478,07-08-2009,30-12-2016,88,99,9054
+14,anandh,SA,29-12-2010,3,manager,11,protocol,928278,29-01-2011,29-06-2016,77,92,11248
+15,ayushi,SSA,09-11-2011,1,developer,12,security,928375,09-12-2011,29-05-2016,99,91,13245
+16,pramod,SE,14-10-2012,1,developer,13,configManagement,928478,14-11-2012,29-12-2016,86,93,5040
+17,gawrav,PL,22-09-2013,2,tester,12,security,928778,22-10-2013,15-11-2016,78,97,9574
+18,sibi,TL,15-08-2014,2,tester,14,Learning,928176,15-09-2014,29-05-2016,84,98,7245
+19,shivani,PL,12-05-2015,1,developer,10,network,928977,12-06-2015,12-11-2016,88,91,11254
+20,bill,PM,01-12-2015,3,manager,14,Learning,928479,01-01-2016,30-11-2016,75,94,13547
+11,arvind,SE,17-01-2007,1,developer,10,network,928478,17-02-2007,29-11-2016,96,96,5040
+12,krithin,SSE,29-05-2008,1,developer,11,protocol,928378,29-06-2008,30-12-2016,85,95,7124
+13,madhan,TPL,07-07-2009,2,tester,10,network,928478,07-08-2009,30-12-2016,88,99,9054
+14,anandh,SA,29-12-2010,3,manager,11,protocol,928278,29-01-2011,29-06-2016,77,92,11248
+15,ayushi,SSA,09-11-2011,1,developer,12,security,928375,09-12-2011,29-05-2016,99,91,13245
+16,pramod,SE,14-10-2012,1,developer,13,configManagement,928478,14-11-2012,29-12-2016,86,93,5040
+17,gawrav,PL,22-09-2013,2,tester,12,security,928778,22-10-2013,15-11-2016,78,97,9574
+18,sibi,TL,15-08-2014,2,tester,14,Learning,928176,15-09-2014,29-05-2016,84,98,7245
+19,shivani,PL,12-05-2015,1,developer,10,network,928977,12-06-2015,12-11-2016,88,91,11254
+20,bill,PM,01-12-2015,3,manager,14,Learning,928479,01-01-2016,30-11-2016,75,94,13547
+11,arvind,SE,17-01-2007,1,developer,10,network,928478,17-02-2007,29-11-2016,96,96,5040
+12,krithin,SSE,29-05-2008,1,developer,11,protocol,928378,29-06-2008,30-12-2016,85,95,7124
+13,madhan,TPL,07-07-2009,2,tester,10,network,928478,07-08-2009,30-12-2016,88,99,9054
+14,anandh,SA,29-12-2010,3,manager,11,protocol,928278,29-01-2011,29-06-2016,77,92,11248
+15,ayushi,SSA,09-11-2011,1,developer,12,security,928375,09-12-2011,29-05-2016,99,91,13245
+16,pramod,SE,14-10-2012,1,developer,13,configManagement,928478,14-11-2012,29-12-2016,86,93,5040
+17,gawrav,PL,22-09-2013,2,tester,12,security,928778,22-10-2013,15-11-2016,78,97,9574
+18,sibi,TL,15-08-2014,2,tester,14,Learning,928176,15-09-2014,29-05-2016,84,98,7245
+19,shivani,PL,12-05-2015,1,developer,10,network,928977,12-06-2015,12-11-2016,88,91,11254
+20,bill,PM,01-12-2015,3,manager,14,Learning,928479,01-01-2016,30-11-2016,75,94,13547
+11,arvind,SE,17-01-2007,1,developer,10,network,928478,17-02-2007,29-11-2016,96,96,5040
+12,krithin,SSE,29-05-2008,1,developer,11,protocol,928378,29-06-2008,30-12-2016,85,95,7124
+13,madhan,TPL,07-07-2009,2,tester,10,network,928478,07-08-2009,30-12-2016,88,99,9054
+14,anandh,SA,29-12-2010,3,manager,11,protocol,928278,29-01-2011,29-06-2016,77,92,11248
+15,ayushi,SSA,09-11-2011,1,developer,12,security,928375,09-12-2011,29-05-2016,99,91,13245
+16,pramod,SE,14-10-2012,1,developer,13,configManagement,928478,14-11-2012,29-12-2016,86,93,5040
+17,gawrav,PL,22-09-2013,2,tester,12,security,928778,22-10-2013,15-11-2016,78,97,9574
+18,sibi,TL,15-08-2014,2,tester,14,Learning,928176,15-09-2014,29-05-2016,84,98,7245
+19,shivani,PL,12-05-2015,1,developer,10,network,928977,12-06-2015,12-11-2016,88,91,11254
+20,bill,PM,01-12-2015,3,manager,14,Learning,928479,01-01-2016,30-11-2016,75,94,13547
+11,arvind,SE,17-01-2007,1,developer,10,network,928478,17-02-2007,29-11-2016,96,96,5040
+12,krithin,SSE,29-05-2008,1,developer,11,protocol,928378,29-06-2008,30-12-2016,85,95,7124
+13,madhan,TPL,07-07-2009,2,tester,10,network,928478,07-08-2009,30-12-2016,88,99,9054
+14,anandh,SA,29-12-2010,3,manager,11,protocol,928278,29-01-2011,29-06-2016,77,92,11248
+15,ayushi,SSA,09-11-2011,1,developer,12,security,928375,09-12-2011,29-05-2016,99,91,13245
+16,pramod,SE,14-10-2012,1,developer,13,configManagement,928478,14-11-2012,29-12-2016,86,93,5040
+17,gawrav,PL,22-09-2013,2,tester,12,security,928778,22-10-2013,15-11-2016,78,97,9574
+18,sibi,TL,15-08-2014,2,tester,14,Learning,928176,15-09-2014,29-05-2016,84,98,7245
+19,shivani,PL,12-05-2015,1,developer,10,network,928977,12-06-2015,12-11-2016,88,91,11254
+20,bill,PM,01-12-2015,3,manager,14,Learning,928479,01-01-2016,30-11-2016,75,94,13547
+11,arvind,SE,17-01-2007,1,developer,10,network,928478,17-02-2007,29-11-2016,96,96,5040
+12,krithin,SSE,29-05-2008,1,developer,11,protocol,928378,29-06-2008,30-12-2016,85,95,7124
+13,madhan,TPL,07-07-2009,2,tester,10,network,928478,07-08-2009,30-12-2016,88,99,9054
+14,anandh,SA,29-12-2010,3,manager,11,protocol,928278,29-01-2011,29-06-2016,77,92,11248
+15,ayushi,SSA,09-11-2011,1,developer,12,security,928375,09-12-2011,29-05-2016,99,91,13245
+16,pramod,SE,14-10-2012,1,developer,13,configManagement,928478,14-11-2012,29-12-2016,86,93,5040
+17,gawrav,PL,22-09-2013,2,tester,12,security,928778,22-10-2013,15-11-2016,78,97,9574
+18,sibi,TL,15-08-2014,2,tester,14,Learning,928176,15-09-2014,29-05-2016,84,98,7245
+19,shivani,PL,12-05-2015,1,developer,10,network,928977,12-06-2015,12-11-2016,88,91,11254
+20,bill,PM,01-12-2015,3,manager,14,Learning,928479,01-01-2016,30-11-2016,75,94,13547
+11,arvind,SE,17-01-2007,1,developer,10,network,928478,17-02-2007,29-11-2016,96,96,5040
+12,krithin,SSE,29-05-2008,1,developer,11,protocol,928378,29-06-2008,30-12-2016,85,95,7124
+13,madhan,TPL,07-07-2009,2,tester,10,network,928478,07-08-2009,30-12-2016,88,99,9054
+14,anandh,SA,29-12-2010,3,manager,11,protocol,928278,29-01-2011,29-06-2016,77,92,11248
+15,ayushi,SSA,09-11-2011,1,developer,12,security,928375,09-12-2011,29-05-2016,99,91,13245
+16,pramod,SE,14-10-2012,1,developer,13,configManagement,928478,14-11-2012,29-12-2016,86,93,5040
+17,gawrav,PL,22-09-2013,2,tester,12,security,928778,22-10-2013,15-11-2016,78,97,9574
+18,sibi,TL,15-08-2014,2,tester,14,Learning,928176,15-09-2014,29-05-2016,84,98,7245
+19,shivani,PL,12-05-2015,1,developer,10,network,928977,12-06-2015,12-11-2016,88,91,11254
+20,bill,PM,01-12-2015,3,manager,14,Learning,928479,01-01-2016,30-11-2016,75,94,13547
\ No newline at end of file


[45/50] [abbrv] carbondata git commit: [CARBONDATA-2370] Added document for presto multinode setup for carbondata

Posted by ma...@apache.org.
[CARBONDATA-2370] Added document for presto multinode setup for carbondata

Added document for presto multinode setup for carbondata

This closes #2199


Project: http://git-wip-us.apache.org/repos/asf/carbondata/repo
Commit: http://git-wip-us.apache.org/repos/asf/carbondata/commit/3d23fa69
Tree: http://git-wip-us.apache.org/repos/asf/carbondata/tree/3d23fa69
Diff: http://git-wip-us.apache.org/repos/asf/carbondata/diff/3d23fa69

Branch: refs/heads/spark-2.3
Commit: 3d23fa693a604701e3ab3b574b20f21d089e8b43
Parents: 1c5b526
Author: Geetika Gupta <ge...@knoldus.in>
Authored: Fri Apr 20 16:47:35 2018 +0530
Committer: chenliang613 <ch...@huawei.com>
Committed: Mon May 14 21:43:24 2018 +0800

----------------------------------------------------------------------
 .../Presto_Cluster_Setup_For_Carbondata.md      | 133 +++++++++++++++++++
 1 file changed, 133 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/carbondata/blob/3d23fa69/integration/presto/Presto_Cluster_Setup_For_Carbondata.md
----------------------------------------------------------------------
diff --git a/integration/presto/Presto_Cluster_Setup_For_Carbondata.md b/integration/presto/Presto_Cluster_Setup_For_Carbondata.md
new file mode 100644
index 0000000..082b8fe
--- /dev/null
+++ b/integration/presto/Presto_Cluster_Setup_For_Carbondata.md
@@ -0,0 +1,133 @@
+# Presto Multinode Cluster setup For Carbondata
+
+## Installing Presto
+
+  1. Download the 0.187 version of Presto using:
+  `wget https://repo1.maven.org/maven2/com/facebook/presto/presto-server/0.187/presto-server-0.187.tar.gz`
+
+  2. Extract the Presto tar file: `tar zxvf presto-server-0.187.tar.gz`.
+
+  3. Download the Presto CLI for the coordinator and name it presto.
+
+  ```
+    wget https://repo1.maven.org/maven2/com/facebook/presto/presto-cli/0.187/presto-cli-0.187-executable.jar
+
+    mv presto-cli-0.187-executable.jar presto
+
+    chmod +x presto
+  ```
+
+ ## Create Configuration Files
+
+  1. Create an `etc` folder in the presto-server-0.187 directory.
+  2. Create the `config.properties`, `jvm.config`, `log.properties`, and `node.properties` files.
+  3. Install uuid to generate a node.id.
+
+      ```
+      sudo apt-get install uuid
+
+      uuid
+      ```
+
+
+##### Contents of your node.properties file
+
+  ```
+  node.environment=production
+  node.id=<generated uuid>
+  node.data-dir=/home/ubuntu/data
+  ```
+
+##### Contents of your jvm.config file
+
+  ```
+  -server
+  -Xmx16G
+  -XX:+UseG1GC
+  -XX:G1HeapRegionSize=32M
+  -XX:+UseGCOverheadLimit
+  -XX:+ExplicitGCInvokesConcurrent
+  -XX:+HeapDumpOnOutOfMemoryError
+  -XX:OnOutOfMemoryError=kill -9 %p
+  ```
+
+##### Contents of your log.properties file
+  ```
+  com.facebook.presto=INFO
+  ```
+
+ The default minimum level is `INFO`. There are four levels: `DEBUG`, `INFO`, `WARN` and `ERROR`.
+
+## Coordinator Configurations
+
+  ##### Contents of your config.properties
+  ```
+  coordinator=true
+  node-scheduler.include-coordinator=false
+  http-server.http.port=8086
+  query.max-memory=50GB
+  query.max-memory-per-node=2GB
+  discovery-server.enabled=true
+  discovery.uri=<coordinator_ip>:8086
+  ```
+The option `coordinator=true` indicates that the node is the coordinator, and `node-scheduler.include-coordinator=false` tells the coordinator not to do any of the computation work itself and to use the workers instead.
+
+**Note**: We recommend setting `query.max-memory-per-node` to half of the JVM config max memory, though if your workload is highly concurrent, you may want to use a lower value for `query.max-memory-per-node`.
+
+The relation between these two configuration properties should be as follows:
+if `query.max-memory-per-node=30GB`,
+then `query.max-memory=<30GB * number of nodes>`.
+
+## Worker Configurations
+
+##### Contents of your config.properties
+
+  ```
+  coordinator=false
+  http-server.http.port=8086
+  query.max-memory=50GB
+  query.max-memory-per-node=2GB
+  discovery.uri=<coordinator_ip>:8086
+  ```
+
+**Note**: The `jvm.config` and `node.properties` files are the same for all the nodes (worker + coordinator), except that every node must have a different `node.id`.
+
+## Catalog Configurations
+
+1. Create a folder named `catalog` in the `etc` directory of Presto on all the nodes of the cluster, including the coordinator.
+
+##### Configuring Carbondata in Presto
+1. Create a file named `carbondata.properties` in the `catalog` folder and set the required properties on all the nodes.
+
+## Add Plugins
+
+1. Create a directory named `carbondata` in the `plugin` directory of Presto.
+2. Copy the `carbondata` jars to the `plugin/carbondata` directory on all nodes.
+
+## Start Presto Server on all nodes
+
+```
+./presto-server-0.187/bin/launcher start
+```
+This runs Presto as a background process.
+
+```
+./presto-server-0.187/bin/launcher run
+```
+This runs Presto in the foreground.
+
+## Start Presto CLI
+```
+./presto
+```
+To connect to the carbondata catalog, use the following command:
+
+```
+./presto --server <coordinator_ip>:8086 --catalog carbondata --schema <schema_name>
+```
+Execute the following command to ensure the workers are connected.
+
+```
+select * from system.runtime.nodes;
+```
+Now you can use the Presto CLI on the coordinator to query data sources in the catalog using the Presto workers.


[31/50] [abbrv] carbondata git commit: [CARBONDATA-2475] Support Modular Core for Materialized View DataMap for query matching and rewriting

Posted by ma...@apache.org.
http://git-wip-us.apache.org/repos/asf/carbondata/blob/bf73e9fe/datamap/mv/core/src/test/scala/org/apache/carbondata/mv/rewrite/matching/TestTPCDS_1_4_Batch.scala
----------------------------------------------------------------------
diff --git a/datamap/mv/core/src/test/scala/org/apache/carbondata/mv/rewrite/matching/TestTPCDS_1_4_Batch.scala b/datamap/mv/core/src/test/scala/org/apache/carbondata/mv/rewrite/matching/TestTPCDS_1_4_Batch.scala
new file mode 100644
index 0000000..074bf00
--- /dev/null
+++ b/datamap/mv/core/src/test/scala/org/apache/carbondata/mv/rewrite/matching/TestTPCDS_1_4_Batch.scala
@@ -0,0 +1,2496 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.carbondata.mv.rewrite.matching
+
+object TestTPCDS_1_4_Batch {
+  val tpcds_1_4_testCases = Seq(
+      // sequence of triples. Each triple denotes (MV, user query, rewritten query)
+      // test case 1: test SELECT-SELECT-EXACT_MATCH with simple SELECT (extracted from q45)
+      ("case_1",
+       """
+        |SELECT i_item_id, i_item_sk
+        |FROM item
+        |WHERE i_item_sk IN (2, 3, 5, 7, 11, 13, 17, 19, 23, 29)
+        """.stripMargin.trim,
+       """
+        |SELECT i_item_id
+        |FROM item
+        |WHERE i_item_sk IN (2, 3, 5, 7, 11, 13, 17, 19)
+        """.stripMargin.trim,
+       """
+        |SELECT
+        |FROM 
+        |WHERE 
+        """.stripMargin.trim),
+      // test case 2: test SELECT-SELECT-EXACT_MATCH with SELECT containing join (derived from q64)
+      ("case_2",
+       """
+        |SELECT cs1.product_name, cs1.store_name, cs1.store_zip, cs1.b_street_number,
+        |       cs1.b_streen_name, cs1.b_city, cs1.b_zip, cs1.c_street_number, cs1.c_street_name,
+        |       cs1.c_city, cs1.c_zip, cs1.syear, cs1.cnt, cs1.s1, cs1.s2, cs1.s3, cs2.s1,
+        |       cs2.s2, cs2.s3, cs2.syear, cs2.cnt
+        |FROM cross_sales cs1,cross_sales cs2
+        |WHERE cs1.item_sk=cs2.item_sk AND
+        |     cs1.syear = 1999 AND
+        |     cs2.syear = 1999 + 1 AND
+        |     cs2.cnt <= cs1.cnt AND
+        |     cs1.store_name = cs2.store_name AND
+        |     cs1.store_zip = cs2.store_zip
+        """.stripMargin.trim,
+       """
+        |SELECT cs1.product_name, cs1.store_name, cs1.store_zip, cs1.b_street_number,
+        |       cs1.b_streen_name, cs1.b_city, cs1.b_zip, cs1.c_street_number, cs1.c_street_name,
+        |       cs1.c_city, cs1.c_zip, cs1.syear, cs1.cnt, cs1.s1, cs1.s2, cs1.s3, cs2.s1,
+        |       cs2.s2, cs2.s3
+        |FROM cross_sales cs1,cross_sales cs2
+        |WHERE cs1.item_sk=cs2.item_sk AND
+        |     cs1.syear = 1999 AND
+        |     cs2.syear = 1999 + 1 AND
+        |     cs2.cnt <= cs1.cnt AND
+        |     cs1.store_name = cs2.store_name AND
+        |     cs1.store_zip = cs2.store_zip
+        |ORDER BY cs1.product_name, cs1.store_name, cs2.cnt
+        """.stripMargin.trim,
+       """
+        |SELECT
+        |FROM
+        |WHERE
+        """.stripMargin.trim),
+      // test case 3: test simple SELECT with GROUPBY (from q99)
+      ("case_3",
+       """
+        |SELECT count(ss_sold_date_sk) as not_null_total,
+        |       max(ss_sold_date_sk) as max_ss_sold_date_sk,
+        |       max(ss_sold_time_sk) as max_ss_sold_time_sk,
+        |       ss_item_sk,
+        |       ss_store_sk
+        |FROM store_sales
+        |GROUP BY ss_item_sk, ss_store_sk
+        """.stripMargin.trim,
+       """
+        |SELECT count(ss_sold_date_sk) as not_null_total,
+        |       max(ss_sold_date_sk) as max_ss_sold_date_sk,
+        |       ss_item_sk,
+        |       ss_store_sk
+        |FROM store_sales
+        |GROUP BY ss_item_sk, ss_store_sk
+        """.stripMargin.trim,
+       """
+        |SELECT gen_subsumer_0.`not_null_total`,
+        |       gen_subsumer_0.`max_ss_sold_date_sk`,
+        |       gen_subsumer_0.`ss_item_sk`,
+        |       gen_subsumer_0.`ss_store_sk`
+        |FROM
+        |  (SELECT count(`ss_sold_date_sk`) AS `not_null_total`, max(`ss_sold_date_sk`) AS `max_ss_sold_date_sk`, max(`ss_sold_time_sk`) AS `max_ss_sold_time_sk`, `ss_item_sk`, `ss_store_sk` 
+        |  FROM store_sales
+        |  GROUP BY `ss_item_sk`, `ss_store_sk`) gen_subsumer_0
+        """.stripMargin.trim),
+      // test case 4: test SELECT containing join with GROUPBY (from q65)
+      ("case_4",
+       """
+        |SELECT ss_store_sk, ss_item_sk, sum(ss_sales_price) as revenue
+        |FROM store_sales, date_dim
+        |WHERE ss_sold_date_sk = d_date_sk and d_month_seq between 1176 and 1176+11
+        |GROUP BY ss_store_sk, ss_item_sk
+        """.stripMargin.trim,
+       """
+        |SELECT ss_store_sk, ss_item_sk, sum(ss_sales_price) as revenue
+        |FROM store_sales, date_dim
+        |WHERE ss_sold_date_sk = d_date_sk and d_month_seq between 1176 and 1176+11
+        |GROUP BY ss_store_sk, ss_item_sk
+        """.stripMargin.trim,
+       """
+        |SELECT `ss_store_sk`, `ss_item_sk`, sum(`ss_sales_price`) AS `revenue` 
+        |FROM
+        |  store_sales
+        |  INNER JOIN date_dim ON (`d_month_seq` >= 1176) AND (`d_month_seq` <= 1187) AND (`ss_sold_date_sk` = `d_date_sk`)
+        |GROUP BY `ss_store_sk`, `ss_item_sk`
+        """.stripMargin.trim),
+      // the following 6 cases involve an MV of store_sales, item, date_dim
+      // q3
+      ("case_5",
+       """
+        |SELECT dt.d_date, dt.d_moy, dt.d_year, item.i_brand, item.i_brand_id, item.i_item_id, item.i_item_desc,
+        |       item.i_manufact_id, substr(item.i_item_desc, 1, 30) itemdesc, item.i_category, item.i_class,
+        |       item.i_current_price, item.i_item_sk, store_sales.ss_store_sk,
+        |       SUM(store_sales.ss_ext_sales_price) sum_agg,
+        |       SUM(store_sales.ss_quantity*store_sales.ss_list_price) sales, count(*) number_sales
+        |FROM date_dim dt, store_sales, item
+        |WHERE dt.d_date_sk = store_sales.ss_sold_date_sk
+        |      AND store_sales.ss_item_sk = item.i_item_sk
+        |GROUP BY dt.d_date, dt.d_moy, dt.d_year, item.i_brand, item.i_brand_id, item.i_item_id, item.i_item_desc,
+        |         item.i_manufact_id, substr(item.i_item_desc, 1, 30), item.i_category, item.i_category_id,
+        |         item.i_class, item.i_class_id, item.i_current_price, item.i_manager_id,
+        |         item.i_item_sk, store_sales.ss_store_sk
+        """.stripMargin.trim,
+       """
+        | SELECT dt.d_year, item.i_brand_id brand_id, item.i_brand brand,SUM(ss_ext_sales_price) sum_agg
+        | FROM  date_dim dt, store_sales, item
+        | WHERE dt.d_date_sk = store_sales.ss_sold_date_sk
+        |   AND store_sales.ss_item_sk = item.i_item_sk
+        |   AND item.i_manufact_id = 128
+        |   AND dt.d_moy=11
+        | GROUP BY dt.d_year, item.i_brand, item.i_brand_id
+        | ORDER BY dt.d_year, sum_agg desc, brand_id
+        | LIMIT 100
+        """.stripMargin.trim,
+       """
+        |SELECT gen_subsumer_0.`d_year`, gen_subsumer_0.`i_brand_id` AS `brand_id`, gen_subsumer_0.`i_brand` AS `brand`, sum(gen_subsumer_0.`sum_agg`) AS `sum_agg` 
+        |FROM
+        |  (SELECT `d_date`, `d_moy`, `d_year`, `i_brand`, `i_brand_id`, `i_item_id`, `i_item_desc`, `i_manufact_id`, substring(`i_item_desc`, 1, 30) AS `itemdesc`, `i_category`, `i_class`, `i_current_price`, `i_item_sk`, `ss_store_sk`, sum(`ss_ext_sales_price`) AS `sum_agg`, sum((CAST(CAST(`ss_quantity` AS DECIMAL(10,0)) AS DECIMAL(12,2)) * CAST(`ss_list_price` AS DECIMAL(12,2)))) AS `sales`, count(1) AS `number_sales` 
+        |  FROM
+        |    date_dim dt 
+        |    INNER JOIN store_sales ON (`d_date_sk` = `ss_sold_date_sk`)
+        |    INNER JOIN item   ON (`ss_item_sk` = `i_item_sk`)
+        |  GROUP BY `d_date`, `d_moy`, `d_year`, `i_brand`, `i_brand_id`, `i_item_id`, `i_item_desc`, `i_manufact_id`, substring(`i_item_desc`, 1, 30), `i_category`, `i_category_id`, `i_class`, `i_class_id`, `i_current_price`, `i_manager_id`, `i_item_sk`, `ss_store_sk`) gen_subsumer_0 
+        |WHERE
+        |  (gen_subsumer_0.`d_moy` = 11) AND (gen_subsumer_0.`i_manufact_id` = 128)
+        |GROUP BY gen_subsumer_0.`d_year`, gen_subsumer_0.`i_brand`, gen_subsumer_0.`i_brand_id`
+        |ORDER BY gen_subsumer_0.`d_year` ASC NULLS FIRST, `sum_agg` DESC NULLS LAST, `brand_id` ASC NULLS FIRST
+        |LIMIT 100
+        """.stripMargin.trim),
+      // q23a
+      ("case_6",
+       """
+        |SELECT dt.d_date, dt.d_moy, dt.d_year, item.i_brand, item.i_brand_id, item.i_item_id, item.i_item_desc,
+        |       item.i_manufact_id, substr(item.i_item_desc, 1, 30) itemdesc, item.i_category, item.i_class,
+        |       item.i_current_price, item.i_item_sk, store_sales.ss_store_sk,
+        |       SUM(store_sales.ss_ext_sales_price) sum_agg,
+        |       SUM(store_sales.ss_quantity*store_sales.ss_list_price) sales, count(*) number_sales
+        |FROM date_dim dt, store_sales, item
+        |WHERE dt.d_date_sk = store_sales.ss_sold_date_sk
+        |      AND store_sales.ss_item_sk = item.i_item_sk
+        |GROUP BY dt.d_date, dt.d_moy, dt.d_year, item.i_brand, item.i_brand_id, item.i_item_id, item.i_item_desc,
+        |         item.i_manufact_id, substr(item.i_item_desc, 1, 30), item.i_category, item.i_category_id,
+        |         item.i_class, item.i_class_id, item.i_current_price, item.i_manager_id,
+        |         item.i_item_sk, store_sales.ss_store_sk
+        """.stripMargin.trim,
+       """
+        | with frequent_ss_items as
+        | (select substr(i_item_desc,1,30) itemdesc,i_item_sk item_sk,d_date solddate,count(*) cnt
+        |  from store_sales, date_dim, item
+        |  where ss_sold_date_sk = d_date_sk
+        |    and ss_item_sk = i_item_sk
+        |    and d_year in (2000, 2000+1, 2000+2,2000+3)
+        |  group by substr(i_item_desc,1,30),i_item_sk,d_date
+        |  having count(*) >4),
+        | max_store_sales as
+        | (select max(csales) tpcds_cmax
+        |  from (select c_customer_sk,sum(ss_quantity*ss_sales_price) csales
+        |        from store_sales, customer, date_dim
+        |        where ss_customer_sk = c_customer_sk
+        |         and ss_sold_date_sk = d_date_sk
+        |         and d_year in (2000, 2000+1, 2000+2,2000+3)
+        |        group by c_customer_sk) x),
+        | best_ss_customer as
+        | (select c_customer_sk,sum(ss_quantity*ss_sales_price) ssales
+        |  from store_sales, customer
+        |  where ss_customer_sk = c_customer_sk
+        |  group by c_customer_sk
+        |  having sum(ss_quantity*ss_sales_price) > (50/100.0) *
+        |    (select * from max_store_sales))
+        | select sum(sales)
+        | from ((select cs_quantity*cs_list_price sales
+        |       from catalog_sales, date_dim
+        |       where d_year = 2000
+        |         and d_moy = 2
+        |         and cs_sold_date_sk = d_date_sk
+        |         and cs_item_sk in (select item_sk from frequent_ss_items)
+        |         and cs_bill_customer_sk in (select c_customer_sk from best_ss_customer))
+        |      union all
+        |      (select ws_quantity*ws_list_price sales
+        |       from web_sales, date_dim
+        |       where d_year = 2000
+        |         and d_moy = 2
+        |         and ws_sold_date_sk = d_date_sk
+        |         and ws_item_sk in (select item_sk from frequent_ss_items)
+        |         and ws_bill_customer_sk in (select c_customer_sk from best_ss_customer))) y
+        | limit 100
+        """.stripMargin.trim,
+       """
+        |SELECT sum(gen_subquery_4.`sales`) AS `sum(sales)` 
+        |FROM
+        |  (SELECT (CAST(CAST(catalog_sales.`cs_quantity` AS DECIMAL(10,0)) AS DECIMAL(12,2)) * CAST(catalog_sales.`cs_list_price` AS DECIMAL(12,2))) AS `sales` 
+        |  FROM
+        |    catalog_sales
+        |    LEFT SEMI JOIN (SELECT gen_subsumer_0.`i_item_sk` AS `item_sk`, sum(gen_subsumer_0.`number_sales`) AS `count(1)` 
+        |    FROM
+        |      (SELECT dt.`d_date`, dt.`d_moy`, dt.`d_year`, item.`i_brand`, item.`i_brand_id`, item.`i_item_id`, item.`i_item_desc`, item.`i_manufact_id`, substring(item.`i_item_desc`, 1, 30) AS `itemdesc`, item.`i_category`, item.`i_class`, item.`i_current_price`, item.`i_item_sk`, store_sales.`ss_store_sk`, sum(store_sales.`ss_ext_sales_price`) AS `sum_agg`, sum((CAST(CAST(store_sales.`ss_quantity` AS DECIMAL(10,0)) AS DECIMAL(12,2)) * CAST(store_sales.`ss_list_price` AS DECIMAL(12,2)))) AS `sales`, count(1) AS `number_sales` 
+        |      FROM
+        |        date_dim dt 
+        |        INNER JOIN store_sales ON (dt.`d_date_sk` = store_sales.`ss_sold_date_sk`)
+        |        INNER JOIN item   ON (store_sales.`ss_item_sk` = item.`i_item_sk`)
+        |      GROUP BY dt.`d_date`, dt.`d_moy`, dt.`d_year`, item.`i_brand`, item.`i_brand_id`, item.`i_item_id`, item.`i_item_desc`, item.`i_manufact_id`, substring(item.`i_item_desc`, 1, 30), item.`i_category`, item.`i_category_id`, item.`i_class`, item.`i_class_id`, item.`i_current_price`, item.`i_manager_id`, item.`i_item_sk`, store_sales.`ss_store_sk`) gen_subsumer_0 
+        |    WHERE
+        |      (gen_subsumer_0.`d_year` IN (2000, 2001, 2002, 2003))
+        |    GROUP BY gen_subsumer_0.`itemdesc`, gen_subsumer_0.`i_item_sk`, gen_subsumer_0.`d_date`) gen_subquery_0  ON (gen_subquery_0.`count(1)` > 4L) AND (catalog_sales.`cs_item_sk` = gen_subquery_0.`item_sk`)
+        |    LEFT SEMI JOIN (SELECT customer.`c_customer_sk`, sum((CAST(CAST(store_sales.`ss_quantity` AS DECIMAL(10,0)) AS DECIMAL(12,2)) * CAST(store_sales.`ss_sales_price` AS DECIMAL(12,2)))) AS `sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#271 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_sales_price#274 as decimal(12,2)))), DecimalType(18,2)))` 
+        |    FROM
+        |      store_sales
+        |      INNER JOIN customer   ON (store_sales.`ss_customer_sk` = customer.`c_customer_sk`)
+        |    GROUP BY customer.`c_customer_sk`) gen_subquery_1  ON (CAST(gen_subquery_1.`sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#271 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_sales_price#274 as decimal(12,2)))), DecimalType(18,2)))` AS DECIMAL(38,8)) > (0.500000BD * CAST((SELECT max(gen_scalar_subquery_0_0.`csales`) AS `tpcds_cmax`   FROM  (SELECT sum((CAST(CAST(store_sales.`ss_quantity` AS DECIMAL(10,0)) AS DECIMAL(12,2)) * CAST(store_sales.`ss_sales_price` AS DECIMAL(12,2)))) AS `csales`   FROM  store_sales  INNER JOIN customer   ON (store_sales.`ss_customer_sk` = customer.`c_customer_sk`)  INNER JOIN date_dim ON (date_dim.`d_year` IN (2000, 2001, 2002, 2003)) AND (store_sales.`ss_sold_date_sk` = date_dim.`d_date_sk`)  GROUP BY customer.`c_customer_sk`) gen_scalar_subquery_0_0 ) AS DECIMAL(32,6)))) AND (catalog_sales.`cs_bill_customer_sk` = gen_subquery_1.`c_customer_sk`)
+        |    INNER JOIN date_dim ON (date_dim.`d_year` = 2000) AND (date_dim.`d_moy` = 2) AND (catalog_sales.`cs_sold_date_sk` = date_dim.`d_date_sk`)
+        |  UNION ALL
+        |  SELECT (CAST(CAST(web_sales.`ws_quantity` AS DECIMAL(10,0)) AS DECIMAL(12,2)) * CAST(web_sales.`ws_list_price` AS DECIMAL(12,2))) AS `sales` 
+        |  FROM
+        |    web_sales
+        |    LEFT SEMI JOIN (SELECT gen_subsumer_1.`i_item_sk` AS `item_sk`, sum(gen_subsumer_1.`number_sales`) AS `count(1)` 
+        |    FROM
+        |      (SELECT dt.`d_date`, dt.`d_moy`, dt.`d_year`, item.`i_brand`, item.`i_brand_id`, item.`i_item_id`, item.`i_item_desc`, item.`i_manufact_id`, substring(item.`i_item_desc`, 1, 30) AS `itemdesc`, item.`i_category`, item.`i_class`, item.`i_current_price`, item.`i_item_sk`, store_sales.`ss_store_sk`, sum(store_sales.`ss_ext_sales_price`) AS `sum_agg`, sum((CAST(CAST(store_sales.`ss_quantity` AS DECIMAL(10,0)) AS DECIMAL(12,2)) * CAST(store_sales.`ss_list_price` AS DECIMAL(12,2)))) AS `sales`, count(1) AS `number_sales` 
+        |      FROM
+        |        date_dim dt 
+        |        INNER JOIN store_sales ON (dt.`d_date_sk` = store_sales.`ss_sold_date_sk`)
+        |        INNER JOIN item   ON (store_sales.`ss_item_sk` = item.`i_item_sk`)
+        |      GROUP BY dt.`d_date`, dt.`d_moy`, dt.`d_year`, item.`i_brand`, item.`i_brand_id`, item.`i_item_id`, item.`i_item_desc`, item.`i_manufact_id`, substring(item.`i_item_desc`, 1, 30), item.`i_category`, item.`i_category_id`, item.`i_class`, item.`i_class_id`, item.`i_current_price`, item.`i_manager_id`, item.`i_item_sk`, store_sales.`ss_store_sk`) gen_subsumer_1 
+        |    WHERE
+        |      (gen_subsumer_1.`d_year` IN (2000, 2001, 2002, 2003))
+        |    GROUP BY gen_subsumer_1.`itemdesc`, gen_subsumer_1.`i_item_sk`, gen_subsumer_1.`d_date`) gen_subquery_2  ON (gen_subquery_2.`count(1)` > 4L) AND (web_sales.`ws_item_sk` = gen_subquery_2.`item_sk`)
+        |    LEFT SEMI JOIN (SELECT customer.`c_customer_sk`, sum((CAST(CAST(store_sales.`ss_quantity` AS DECIMAL(10,0)) AS DECIMAL(12,2)) * CAST(store_sales.`ss_sales_price` AS DECIMAL(12,2)))) AS `sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#271 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_sales_price#274 as decimal(12,2)))), DecimalType(18,2)))` 
+        |    FROM
+        |      store_sales
+        |      INNER JOIN customer   ON (store_sales.`ss_customer_sk` = customer.`c_customer_sk`)
+        |    GROUP BY customer.`c_customer_sk`) gen_subquery_3  ON (CAST(gen_subquery_3.`sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#271 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_sales_price#274 as decimal(12,2)))), DecimalType(18,2)))` AS DECIMAL(38,8)) > (0.500000BD * CAST((SELECT max(gen_scalar_subquery_1_0.`csales`) AS `tpcds_cmax`   FROM  (SELECT sum((CAST(CAST(store_sales.`ss_quantity` AS DECIMAL(10,0)) AS DECIMAL(12,2)) * CAST(store_sales.`ss_sales_price` AS DECIMAL(12,2)))) AS `csales`   FROM  store_sales  INNER JOIN customer   ON (store_sales.`ss_customer_sk` = customer.`c_customer_sk`)  INNER JOIN date_dim ON (date_dim.`d_year` IN (2000, 2001, 2002, 2003)) AND (store_sales.`ss_sold_date_sk` = date_dim.`d_date_sk`)  GROUP BY customer.`c_customer_sk`) gen_scalar_subquery_1_0 ) AS DECIMAL(32,6)))) AND (web_sales.`ws_bill_customer_sk` = gen_subquery_3.`c_customer_sk`)
+        |    INNER JOIN date_dim ON (date_dim.`d_year` = 2000) AND (date_dim.`d_moy` = 2) AND (web_sales.`ws_sold_date_sk` = date_dim.`d_date_sk`)) gen_subquery_4 
+        |LIMIT 100
+        """.stripMargin.trim),
+      // q14a
+      ("case_7",
+       """
+        |SELECT dt.d_date, dt.d_moy, dt.d_year, item.i_brand, item.i_brand_id, item.i_item_id, item.i_item_desc,
+        |       substr(item.i_item_desc, 1, 30) itemdesc, item.i_category, item.i_class,
+        |       item.i_current_price, item.i_item_sk, store_sales.ss_store_sk,
+        |       SUM(store_sales.ss_ext_sales_price) sum_agg,
+        |       SUM(store_sales.ss_quantity*store_sales.ss_list_price) sales, count(*) number_sales
+        |FROM date_dim dt, store_sales, item
+        |WHERE dt.d_date_sk = store_sales.ss_sold_date_sk
+        |      AND store_sales.ss_item_sk = item.i_item_sk
+        |GROUP BY dt.d_date, dt.d_moy, dt.d_year, item.i_brand, item.i_brand_id, item.i_item_id, item.i_item_desc,
+        |         substr(item.i_item_desc, 1, 30), item.i_category, item.i_category_id,
+        |         item.i_class, item.i_class_id, item.i_current_price, item.i_manager_id,
+        |         item.i_item_sk, store_sales.ss_store_sk
+        """.stripMargin.trim,
+       """
+        |with cross_items as
+        | (select i_item_sk ss_item_sk
+        | from item,
+        |    (select iss.i_brand_id brand_id, iss.i_class_id class_id, iss.i_category_id category_id
+        |     from store_sales, item iss, date_dim d1
+        |     where ss_item_sk = iss.i_item_sk
+        |        and ss_sold_date_sk = d1.d_date_sk
+        |       and d1.d_year between 1999 AND 1999 + 2
+        |   intersect
+        |     select ics.i_brand_id, ics.i_class_id, ics.i_category_id
+        |     from catalog_sales, item ics, date_dim d2
+        |     where cs_item_sk = ics.i_item_sk
+        |       and cs_sold_date_sk = d2.d_date_sk
+        |       and d2.d_year between 1999 AND 1999 + 2
+        |   intersect
+        |     select iws.i_brand_id, iws.i_class_id, iws.i_category_id
+        |     from web_sales, item iws, date_dim d3
+        |     where ws_item_sk = iws.i_item_sk
+        |       and ws_sold_date_sk = d3.d_date_sk
+        |       and d3.d_year between 1999 AND 1999 + 2) x
+        | where i_brand_id = brand_id
+        |   and i_class_id = class_id
+        |   and i_category_id = category_id
+        |),
+        | avg_sales as
+        | (select avg(quantity*list_price) average_sales
+        |  from (
+        |     select ss_quantity quantity, ss_list_price list_price
+        |     from store_sales, date_dim
+        |     where ss_sold_date_sk = d_date_sk
+        |       and d_year between 1999 and 2001
+        |   union all
+        |     select cs_quantity quantity, cs_list_price list_price
+        |     from catalog_sales, date_dim
+        |     where cs_sold_date_sk = d_date_sk
+        |       and d_year between 1999 and 1999 + 2
+        |   union all
+        |     select ws_quantity quantity, ws_list_price list_price
+        |     from web_sales, date_dim
+        |     where ws_sold_date_sk = d_date_sk
+        |       and d_year between 1999 and 1999 + 2) x)
+        | select channel, i_brand_id,i_class_id,i_category_id,sum(sales), sum(number_sales)
+        | from(
+        |     select 'store' channel, i_brand_id,i_class_id
+        |             ,i_category_id,sum(ss_quantity*ss_list_price) sales
+        |             , count(*) number_sales
+        |     from store_sales, item, date_dim
+        |     where ss_item_sk in (select ss_item_sk from cross_items)
+        |       and ss_item_sk = i_item_sk
+        |       and ss_sold_date_sk = d_date_sk
+        |       and d_year = 1999+2
+        |       and d_moy = 11
+        |     group by i_brand_id,i_class_id,i_category_id
+        |     having sum(ss_quantity*ss_list_price) > (select average_sales from avg_sales)
+        |   union all
+        |     select 'catalog' channel, i_brand_id,i_class_id,i_category_id, sum(cs_quantity*cs_list_price) sales, count(*) number_sales
+        |     from catalog_sales, item, date_dim
+        |     where cs_item_sk in (select ss_item_sk from cross_items)
+        |       and cs_item_sk = i_item_sk
+        |       and cs_sold_date_sk = d_date_sk
+        |       and d_year = 1999+2
+        |       and d_moy = 11
+        |     group by i_brand_id,i_class_id,i_category_id
+        |     having sum(cs_quantity*cs_list_price) > (select average_sales from avg_sales)
+        |   union all
+        |     select 'web' channel, i_brand_id,i_class_id,i_category_id, sum(ws_quantity*ws_list_price) sales , count(*) number_sales
+        |     from web_sales, item, date_dim
+        |     where ws_item_sk in (select ss_item_sk from cross_items)
+        |       and ws_item_sk = i_item_sk
+        |       and ws_sold_date_sk = d_date_sk
+        |       and d_year = 1999+2
+        |       and d_moy = 11
+        |     group by i_brand_id,i_class_id,i_category_id
+        |     having sum(ws_quantity*ws_list_price) > (select average_sales from avg_sales)
+        | ) y
+        | group by rollup (channel, i_brand_id,i_class_id,i_category_id)
+        | order by channel,i_brand_id,i_class_id,i_category_id
+        | limit 100
+        """.stripMargin.trim,
+       """
+        |
+        |
+        |
+        """.stripMargin.trim),
+      // q55
+      ("case_8",
+       """
+        |SELECT dt.d_date, dt.d_moy, dt.d_year, item.i_brand, item.i_brand_id, item.i_item_id, item.i_item_desc,
+        |       substr(item.i_item_desc, 1, 30) itemdesc, item.i_category, item.i_class,
+        |       item.i_manager_id, item.i_current_price, item.i_item_sk, store_sales.ss_store_sk,
+        |       SUM(store_sales.ss_ext_sales_price) sum_agg,
+        |       SUM(store_sales.ss_quantity*store_sales.ss_list_price) sales, count(*) number_sales
+        |FROM date_dim dt, store_sales, item
+        |WHERE dt.d_date_sk = store_sales.ss_sold_date_sk
+        |      AND store_sales.ss_item_sk = item.i_item_sk
+        |GROUP BY dt.d_date, dt.d_moy, dt.d_year, item.i_brand, item.i_brand_id, item.i_item_id, item.i_item_desc,
+        |         substr(item.i_item_desc, 1, 30), item.i_category, item.i_category_id,
+        |         item.i_class, item.i_class_id, item.i_current_price, item.i_manager_id,
+        |         item.i_item_sk, store_sales.ss_store_sk
+       """.stripMargin.trim,
+       """
+        |select i_brand_id brand_id, i_brand brand,
+        |   sum(ss_ext_sales_price) ext_price
+        | from date_dim, store_sales, item
+        | where d_date_sk = ss_sold_date_sk
+        |   and ss_item_sk = i_item_sk
+        |   and i_manager_id=28
+        |   and d_moy=11
+        |   and d_year=1999
+        | group by i_brand, i_brand_id
+        | order by ext_price desc, brand_id
+        | limit 100
+       """.stripMargin.trim,
+       """
+        |SELECT gen_subsumer_0.`i_brand_id` AS `brand_id`, gen_subsumer_0.`i_brand` AS `brand`, sum(gen_subsumer_0.`sum_agg`) AS `ext_price` 
+        |FROM
+        |  (SELECT `d_date`, `d_moy`, `d_year`, `i_brand`, `i_brand_id`, `i_item_id`, `i_item_desc`, substring(`i_item_desc`, 1, 30) AS `itemdesc`, `i_category`, `i_class`, `i_manager_id`, `i_current_price`, `i_item_sk`, `ss_store_sk`, sum(`ss_ext_sales_price`) AS `sum_agg`, sum((CAST(CAST(`ss_quantity` AS DECIMAL(10,0)) AS DECIMAL(12,2)) * CAST(`ss_list_price` AS DECIMAL(12,2)))) AS `sales`, count(1) AS `number_sales` 
+        |  FROM
+        |    date_dim dt 
+        |    INNER JOIN store_sales ON (`d_date_sk` = `ss_sold_date_sk`)
+        |    INNER JOIN item   ON (`ss_item_sk` = `i_item_sk`)
+        |  GROUP BY `d_date`, `d_moy`, `d_year`, `i_brand`, `i_brand_id`, `i_item_id`, `i_item_desc`, substring(`i_item_desc`, 1, 30), `i_category`, `i_category_id`, `i_class`, `i_class_id`, `i_current_price`, `i_manager_id`, `i_item_sk`, `ss_store_sk`) gen_subsumer_0 
+        |WHERE
+        |  (gen_subsumer_0.`d_moy` = 11) AND (gen_subsumer_0.`d_year` = 1999) AND (gen_subsumer_0.`i_manager_id` = 28)
+        |GROUP BY gen_subsumer_0.`i_brand`, gen_subsumer_0.`i_brand_id`
+        |ORDER BY `ext_price` DESC NULLS LAST, `brand_id` ASC NULLS FIRST
+        |LIMIT 100
+        """.stripMargin.trim),
+      // q98
+      ("case_9",
+       """
+        |SELECT dt.d_date, dt.d_moy, dt.d_year, item.i_brand, item.i_brand_id, item.i_item_id, item.i_item_desc,
+        |       substr(item.i_item_desc, 1, 30) itemdesc, item.i_category, item.i_class,
+        |       item.i_manager_id, item.i_current_price, item.i_item_sk, store_sales.ss_store_sk,
+        |       SUM(store_sales.ss_ext_sales_price) sum_agg,
+        |       SUM(store_sales.ss_quantity*store_sales.ss_list_price) sales, count(*) number_sales
+        |FROM date_dim dt, store_sales, item
+        |WHERE dt.d_date_sk = store_sales.ss_sold_date_sk
+        |      AND store_sales.ss_item_sk = item.i_item_sk
+        |GROUP BY dt.d_date, dt.d_moy, dt.d_year, item.i_brand, item.i_brand_id, item.i_item_id, item.i_item_desc,
+        |         substr(item.i_item_desc, 1, 30), item.i_category, item.i_category_id,
+        |         item.i_class, item.i_class_id, item.i_current_price, item.i_manager_id,
+        |         item.i_item_sk, store_sales.ss_store_sk
+       """.stripMargin.trim,
+       """
+        |select i_item_desc, i_category, i_class, i_current_price
+        |      ,sum(ss_ext_sales_price) as itemrevenue
+        |      ,sum(ss_ext_sales_price)*100/sum(sum(ss_ext_sales_price)) over
+        |          (partition by i_class) as revenueratio
+        |from
+        |    store_sales, item, date_dim
+        |where
+        |   ss_item_sk = i_item_sk
+        |   and i_category in ('Sports', 'Books', 'Home')
+        |   and ss_sold_date_sk = d_date_sk
+        |   and d_date between cast('1999-02-22' as date)
+        |                           and (cast('1999-02-22' as date) + interval 30 days)
+        |group by
+        |   i_item_id, i_item_desc, i_category, i_class, i_current_price
+        |order by
+        |   i_category, i_class, i_item_id, i_item_desc, revenueratio
+       """.stripMargin.trim,
+       """
+        |
+        |
+        |
+        """.stripMargin.trim),
+      // q76
+      ("case_10",
+       """
+        |SELECT dt.d_date, dt.d_moy, dt.d_year, item.i_brand, item.i_brand_id, item.i_item_id, item.i_item_desc,
+        |       substr(item.i_item_desc, 1, 30) itemdesc, item.i_category, item.i_class,
+        |       item.i_manager_id, item.i_current_price, item.i_item_sk, store_sales.ss_store_sk,
+        |       SUM(store_sales.ss_ext_sales_price) sum_agg,
+        |       SUM(store_sales.ss_quantity*store_sales.ss_list_price) sales, count(*) number_sales
+        |FROM date_dim dt, store_sales, item
+        |WHERE dt.d_date_sk = store_sales.ss_sold_date_sk
+        |      AND store_sales.ss_item_sk = item.i_item_sk
+        |GROUP BY dt.d_date, dt.d_moy, dt.d_year, item.i_brand, item.i_brand_id, item.i_item_id, item.i_item_desc,
+        |         substr(item.i_item_desc, 1, 30), item.i_category, item.i_category_id,
+        |         item.i_class, item.i_class_id, item.i_current_price, item.i_manager_id,
+        |         item.i_item_sk, store_sales.ss_store_sk
+       """.stripMargin.trim,
+       """
+        | SELECT
+        |    channel, col_name, d_year, d_qoy, i_category, COUNT(*) sales_cnt,
+        |    SUM(ext_sales_price) sales_amt
+        | FROM(
+        |    SELECT
+        |        'store' as channel, ss_store_sk col_name, d_year, d_qoy, i_category,
+        |        ss_ext_sales_price ext_sales_price
+        |    FROM store_sales, item, date_dim
+        |    WHERE ss_store_sk IS NULL
+        |      AND ss_sold_date_sk=d_date_sk
+        |      AND ss_item_sk=i_item_sk
+        |    UNION ALL
+        |    SELECT
+        |        'web' as channel, ws_ship_customer_sk col_name, d_year, d_qoy, i_category,
+        |        ws_ext_sales_price ext_sales_price
+        |    FROM web_sales, item, date_dim
+        |    WHERE ws_ship_customer_sk IS NULL
+        |      AND ws_sold_date_sk=d_date_sk
+        |      AND ws_item_sk=i_item_sk
+        |    UNION ALL
+        |    SELECT
+        |        'catalog' as channel, cs_ship_addr_sk col_name, d_year, d_qoy, i_category,
+        |        cs_ext_sales_price ext_sales_price
+        |    FROM catalog_sales, item, date_dim
+        |    WHERE cs_ship_addr_sk IS NULL
+        |      AND cs_sold_date_sk=d_date_sk
+        |      AND cs_item_sk=i_item_sk) foo
+        | GROUP BY channel, col_name, d_year, d_qoy, i_category
+        | ORDER BY channel, col_name, d_year, d_qoy, i_category
+        | limit 100
+       """.stripMargin.trim,
+       """
+        |
+        |
+        |
+        """.stripMargin.trim),
+      // the following four cases involve a MV of catalog_sales, item, date_dim
+      // q20
+      ("case_11",
+       """
+        |SELECT cs_ship_addr_sk , d_date, d_year, d_qoy, d_moy, i_category,i_item_sk, i_item_id,
+        |       i_item_desc, i_class, i_current_price, i_brand_id, i_class_id, i_category_id, i_manufact_id,
+        |       SUM(cs_ext_sales_price) sales_amt, 
+        |       SUM(cs_quantity*cs_list_price) sales,
+        |       SUM(cs_ext_discount_amt) as `excess discount amount`,
+        |       count(*) number_sales
+        |FROM catalog_sales, item, date_dim
+        |WHERE cs_item_sk = i_item_sk
+        |  AND cs_sold_date_sk = d_date_sk      
+        |GROUP BY i_brand_id, i_class_id, i_category_id, i_item_id, i_item_desc, i_category, i_class,
+        |         i_current_price, i_manufact_id, d_date, d_moy, d_qoy, d_year, cs_ship_addr_sk, i_item_sk
+       """.stripMargin.trim,
+       """
+        |select i_item_desc
+        |       ,i_category
+        |       ,i_class
+        |       ,i_current_price
+        |       ,sum(cs_ext_sales_price) as itemrevenue
+        |       ,sum(cs_ext_sales_price)*100/sum(sum(cs_ext_sales_price)) over
+        |           (partition by i_class) as revenueratio
+        | from catalog_sales, item, date_dim
+        | where cs_item_sk = i_item_sk
+        |   and i_category in ('Sports', 'Books', 'Home')
+        |   and cs_sold_date_sk = d_date_sk
+        | and d_date between cast('1999-02-22' as date)
+        |                           and (cast('1999-02-22' as date) + interval 30 days)
+        | group by i_item_id, i_item_desc, i_category, i_class, i_current_price
+        | order by i_category, i_class, i_item_id, i_item_desc, revenueratio
+        | limit 100
+       """.stripMargin.trim,
+       """
+        |SELECT gen_subquery_1.`i_item_desc`, gen_subquery_1.`i_category`, gen_subquery_1.`i_class`, gen_subquery_1.`i_current_price`, gen_subquery_1.`itemrevenue`, ((gen_subquery_1.`_w0` * 100.00BD) / CAST(gen_subquery_1.`_we0` AS DECIMAL(28,2))) AS `revenueratio` 
+        |FROM
+        |  (SELECT gen_subquery_0.`i_item_desc`, gen_subquery_0.`i_category`, gen_subquery_0.`i_class`, gen_subquery_0.`i_current_price`, gen_subquery_0.`itemrevenue`, gen_subquery_0.`_w0`, gen_subquery_0.`_w1`, gen_subquery_0.`i_item_id`, sum(gen_subquery_0.`_w1`) OVER (PARTITION BY gen_subquery_0.`i_class` ROWS BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING) AS `_we0` 
+        |  FROM
+        |    (SELECT gen_subsumer_0.`i_item_desc`, gen_subsumer_0.`i_category`, gen_subsumer_0.`i_class`, gen_subsumer_0.`i_current_price`, sum(gen_subsumer_0.`sales_amt`) AS `itemrevenue`, sum(gen_subsumer_0.`sales_amt`) AS `_w0`, sum(gen_subsumer_0.`sales_amt`) AS `_w1`, gen_subsumer_0.`i_item_id` 
+        |    FROM
+        |      (SELECT `cs_ship_addr_sk`, `d_date`, `d_year`, `d_qoy`, `d_moy`, `i_category`, `cs_ship_addr_sk`, `i_item_sk`, `i_item_id`, `i_item_desc`, `i_class`, `i_current_price`, `i_brand_id`, `i_class_id`, `i_category_id`, `i_manufact_id`, sum(`cs_ext_sales_price`) AS `sales_amt`, sum((CAST(CAST(`cs_quantity` AS DECIMAL(10,0)) AS DECIMAL(12,2)) * CAST(`cs_list_price` AS DECIMAL(12,2)))) AS `sales`, sum(`cs_ext_discount_amt`) AS `excess discount amount`, count(1) AS `number_sales` 
+        |      FROM
+        |        catalog_sales
+        |        INNER JOIN item   ON (`cs_item_sk` = `i_item_sk`)
+        |        INNER JOIN date_dim ON (`cs_sold_date_sk` = `d_date_sk`)
+        |      GROUP BY `i_brand_id`, `i_class_id`, `i_category_id`, `i_item_id`, `i_item_desc`, `i_category`, `i_class`, `i_current_price`, `i_manufact_id`, `d_date`, `d_moy`, `d_qoy`, `d_year`, `cs_ship_addr_sk`, `i_item_sk`) gen_subsumer_0 
+        |    WHERE
+        |      (gen_subsumer_0.`i_category` IN ('Sports', 'Books', 'Home')) AND (gen_subsumer_0.`d_date` >= DATE '1999-02-22') AND (gen_subsumer_0.`d_date` <= DATE '1999-03-24')
+        |    GROUP BY gen_subsumer_0.`i_item_id`, gen_subsumer_0.`i_item_desc`, gen_subsumer_0.`i_category`, gen_subsumer_0.`i_class`, gen_subsumer_0.`i_current_price`) gen_subquery_0 ) gen_subquery_1 
+        |ORDER BY gen_subquery_1.`i_category` ASC NULLS FIRST, gen_subquery_1.`i_class` ASC NULLS FIRST, gen_subquery_1.`i_item_id` ASC NULLS FIRST, gen_subquery_1.`i_item_desc` ASC NULLS FIRST, `revenueratio` ASC NULLS FIRST
+        |LIMIT 100
+       """.stripMargin.trim),
+      // q32
+      ("case_12",
+       """
+        |SELECT cs_ship_addr_sk , d_date, d_year, d_qoy, d_moy, i_category, cs_ship_addr_sk,i_item_sk, i_item_id,
+        |       i_item_desc, i_class, i_current_price, i_brand_id, i_class_id, i_category_id, i_manufact_id,
+        |       SUM(cs_ext_sales_price) sales_amt, 
+        |       SUM(cs_quantity*cs_list_price) sales,
+        |       SUM(cs_ext_discount_amt) as `excess discount amount`,
+        |       count(*) number_sales
+        |FROM catalog_sales, item, date_dim
+        |WHERE cs_item_sk = i_item_sk
+        |  AND cs_sold_date_sk = d_date_sk      
+        |GROUP BY i_brand_id, i_class_id, i_category_id, i_item_id, i_item_desc, i_category, i_class,
+        |         i_current_price, i_manufact_id, d_date, d_moy, d_qoy, d_year, cs_ship_addr_sk, i_item_sk
+       """.stripMargin.trim,
+       """
+        | select sum(cs_ext_discount_amt) as `excess discount amount`
+        | from
+        |    catalog_sales, item, date_dim
+        | where
+        |   i_manufact_id = 977
+        |   and i_item_sk = cs_item_sk
+        |   and d_date between '2000-01-27' and (cast('2000-01-27' as date) + interval 90 days)
+        |   and d_date_sk = cs_sold_date_sk
+        |   and cs_ext_discount_amt > (
+        |          select 1.3 * avg(cs_ext_discount_amt)
+        |          from catalog_sales, date_dim
+        |          where cs_item_sk = i_item_sk
+        |           and d_date between '2000-01-27' and (cast('2000-01-27' as date) + interval 90 days)
+        |           and d_date_sk = cs_sold_date_sk)
+        |limit 100
+       """.stripMargin.trim,
+       """
+        |
+        |
+        |
+        """.stripMargin.trim),
+      // q58 debug
+      ("case_13",
+       """
+        |SELECT cs_ship_addr_sk , d_date, d_year, d_qoy, d_moy, i_category, cs_ext_sales_price, cs_ship_addr_sk, i_item_sk, i_item_id,
+        |       i_item_desc, i_class, i_current_price, i_brand_id, i_class_id, i_category_id, i_manufact_id,
+        |       SUM(cs_ext_sales_price) sales_amt, 
+        |       SUM(cs_ext_sales_price)*100/sum(sum(cs_ext_sales_price)) over
+        |          (partition by i_class) as revenueratio,
+        |       SUM(cs_quantity*cs_list_price) sales,
+        |       SUM(cs_ext_discount_amt) as `excess discount amount`,
+        |       count(*) number_sales
+        |FROM catalog_sales, item, date_dim
+        |WHERE cs_item_sk = i_item_sk
+        |  AND cs_sold_date_sk = d_date_sk      
+        |GROUP BY i_brand_id, i_class_id, i_category_id, i_item_id, i_item_desc, i_category, i_class,
+        |         i_current_price, i_manufact_id, d_date, d_moy, d_qoy, d_year, cs_ship_addr_sk, i_item_sk
+       """.stripMargin.trim,
+       """
+        | with ss_items as
+        | (select i_item_id item_id, sum(ss_ext_sales_price) ss_item_rev
+        | from store_sales, item, date_dim
+        | where ss_item_sk = i_item_sk
+        |   and d_date in (select d_date
+        |                  from date_dim
+        |                  where d_week_seq = (select d_week_seq
+        |                                      from date_dim
+        |                                      where d_date = '2000-01-03'))
+        |   and ss_sold_date_sk   = d_date_sk
+        | group by i_item_id),
+        | cs_items as
+        | (select i_item_id item_id
+        |        ,sum(cs_ext_sales_price) cs_item_rev
+        |  from catalog_sales, item, date_dim
+        | where cs_item_sk = i_item_sk
+        |  and  d_date in (select d_date
+        |                  from date_dim
+        |                  where d_week_seq = (select d_week_seq
+        |                                      from date_dim
+        |                                      where d_date = '2000-01-03'))
+        |  and  cs_sold_date_sk = d_date_sk
+        | group by i_item_id),
+        | ws_items as
+        | (select i_item_id item_id, sum(ws_ext_sales_price) ws_item_rev
+        |  from web_sales, item, date_dim
+        | where ws_item_sk = i_item_sk
+        |  and  d_date in (select d_date
+        |                  from date_dim
+        |                  where d_week_seq =(select d_week_seq
+        |                                     from date_dim
+        |                                     where d_date = '2000-01-03'))
+        |  and ws_sold_date_sk   = d_date_sk
+        | group by i_item_id)
+        | select ss_items.item_id
+        |       ,ss_item_rev
+        |       ,ss_item_rev/(ss_item_rev+cs_item_rev+ws_item_rev)/3 * 100 ss_dev
+        |       ,cs_item_rev
+        |       ,cs_item_rev/(ss_item_rev+cs_item_rev+ws_item_rev)/3 * 100 cs_dev
+        |       ,ws_item_rev
+        |       ,ws_item_rev/(ss_item_rev+cs_item_rev+ws_item_rev)/3 * 100 ws_dev
+        |       ,(ss_item_rev+cs_item_rev+ws_item_rev)/3 average
+        | from ss_items,cs_items,ws_items
+        | where ss_items.item_id=cs_items.item_id
+        |   and ss_items.item_id=ws_items.item_id
+        |   and ss_item_rev between 0.9 * cs_item_rev and 1.1 * cs_item_rev
+        |   and ss_item_rev between 0.9 * ws_item_rev and 1.1 * ws_item_rev
+        |   and cs_item_rev between 0.9 * ss_item_rev and 1.1 * ss_item_rev
+        |   and cs_item_rev between 0.9 * ws_item_rev and 1.1 * ws_item_rev
+        |   and ws_item_rev between 0.9 * ss_item_rev and 1.1 * ss_item_rev
+        |   and ws_item_rev between 0.9 * cs_item_rev and 1.1 * cs_item_rev
+        | order by item_id, ss_item_rev
+        | limit 100
+       """.stripMargin.trim,
+       """
+        |
+        |
+        |
+        """.stripMargin.trim),
+      // q76
+      ("case_14",
+       """
+        |SELECT cs_ship_addr_sk , d_date, d_year, d_qoy, d_moy, i_category, cs_ext_sales_price, cs_ship_addr_sk, i_item_sk, i_item_id,
+        |       i_item_desc, i_class, i_current_price, i_brand_id, i_class_id, i_category_id, i_manufact_id,
+        |       SUM(cs_ext_sales_price) sales_amt, 
+        |       SUM(cs_quantity*cs_list_price) sales,
+        |       SUM(cs_ext_discount_amt) as `excess discount amount`,
+        |       count(*) number_sales
+        |FROM catalog_sales, item, date_dim
+        |WHERE cs_item_sk = i_item_sk
+        |  AND cs_sold_date_sk = d_date_sk      
+        |GROUP BY i_brand_id, i_class_id, i_category_id, i_item_id, i_item_desc, i_category, i_class,
+        |         i_current_price, i_manufact_id, d_date, d_moy, d_qoy, d_year, cs_ship_addr_sk, i_item_sk
+       """.stripMargin.trim,
+       """
+        | SELECT
+        |    channel, col_name, d_year, d_qoy, i_category, COUNT(*) sales_cnt,
+        |    SUM(ext_sales_price) sales_amt
+        | FROM(
+        |    SELECT
+        |        'store' as channel, ss_store_sk col_name, d_year, d_qoy, i_category,
+        |        ss_ext_sales_price ext_sales_price
+        |    FROM store_sales, item, date_dim
+        |    WHERE ss_store_sk IS NULL
+        |      AND ss_sold_date_sk=d_date_sk
+        |      AND ss_item_sk=i_item_sk
+        |    UNION ALL
+        |    SELECT
+        |        'web' as channel, ws_ship_customer_sk col_name, d_year, d_qoy, i_category,
+        |        ws_ext_sales_price ext_sales_price
+        |    FROM web_sales, item, date_dim
+        |    WHERE ws_ship_customer_sk IS NULL
+        |      AND ws_sold_date_sk=d_date_sk
+        |      AND ws_item_sk=i_item_sk
+        |    UNION ALL
+        |    SELECT
+        |        'catalog' as channel, cs_ship_addr_sk col_name, d_year, d_qoy, i_category,
+        |        cs_ext_sales_price ext_sales_price
+        |    FROM catalog_sales, item, date_dim
+        |    WHERE cs_ship_addr_sk IS NULL
+        |      AND cs_sold_date_sk=d_date_sk
+        |      AND cs_item_sk=i_item_sk) foo
+        | GROUP BY channel, col_name, d_year, d_qoy, i_category
+        | ORDER BY channel, col_name, d_year, d_qoy, i_category
+        | limit 100
+       """.stripMargin.trim,
+       """
+        |
+        |
+        |
+        """.stripMargin.trim),
+      // the following two cases involve a MV of store_sales and customer
+      // q23a
+      ("case_15",
+       """
+        | SELECT c_customer_sk,
+        |        sum(ss_quantity*ss_sales_price) csales
+        | FROM customer, store_sales
+        | WHERE c_customer_sk = ss_customer_sk
+        | GROUP BY c_customer_sk
+       """.stripMargin.trim,
+       """
+        | with frequent_ss_items as
+        | (select substr(i_item_desc,1,30) itemdesc,i_item_sk item_sk,d_date solddate,count(*) cnt
+        |  from store_sales, date_dim, item
+        |  where ss_sold_date_sk = d_date_sk
+        |    and ss_item_sk = i_item_sk
+        |    and d_year in (2000, 2000+1, 2000+2,2000+3)
+        |  group by substr(i_item_desc,1,30),i_item_sk,d_date
+        |  having count(*) >4),
+        | max_store_sales as
+        | (select max(csales) tpcds_cmax
+        |  from (select c_customer_sk,sum(ss_quantity*ss_sales_price) csales
+        |        from store_sales, customer, date_dim
+        |        where ss_customer_sk = c_customer_sk
+        |         and ss_sold_date_sk = d_date_sk
+        |         and d_year in (2000, 2000+1, 2000+2,2000+3)
+        |        group by c_customer_sk) x),
+        | best_ss_customer as
+        | (select c_customer_sk,sum(ss_quantity*ss_sales_price) ssales
+        |  from store_sales, customer
+        |  where ss_customer_sk = c_customer_sk
+        |  group by c_customer_sk
+        |  having sum(ss_quantity*ss_sales_price) > (50/100.0) *
+        |    (select * from max_store_sales))
+        | select sum(sales)
+        | from ((select cs_quantity*cs_list_price sales
+        |       from catalog_sales, date_dim
+        |       where d_year = 2000
+        |         and d_moy = 2
+        |         and cs_sold_date_sk = d_date_sk
+        |         and cs_item_sk in (select item_sk from frequent_ss_items)
+        |         and cs_bill_customer_sk in (select c_customer_sk from best_ss_customer))
+        |      union all
+        |      (select ws_quantity*ws_list_price sales
+        |       from web_sales, date_dim
+        |       where d_year = 2000
+        |         and d_moy = 2
+        |         and ws_sold_date_sk = d_date_sk
+        |         and ws_item_sk in (select item_sk from frequent_ss_items)
+        |         and ws_bill_customer_sk in (select c_customer_sk from best_ss_customer))) y
+        | limit 100
+       """.stripMargin.trim,
+       """
+        |
+        |
+        |
+        """.stripMargin.trim),
+      // q23b
+      ("case_16",
+       """
+        | SELECT c_customer_sk,
+        |        sum(ss_quantity*ss_sales_price) csales
+        | FROM customer, store_sales
+        | WHERE c_customer_sk = ss_customer_sk
+        | GROUP BY c_customer_sk
+       """.stripMargin.trim,
+       """
+        |
+        | with frequent_ss_items as
+        | (select substr(i_item_desc,1,30) itemdesc,i_item_sk item_sk,d_date solddate,count(*) cnt
+        |  from store_sales, date_dim, item
+        |  where ss_sold_date_sk = d_date_sk
+        |    and ss_item_sk = i_item_sk
+        |    and d_year in (2000, 2000+1, 2000+2,2000+3)
+        |  group by substr(i_item_desc,1,30),i_item_sk,d_date
+        |  having count(*) > 4),
+        | max_store_sales as
+        | (select max(csales) tpcds_cmax
+        |  from (select c_customer_sk,sum(ss_quantity*ss_sales_price) csales
+        |        from store_sales, customer, date_dim
+        |        where ss_customer_sk = c_customer_sk
+        |         and ss_sold_date_sk = d_date_sk
+        |         and d_year in (2000, 2000+1, 2000+2,2000+3)
+        |        group by c_customer_sk) x),
+        | best_ss_customer as
+        | (select c_customer_sk,sum(ss_quantity*ss_sales_price) ssales
+        |  from store_sales
+        |      ,customer
+        |  where ss_customer_sk = c_customer_sk
+        |  group by c_customer_sk
+        |  having sum(ss_quantity*ss_sales_price) > (50/100.0) *
+        |    (select * from max_store_sales))
+        | select c_last_name,c_first_name,sales
+        | from ((select c_last_name,c_first_name,sum(cs_quantity*cs_list_price) sales
+        |        from catalog_sales, customer, date_dim
+        |        where d_year = 2000
+        |         and d_moy = 2
+        |         and cs_sold_date_sk = d_date_sk
+        |         and cs_item_sk in (select item_sk from frequent_ss_items)
+        |         and cs_bill_customer_sk in (select c_customer_sk from best_ss_customer)
+        |         and cs_bill_customer_sk = c_customer_sk
+        |       group by c_last_name,c_first_name)
+        |      union all
+        |      (select c_last_name,c_first_name,sum(ws_quantity*ws_list_price) sales
+        |       from web_sales, customer, date_dim
+        |       where d_year = 2000
+        |         and d_moy = 2
+        |         and ws_sold_date_sk = d_date_sk
+        |         and ws_item_sk in (select item_sk from frequent_ss_items)
+        |         and ws_bill_customer_sk in (select c_customer_sk from best_ss_customer)
+        |         and ws_bill_customer_sk = c_customer_sk
+        |       group by c_last_name,c_first_name)) y
+        |     order by c_last_name,c_first_name,sales
+        | limit 100
+       """.stripMargin.trim,
+       """
+        |SELECT gen_subquery_6.`c_last_name`, gen_subquery_6.`c_first_name`, gen_subquery_6.`sales` 
+        |FROM
+        |  (SELECT gen_subquery_2.`c_last_name`, gen_subquery_2.`c_first_name`, sum((CAST(CAST(gen_subquery_2.`cs_quantity` AS DECIMAL(10,0)) AS DECIMAL(12,2)) * CAST(gen_subquery_2.`cs_list_price` AS DECIMAL(12,2)))) AS `sales` 
+        |  FROM
+        |    (SELECT `cs_quantity`, `cs_list_price`, `c_first_name`, `c_last_name` 
+        |    FROM
+        |      catalog_sales
+        |      LEFT SEMI JOIN (SELECT item.`i_item_sk` AS `item_sk`, count(1) AS `count(1)` 
+        |      FROM
+        |        store_sales
+        |        INNER JOIN date_dim ON (date_dim.`d_year` IN (2000, 2001, 2002, 2003)) AND (store_sales.`ss_sold_date_sk` = date_dim.`d_date_sk`)
+        |        INNER JOIN item   ON (store_sales.`ss_item_sk` = item.`i_item_sk`)
+        |      GROUP BY substring(item.`i_item_desc`, 1, 30), item.`i_item_sk`, date_dim.`d_date`) gen_subquery_0  ON (gen_subquery_0.`count(1)` > 4L) AND (catalog_sales.`cs_item_sk` = gen_subquery_0.`item_sk`)
+        |      LEFT SEMI JOIN (SELECT customer.`c_customer_sk`, sum((CAST(CAST(store_sales.`ss_quantity` AS DECIMAL(10,0)) AS DECIMAL(12,2)) * CAST(store_sales.`ss_sales_price` AS DECIMAL(12,2)))) AS `csales` 
+        |      FROM
+        |        customer  
+        |        INNER JOIN store_sales ON (customer.`c_customer_sk` = store_sales.`ss_customer_sk`)
+        |      GROUP BY customer.`c_customer_sk`) gen_subquery_1  ON (catalog_sales.`cs_bill_customer_sk` = gen_subquery_1.`c_customer_sk`)
+        |      INNER JOIN customer ON (catalog_sales.`cs_bill_customer_sk` = customer.`c_customer_sk`)
+        |      INNER JOIN date_dim ON (date_dim.`d_year` = 2000) AND (date_dim.`d_moy` = 2) AND (catalog_sales.`cs_sold_date_sk` = date_dim.`d_date_sk`)
+        |    WHERE
+        |      (CAST(`sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#219 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_sales_price#222 as decimal(12,2)))), DecimalType(18,2)))` AS DECIMAL(38,8)) > (0.500000BD * CAST((SELECT max(gen_expression_0_0.`csales`) AS `tpcds_cmax`   FROM  (SELECT sum((CAST(CAST(store_sales.`ss_quantity` AS DECIMAL(10,0)) AS DECIMAL(12,2)) * CAST(store_sales.`ss_sales_price` AS DECIMAL(12,2)))) AS `csales`   FROM  store_sales  INNER JOIN customer   ON (store_sales.`ss_customer_sk` = customer.`c_customer_sk`)  INNER JOIN date_dim ON (date_dim.`d_year` IN (2000, 2001, 2002, 2003)) AND (store_sales.`ss_sold_date_sk` = date_dim.`d_date_sk`)  GROUP BY customer.`c_customer_sk`) gen_expression_0_0 ) AS DECIMAL(32,6))))) gen_subquery_2 
+        |  GROUP BY gen_subquery_2.`c_last_name`, gen_subquery_2.`c_first_name`
+        |  UNION ALL
+        |  SELECT gen_subquery_5.`c_last_name`, gen_subquery_5.`c_first_name`, sum((CAST(CAST(gen_subquery_5.`ws_quantity` AS DECIMAL(10,0)) AS DECIMAL(12,2)) * CAST(gen_subquery_5.`ws_list_price` AS DECIMAL(12,2)))) AS `sales` 
+        |  FROM
+        |    (SELECT `ws_quantity`, `ws_list_price`, `c_first_name`, `c_last_name` 
+        |    FROM
+        |      web_sales
+        |      LEFT SEMI JOIN (SELECT item.`i_item_sk` AS `item_sk`, count(1) AS `count(1)` 
+        |      FROM
+        |        store_sales
+        |        INNER JOIN date_dim ON (date_dim.`d_year` IN (2000, 2001, 2002, 2003)) AND (store_sales.`ss_sold_date_sk` = date_dim.`d_date_sk`)
+        |        INNER JOIN item   ON (store_sales.`ss_item_sk` = item.`i_item_sk`)
+        |      GROUP BY substring(item.`i_item_desc`, 1, 30), item.`i_item_sk`, date_dim.`d_date`) gen_subquery_3  ON (gen_subquery_3.`count(1)` > 4L) AND (web_sales.`ws_item_sk` = gen_subquery_3.`item_sk`)
+        |      LEFT SEMI JOIN (SELECT customer.`c_customer_sk`, sum((CAST(CAST(store_sales.`ss_quantity` AS DECIMAL(10,0)) AS DECIMAL(12,2)) * CAST(store_sales.`ss_sales_price` AS DECIMAL(12,2)))) AS `csales` 
+        |      FROM
+        |        customer  
+        |        INNER JOIN store_sales ON (customer.`c_customer_sk` = store_sales.`ss_customer_sk`)
+        |      GROUP BY customer.`c_customer_sk`) gen_subquery_4  ON (web_sales.`ws_bill_customer_sk` = gen_subquery_4.`c_customer_sk`)
+        |      INNER JOIN customer ON (web_sales.`ws_bill_customer_sk` = customer.`c_customer_sk`)
+        |      INNER JOIN date_dim ON (date_dim.`d_year` = 2000) AND (date_dim.`d_moy` = 2) AND (web_sales.`ws_sold_date_sk` = date_dim.`d_date_sk`)
+        |    WHERE
+        |      (CAST(`sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#219 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_sales_price#222 as decimal(12,2)))), DecimalType(18,2)))` AS DECIMAL(38,8)) > (0.500000BD * CAST((SELECT max(gen_expression_1_0.`csales`) AS `tpcds_cmax`   FROM  (SELECT sum((CAST(CAST(store_sales.`ss_quantity` AS DECIMAL(10,0)) AS DECIMAL(12,2)) * CAST(store_sales.`ss_sales_price` AS DECIMAL(12,2)))) AS `csales`   FROM  store_sales  INNER JOIN customer   ON (store_sales.`ss_customer_sk` = customer.`c_customer_sk`)  INNER JOIN date_dim ON (date_dim.`d_year` IN (2000, 2001, 2002, 2003)) AND (store_sales.`ss_sold_date_sk` = date_dim.`d_date_sk`)  GROUP BY customer.`c_customer_sk`) gen_expression_1_0 ) AS DECIMAL(32,6))))) gen_subquery_5 
+        |  GROUP BY gen_subquery_5.`c_last_name`, gen_subquery_5.`c_first_name`) gen_subquery_6 
+        |ORDER BY gen_subquery_6.`c_last_name` ASC NULLS FIRST, gen_subquery_6.`c_first_name` ASC NULLS FIRST, gen_subquery_6.`sales` ASC NULLS FIRST
+        |LIMIT 100
+       """.stripMargin.trim),
+      // the following cases involve a MV of store_sales, customer and date_dim
+      // q4
+      ("case_17",
+       """
+        | SELECT c_customer_id customer_id,
+        |        c_first_name customer_first_name,
+        |        c_last_name customer_last_name,
+        |        c_preferred_cust_flag customer_preferred_cust_flag,
+        |        c_birth_country customer_birth_country,
+        |        c_login customer_login,
+        |        c_email_address customer_email_address,
+        |        d_year dyear,
+        |        d_date ddate,
+        |        sum(((ss_ext_list_price-ss_ext_wholesale_cost-ss_ext_discount_amt)+ss_ext_sales_price)/2) year_total,
+        |        sum(ss_ext_list_price-ss_ext_discount_amt) year_total1,
+        |        sum(ss_net_paid) year_total_74,
+        |        's' sale_type
+        | FROM customer, store_sales, date_dim
+        | WHERE c_customer_sk = ss_customer_sk AND ss_sold_date_sk = d_date_sk
+        | GROUP BY c_customer_id,
+        |          c_first_name,
+        |          c_last_name,
+        |          c_preferred_cust_flag,
+        |          c_birth_country,
+        |          c_login,
+        |          c_email_address,
+        |          d_year,
+        |          d_date
+       """.stripMargin.trim,
+       """
+        |WITH year_total AS (
+        | SELECT c_customer_id customer_id,
+        |        c_first_name customer_first_name,
+        |        c_last_name customer_last_name,
+        |        c_preferred_cust_flag customer_preferred_cust_flag,
+        |        c_birth_country customer_birth_country,
+        |        c_login customer_login,
+        |        c_email_address customer_email_address,
+        |        d_year dyear,
+        |        sum(((ss_ext_list_price-ss_ext_wholesale_cost-ss_ext_discount_amt)+ss_ext_sales_price)/2) year_total,
+        |        's' sale_type
+        | FROM customer, store_sales, date_dim
+        | WHERE c_customer_sk = ss_customer_sk AND ss_sold_date_sk = d_date_sk
+        | GROUP BY c_customer_id,
+        |          c_first_name,
+        |          c_last_name,
+        |          c_preferred_cust_flag,
+        |          c_birth_country,
+        |          c_login,
+        |          c_email_address,
+        |          d_year
+        | UNION ALL
+        | SELECT c_customer_id customer_id,
+        |        c_first_name customer_first_name,
+        |        c_last_name customer_last_name,
+        |        c_preferred_cust_flag customer_preferred_cust_flag,
+        |        c_birth_country customer_birth_country,
+        |        c_login customer_login,
+        |        c_email_address customer_email_address,
+        |        d_year dyear,
+        |        sum((((cs_ext_list_price-cs_ext_wholesale_cost-cs_ext_discount_amt)+cs_ext_sales_price)/2) ) year_total,
+        |        'c' sale_type
+        | FROM customer, catalog_sales, date_dim
+        | WHERE c_customer_sk = cs_bill_customer_sk AND cs_sold_date_sk = d_date_sk
+        | GROUP BY c_customer_id,
+        |          c_first_name,
+        |          c_last_name,
+        |          c_preferred_cust_flag,
+        |          c_birth_country,
+        |          c_login,
+        |          c_email_address,
+        |          d_year
+        | UNION ALL
+        | SELECT c_customer_id customer_id
+        |       ,c_first_name customer_first_name
+        |       ,c_last_name customer_last_name
+        |       ,c_preferred_cust_flag customer_preferred_cust_flag
+        |       ,c_birth_country customer_birth_country
+        |       ,c_login customer_login
+        |       ,c_email_address customer_email_address
+        |       ,d_year dyear
+        |       ,sum((((ws_ext_list_price-ws_ext_wholesale_cost-ws_ext_discount_amt)+ws_ext_sales_price)/2) ) year_total
+        |       ,'w' sale_type
+        | FROM customer, web_sales, date_dim
+        | WHERE c_customer_sk = ws_bill_customer_sk AND ws_sold_date_sk = d_date_sk
+        | GROUP BY c_customer_id,
+        |          c_first_name,
+        |          c_last_name,
+        |          c_preferred_cust_flag,
+        |          c_birth_country,
+        |          c_login,
+        |          c_email_address,
+        |          d_year)
+        | SELECT
+        |   t_s_secyear.customer_id,
+        |   t_s_secyear.customer_first_name,
+        |   t_s_secyear.customer_last_name,
+        |   t_s_secyear.customer_preferred_cust_flag,
+        |   t_s_secyear.customer_birth_country,
+        |   t_s_secyear.customer_login,
+        |   t_s_secyear.customer_email_address
+        | FROM year_total t_s_firstyear, year_total t_s_secyear, year_total t_c_firstyear,
+        |      year_total t_c_secyear, year_total t_w_firstyear, year_total t_w_secyear
+        | WHERE t_s_secyear.customer_id = t_s_firstyear.customer_id
+        |   and t_s_firstyear.customer_id = t_c_secyear.customer_id
+        |   and t_s_firstyear.customer_id = t_c_firstyear.customer_id
+        |   and t_s_firstyear.customer_id = t_w_firstyear.customer_id
+        |   and t_s_firstyear.customer_id = t_w_secyear.customer_id
+        |   and t_s_firstyear.sale_type = 's'
+        |   and t_c_firstyear.sale_type = 'c'
+        |   and t_w_firstyear.sale_type = 'w'
+        |   and t_s_secyear.sale_type = 's'
+        |   and t_c_secyear.sale_type = 'c'
+        |   and t_w_secyear.sale_type = 'w'
+        |   and t_s_firstyear.dyear = 2001
+        |   and t_s_secyear.dyear = 2001+1
+        |   and t_c_firstyear.dyear = 2001
+        |   and t_c_secyear.dyear = 2001+1
+        |   and t_w_firstyear.dyear = 2001
+        |   and t_w_secyear.dyear = 2001+1
+        |   and t_s_firstyear.year_total > 0
+        |   and t_c_firstyear.year_total > 0
+        |   and t_w_firstyear.year_total > 0
+        |   and case when t_c_firstyear.year_total > 0 then t_c_secyear.year_total / t_c_firstyear.year_total else null end
+        |           > case when t_s_firstyear.year_total > 0 then t_s_secyear.year_total / t_s_firstyear.year_total else null end
+        |   and case when t_c_firstyear.year_total > 0 then t_c_secyear.year_total / t_c_firstyear.year_total else null end
+        |           > case when t_w_firstyear.year_total > 0 then t_w_secyear.year_total / t_w_firstyear.year_total else null end
+        | ORDER BY
+        |   t_s_secyear.customer_id,
+        |   t_s_secyear.customer_first_name,
+        |   t_s_secyear.customer_last_name,
+        |   t_s_secyear.customer_preferred_cust_flag,
+        |   t_s_secyear.customer_birth_country,
+        |   t_s_secyear.customer_login,
+        |   t_s_secyear.customer_email_address
+        | LIMIT 100
+       """.stripMargin.trim,
+       """
+        |
+        |
+        |
+        """.stripMargin.trim),
+      // q11
+      ("case_18",
+       """
+        | SELECT c_customer_id customer_id,
+        |        c_first_name customer_first_name,
+        |        c_last_name customer_last_name,
+        |        c_preferred_cust_flag customer_preferred_cust_flag,
+        |        c_birth_country customer_birth_country,
+        |        c_login customer_login,
+        |        c_email_address customer_email_address,
+        |        d_year dyear,
+        |        d_date ddate,
+        |        sum(((ss_ext_list_price-ss_ext_wholesale_cost-ss_ext_discount_amt)+ss_ext_sales_price)/2) year_total,
+        |        sum(ss_ext_list_price-ss_ext_discount_amt) year_total1,
+        |        sum(ss_net_paid) year_total_74,
+        |        's' sale_type
+        | FROM customer, store_sales, date_dim
+        | WHERE c_customer_sk = ss_customer_sk AND ss_sold_date_sk = d_date_sk
+        | GROUP BY c_customer_id,
+        |          c_first_name,
+        |          c_last_name,
+        |          c_preferred_cust_flag,
+        |          c_birth_country,
+        |          c_login,
+        |          c_email_address,
+        |          d_year,
+        |          d_date
+       """.stripMargin.trim,
+       """
+        | with year_total as (
+        | select c_customer_id customer_id
+        |       ,c_first_name customer_first_name
+        |       ,c_last_name customer_last_name
+        |       ,c_preferred_cust_flag customer_preferred_cust_flag
+        |       ,c_birth_country customer_birth_country
+        |       ,c_login customer_login
+        |       ,c_email_address customer_email_address
+        |       ,d_year dyear
+        |       ,sum(ss_ext_list_price-ss_ext_discount_amt) year_total
+        |       ,'s' sale_type
+        | from customer, store_sales, date_dim
+        | where c_customer_sk = ss_customer_sk
+        |   and ss_sold_date_sk = d_date_sk
+        | group by c_customer_id
+        |         ,c_first_name
+        |         ,c_last_name
+        |         ,d_year
+        |         ,c_preferred_cust_flag
+        |         ,c_birth_country
+        |         ,c_login
+        |         ,c_email_address
+        |         ,d_year
+        | union all
+        | select c_customer_id customer_id
+        |       ,c_first_name customer_first_name
+        |       ,c_last_name customer_last_name
+        |       ,c_preferred_cust_flag customer_preferred_cust_flag
+        |       ,c_birth_country customer_birth_country
+        |       ,c_login customer_login
+        |       ,c_email_address customer_email_address
+        |       ,d_year dyear
+        |       ,sum(ws_ext_list_price-ws_ext_discount_amt) year_total
+        |       ,'w' sale_type
+        | from customer, web_sales, date_dim
+        | where c_customer_sk = ws_bill_customer_sk
+        |   and ws_sold_date_sk = d_date_sk
+        | group by
+        |    c_customer_id, c_first_name, c_last_name, c_preferred_cust_flag, c_birth_country,
+        |    c_login, c_email_address, d_year)
+        | select
+        |    t_s_secyear.customer_preferred_cust_flag
+        | from year_total t_s_firstyear
+        |     ,year_total t_s_secyear
+        |     ,year_total t_w_firstyear
+        |     ,year_total t_w_secyear
+        | where t_s_secyear.customer_id = t_s_firstyear.customer_id
+        |         and t_s_firstyear.customer_id = t_w_secyear.customer_id
+        |         and t_s_firstyear.customer_id = t_w_firstyear.customer_id
+        |         and t_s_firstyear.sale_type = 's'
+        |         and t_w_firstyear.sale_type = 'w'
+        |         and t_s_secyear.sale_type = 's'
+        |         and t_w_secyear.sale_type = 'w'
+        |         and t_s_firstyear.dyear = 2001
+        |         and t_s_secyear.dyear = 2001+1
+        |         and t_w_firstyear.dyear = 2001
+        |         and t_w_secyear.dyear = 2001+1
+        |         and t_s_firstyear.year_total > 0
+        |         and t_w_firstyear.year_total > 0
+        |         and case when t_w_firstyear.year_total > 0 then t_w_secyear.year_total / t_w_firstyear.year_total else null end
+        |             > case when t_s_firstyear.year_total > 0 then t_s_secyear.year_total / t_s_firstyear.year_total else null end
+        | order by t_s_secyear.customer_preferred_cust_flag
+        | LIMIT 100
+       """.stripMargin.trim,
+       """
+        |SELECT gen_subquery_1.`customer_preferred_cust_flag` 
+        |FROM
+        |  (SELECT gen_subsumer_0.`customer_id` AS `customer_id`, sum(gen_subsumer_0.`year_total1`) AS `year_total` 
+        |  FROM
+        |    (SELECT `c_customer_id` AS `customer_id`, `c_first_name` AS `customer_first_name`, `c_last_name` AS `customer_last_name`, `c_preferred_cust_flag` AS `customer_preferred_cust_flag`, `c_birth_country` AS `customer_birth_country`, `c_login` AS `customer_login`, `c_email_address` AS `customer_email_address`, `d_year` AS `dyear`, `d_date` AS `ddate`, sum((CAST((((CAST(`ss_ext_list_price` AS DECIMAL(8,2)) - CAST(`ss_ext_wholesale_cost` AS DECIMAL(8,2))) - CAST(`ss_ext_discount_amt` AS DECIMAL(8,2))) + CAST(`ss_ext_sales_price` AS DECIMAL(8,2))) AS DECIMAL(12,2)) / 2.00BD)) AS `year_total`, sum((CAST(`ss_ext_list_price` AS DECIMAL(8,2)) - CAST(`ss_ext_discount_amt` AS DECIMAL(8,2)))) AS `year_total1`, sum(`ss_net_paid`) AS `year_total_74`, 's' AS `sale_type` 
+        |    FROM
+        |      customer
+        |      INNER JOIN store_sales ON (`c_customer_sk` = `ss_customer_sk`)
+        |      INNER JOIN date_dim ON (`ss_sold_date_sk` = `d_date_sk`)
+        |    GROUP BY `c_customer_id`, `c_first_name`, `c_last_name`, `c_preferred_cust_flag`, `c_birth_country`, `c_login`, `c_email_address`, `d_year`, `d_date`) gen_subsumer_0 
+        |  WHERE
+        |    (gen_subsumer_0.`dyear` = 2001)
+        |  GROUP BY gen_subsumer_0.`customer_id`, gen_subsumer_0.`customer_first_name`, gen_subsumer_0.`customer_last_name`, gen_subsumer_0.`dyear`, gen_subsumer_0.`customer_preferred_cust_flag`, gen_subsumer_0.`customer_birth_country`, gen_subsumer_0.`customer_login`, gen_subsumer_0.`customer_email_address`
+        |  HAVING (sum(gen_subsumer_0.`year_total1`) > 0.00BD)
+        |  UNION ALL
+        |  SELECT customer.`c_customer_id` AS `customer_id`, sum((CAST(web_sales.`ws_ext_list_price` AS DECIMAL(8,2)) - CAST(web_sales.`ws_ext_discount_amt` AS DECIMAL(8,2)))) AS `year_total` 
+        |  FROM
+        |    customer
+        |    INNER JOIN web_sales ON (customer.`c_customer_sk` = web_sales.`ws_bill_customer_sk`)
+        |    INNER JOIN date_dim ON (web_sales.`ws_sold_date_sk` = date_dim.`d_date_sk`)
+        |  WHERE
+        |    false
+        |  GROUP BY customer.`c_customer_id`, customer.`c_first_name`, customer.`c_last_name`, customer.`c_preferred_cust_flag`, customer.`c_birth_country`, customer.`c_login`, customer.`c_email_address`, date_dim.`d_year`
+        |  HAVING (`year_total` > 0.00BD)) gen_subquery_0 
+        |  INNER JOIN (SELECT gen_subsumer_1.`customer_id` AS `customer_id`, gen_subsumer_1.`customer_preferred_cust_flag` AS `customer_preferred_cust_flag`, sum(gen_subsumer_1.`year_total1`) AS `year_total` 
+        |  FROM
+        |    (SELECT `c_customer_id` AS `customer_id`, `c_first_name` AS `customer_first_name`, `c_last_name` AS `customer_last_name`, `c_preferred_cust_flag` AS `customer_preferred_cust_flag`, `c_birth_country` AS `customer_birth_country`, `c_login` AS `customer_login`, `c_email_address` AS `customer_email_address`, `d_year` AS `dyear`, `d_date` AS `ddate`, sum((CAST((((CAST(`ss_ext_list_price` AS DECIMAL(8,2)) - CAST(`ss_ext_wholesale_cost` AS DECIMAL(8,2))) - CAST(`ss_ext_discount_amt` AS DECIMAL(8,2))) + CAST(`ss_ext_sales_price` AS DECIMAL(8,2))) AS DECIMAL(12,2)) / 2.00BD)) AS `year_total`, sum((CAST(`ss_ext_list_price` AS DECIMAL(8,2)) - CAST(`ss_ext_discount_amt` AS DECIMAL(8,2)))) AS `year_total1`, sum(`ss_net_paid`) AS `year_total_74`, 's' AS `sale_type` 
+        |    FROM
+        |      customer
+        |      INNER JOIN store_sales ON (`c_customer_sk` = `ss_customer_sk`)
+        |      INNER JOIN date_dim ON (`ss_sold_date_sk` = `d_date_sk`)
+        |    GROUP BY `c_customer_id`, `c_first_name`, `c_last_name`, `c_preferred_cust_flag`, `c_birth_country`, `c_login`, `c_email_address`, `d_year`, `d_date`) gen_subsumer_1 
+        |  WHERE
+        |    (gen_subsumer_1.`dyear` = 2002)
+        |  GROUP BY gen_subsumer_1.`customer_id`, gen_subsumer_1.`customer_first_name`, gen_subsumer_1.`customer_last_name`, gen_subsumer_1.`dyear`, gen_subsumer_1.`customer_preferred_cust_flag`, gen_subsumer_1.`customer_birth_country`, gen_subsumer_1.`customer_login`, gen_subsumer_1.`customer_email_address`
+        |  UNION ALL
+        |  SELECT customer.`c_customer_id` AS `customer_id`, customer.`c_preferred_cust_flag` AS `customer_preferred_cust_flag`, sum((CAST(web_sales.`ws_ext_list_price` AS DECIMAL(8,2)) - CAST(web_sales.`ws_ext_discount_amt` AS DECIMAL(8,2)))) AS `year_total` 
+        |  FROM
+        |    customer
+        |    INNER JOIN web_sales ON (customer.`c_customer_sk` = web_sales.`ws_bill_customer_sk`)
+        |    INNER JOIN date_dim ON (web_sales.`ws_sold_date_sk` = date_dim.`d_date_sk`)
+        |  WHERE
+        |    false
+        |  GROUP BY customer.`c_customer_id`, customer.`c_first_name`, customer.`c_last_name`, customer.`c_preferred_cust_flag`, customer.`c_birth_country`, customer.`c_login`, customer.`c_email_address`, date_dim.`d_year`) gen_subquery_1  ON (gen_subquery_1.`customer_id` = gen_subquery_0.`customer_id`)
+        |  INNER JOIN (SELECT gen_subsumer_2.`customer_id` AS `customer_id`, sum(gen_subsumer_2.`year_total1`) AS `year_total` 
+        |  FROM
+        |    (SELECT `c_customer_id` AS `customer_id`, `c_first_name` AS `customer_first_name`, `c_last_name` AS `customer_last_name`, `c_preferred_cust_flag` AS `customer_preferred_cust_flag`, `c_birth_country` AS `customer_birth_country`, `c_login` AS `customer_login`, `c_email_address` AS `customer_email_address`, `d_year` AS `dyear`, `d_date` AS `ddate`, sum((CAST((((CAST(`ss_ext_list_price` AS DECIMAL(8,2)) - CAST(`ss_ext_wholesale_cost` AS DECIMAL(8,2))) - CAST(`ss_ext_discount_amt` AS DECIMAL(8,2))) + CAST(`ss_ext_sales_price` AS DECIMAL(8,2))) AS DECIMAL(12,2)) / 2.00BD)) AS `year_total`, sum((CAST(`ss_ext_list_price` AS DECIMAL(8,2)) - CAST(`ss_ext_discount_amt` AS DECIMAL(8,2)))) AS `year_total1`, sum(`ss_net_paid`) AS `year_total_74`, 's' AS `sale_type` 
+        |    FROM
+        |      customer
+        |      INNER JOIN store_sales ON (`c_customer_sk` = `ss_customer_sk`)
+        |      INNER JOIN date_dim ON (`ss_sold_date_sk` = `d_date_sk`)
+        |    GROUP BY `c_customer_id`, `c_first_name`, `c_last_name`, `c_preferred_cust_flag`, `c_birth_country`, `c_login`, `c_email_address`, `d_year`, `d_date`) gen_subsumer_2 
+        |  WHERE
+        |    false
+        |  GROUP BY gen_subsumer_2.`customer_id`, gen_subsumer_2.`customer_first_name`, gen_subsumer_2.`customer_last_name`, gen_subsumer_2.`dyear`, gen_subsumer_2.`customer_preferred_cust_flag`, gen_subsumer_2.`customer_birth_country`, gen_subsumer_2.`customer_login`, gen_subsumer_2.`customer_email_address`
+        |  HAVING (sum(gen_subsumer_2.`year_total1`) > 0.00BD)
+        |  UNION ALL
+        |  SELECT customer.`c_customer_id` AS `customer_id`, sum((CAST(web_sales.`ws_ext_list_price` AS DECIMAL(8,2)) - CAST(web_sales.`ws_ext_discount_amt` AS DECIMAL(8,2)))) AS `year_total` 
+        |  FROM
+        |    customer
+        |    INNER JOIN web_sales ON (customer.`c_customer_sk` = web_sales.`ws_bill_customer_sk`)
+        |    INNER JOIN date_dim ON (date_dim.`d_year` = 2001) AND (web_sales.`ws_sold_date_sk` = date_dim.`d_date_sk`)
+        |  GROUP BY customer.`c_customer_id`, customer.`c_first_name`, customer.`c_last_name`, customer.`c_preferred_cust_flag`, customer.`c_birth_country`, customer.`c_login`, customer.`c_email_address`, date_dim.`d_year`
+        |  HAVING (`year_total` > 0.00BD)) gen_subquery_2  ON (gen_subquery_0.`customer_id` = gen_subquery_2.`customer_id`)
+        |  INNER JOIN (SELECT gen_subsumer_3.`customer_id` AS `customer_id`, sum(gen_subsumer_3.`year_total1`) AS `year_total` 
+        |  FROM
+        |    (SELECT `c_customer_id` AS `customer_id`, `c_first_name` AS `customer_first_name`, `c_last_name` AS `customer_last_name`, `c_preferred_cust_flag` AS `customer_preferred_cust_flag`, `c_birth_country` AS `customer_birth_country`, `c_login` AS `customer_login`, `c_email_address` AS `customer_email_address`, `d_year` AS `dyear`, `d_date` AS `ddate`, sum((CAST((((CAST(`ss_ext_list_price` AS DECIMAL(8,2)) - CAST(`ss_ext_wholesale_cost` AS DECIMAL(8,2))) - CAST(`ss_ext_discount_amt` AS DECIMAL(8,2))) + CAST(`ss_ext_sales_price` AS DECIMAL(8,2))) AS DECIMAL(12,2)) / 2.00BD)) AS `year_total`, sum((CAST(`ss_ext_list_price` AS DECIMAL(8,2)) - CAST(`ss_ext_discount_amt` AS DECIMAL(8,2)))) AS `year_total1`, sum(`ss_net_paid`) AS `year_total_74`, 's' AS `sale_type` 
+        |    FROM
+        |      customer
+        |      INNER JOIN store_sales ON (`c_customer_sk` = `ss_customer_sk`)
+        |      INNER JOIN date_dim ON (`ss_sold_date_sk` = `d_date_sk`)
+        |    GROUP BY `c_customer_id`, `c_first_name`, `c_last_name`, `c_preferred_cust_flag`, `c_birth_country`, `c_login`, `c_email_address`, `d_year`, `d_date`) gen_subsumer_3 
+        |  WHERE
+        |    false
+        |  GROUP BY gen_subsumer_3.`customer_id`, gen_subsumer_3.`customer_first_name`, gen_subsumer_3.`customer_last_name`, gen_subsumer_3.`dyear`, gen_subsumer_3.`customer_preferred_cust_flag`, gen_subsumer_3.`customer_birth_country`, gen_subsumer_3.`customer_login`, gen_subsumer_3.`customer_email_address`
+        |  UNION ALL
+        |  SELECT customer.`c_customer_id` AS `customer_id`, sum((CAST(web_sales.`ws_ext_list_price` AS DECIMAL(8,2)) - CAST(web_sales.`ws_ext_discount_amt` AS DECIMAL(8,2)))) AS `year_total` 
+        |  FROM
+        |    customer
+        |    INNER JOIN web_sales ON (customer.`c_customer_sk` = web_sales.`ws_bill_customer_sk`)
+        |    INNER JOIN date_dim ON (date_dim.`d_year` = 2002) AND (web_sales.`ws_sold_date_sk` = date_dim.`d_date_sk`)
+        |  GROUP BY customer.`c_customer_id`, customer.`c_first_name`, customer.`c_last_name`, customer.`c_preferred_cust_flag`, customer.`c_birth_country`, customer.`c_login`, customer.`c_email_address`, date_dim.`d_year`) gen_subquery_3 
+        |WHERE
+        |  (gen_subquery_0.`customer_id` = gen_subquery_3.`customer_id`) AND (CASE WHEN (gen_subquery_2.`year_total` > 0.00BD) THEN (gen_subquery_3.`year_total` / gen_subquery_2.`year_total`) ELSE CAST(NULL AS DECIMAL(38,20)) END > CASE WHEN (gen_subquery_0.`year_total` > 0.00BD) THEN (gen_subquery_1.`year_total` / gen_subquery_0.`year_total`) ELSE CAST(NULL AS DECIMAL(38,20)) END)
+        |ORDER BY gen_subquery_1.`customer_preferred_cust_flag` ASC NULLS FIRST
+        |LIMIT 100
+        """.stripMargin.trim),
+      //q38
+      ("case_19",
+       """
+        | SELECT c_customer_id customer_id,
+        |        c_first_name customer_first_name,
+        |        c_last_name customer_last_name,
+        |        c_preferred_cust_flag customer_preferred_cust_flag,
+        |        c_birth_country customer_birth_country,
+        |        c_login customer_login,
+        |        c_email_address customer_email_address,
+        |        d_year dyear,
+        |        d_date ddate,
+        |        sum(((ss_ext_list_price-ss_ext_wholesale_cost-ss_ext_discount_amt)+ss_ext_sales_price)/2) year_total,
+        |        sum(ss_ext_list_price-ss_ext_discount_amt) year_total1,
+        |        sum(ss_net_paid) year_total_74,
+        |        's' sale_type
+        | FROM customer, store_sales, date_dim
+        | WHERE c_customer_sk = ss_customer_sk AND ss_sold_date_sk = d_date_sk
+        | GROUP BY c_customer_id,
+        |          c_first_name,
+        |          c_last_name,
+        |          c_preferred_cust_flag,
+        |          c_birth_country,
+        |          c_login,
+        |          c_email_address,
+        |          d_year,
+        |          d_date
+       """.stripMargin.trim,
+       """
+        | select count(*) from (
+        |    select distinct c_last_name, c_first_name, d_date
+        |    from store_sales, date_dim, customer
+        |          where store_sales.ss_sold_date_sk = date_dim.d_date_sk
+        |      and store_sales.ss_customer_sk = customer.c_customer_sk
+        |      and d_month_seq between 1200 and  1200 + 11
+        |  intersect
+        |    select distinct c_last_name, c_first_name, d_date
+        |    from catalog_sales, date_dim, customer
+        |          where catalog_sales.cs_sold_date_sk = date_dim.d_date_sk
+        |      and catalog_sales.cs_bill_customer_sk = customer.c_customer_sk
+        |      and d_month_seq between  1200 and  1200 + 11
+        |  intersect
+        |    select distinct c_last_name, c_first_name, d_date
+        |    from web_sales, date_dim, customer
+        |          where web_sales.ws_sold_date_sk = date_dim.d_date_sk
+        |      and web_sales.ws_bill_customer_sk = customer.c_customer_sk
+        |      and d_month_seq between  1200 and  1200 + 11
+        | ) hot_cust
+        | limit 100
+       """.stripMargin.trim,
+       """
+        |
+        |
+        |
+        """.stripMargin.trim),
+      //q74
+      ("case_20",
+       """
+        | SELECT c_customer_id customer_id,
+        |        c_first_name customer_first_name,
+        |        c_last_name customer_last_name,
+        |        c_preferred_cust_flag customer_preferred_cust_flag,
+        |        c_birth_country customer_birth_country,
+        |        c_login customer_login,
+        |        c_email_address customer_email_address,
+        |        d_year dyear,
+        |        d_date ddate,
+        |        d_month_seq,
+        |        sum(((ss_ext_list_price-ss_ext_wholesale_cost-ss_ext_discount_amt)+ss_ext_sales_price)/2) year_total,
+        |        sum(ss_net_paid) year_total_74,
+        |        's' sale_type
+        | FROM customer, store_sales, date_dim
+        | WHERE ss_customer_sk = c_customer_sk AND ss_sold_date_sk = d_date_sk
+        | GROUP BY c_customer_id,
+        |          c_first_name,
+        |          c_last_name,
+        |          c_preferred_cust_flag,
+        |          c_birth_country,
+        |          c_login,
+        |          c_email_address,
+        |          d_year,
+        |          d_date,
+        |          d_month_seq
+       """.stripMargin.trim,
+       """
+        | with year_total as (
+        | select
+        |    c_customer_id customer_id, c_first_name customer_first_name,
+        |    c_last_name customer_last_name, d_year as year,
+        |    sum(ss_net_paid) year_total, 's' sale_type
+        | from
+        |    customer, store_sales, date_dim
+        | where c_customer_sk = ss_customer_sk
+        |    and ss_sold_date_sk = d_date_sk
+        |    and d_year in (2001,2001+1)
+        | group by
+        |    c_customer_id, c_first_name, c_last_name, d_year
+        | union all
+        | select
+        |    c_customer_id customer_id, c_first_name customer_first_name,
+        |    c_last_name customer_last_name, d_year as year,
+        |    sum(ws_net_paid) year_total, 'w' sale_type
+        | from
+        |    customer, web_sales, date_dim
+        | where c_customer_sk = ws_bill_customer_sk
+        |    and ws_sold_date_sk = d_date_sk
+        |    and d_year in (2001,2001+1)
+        | group by
+        |    c_customer_id, c_first_name, c_last_name, d_year)
+        | select
+        |    t_s_secyear.customer_id, t_s_secyear.customer_first_name, t_s_secyear.customer_last_name
+        | from
+        |    year_total t_s_firstyear, year_total t_s_secyear,
+        |    year_total t_w_firstyear, year_total t_w_secyear
+        | where t_s_secyear.customer_id = t_s_firstyear.customer_id
+        |    and t_s_firstyear.customer_id = t_w_secyear.customer_id
+        |    and t_s_firstyear.customer_id = t_w_firstyear.customer_id
+        |    and t_s_firstyear.sale_type = 's'
+        |    and t_w_firstyear.sale_type = 'w'
+        |    and t_s_secyear.sale_type = 's'
+        |    and t_w_secyear.sale_type = 'w'
+        |    and t_s_firstyear.year = 2001
+        |    and t_s_secyear.year = 2001+1
+        |    and t_w_firstyear.year = 2001
+        |    and t_w_secyear.year = 2001+1
+        |    and t_s_firstyear.year_total > 0
+        |    and t_w_firstyear.year_total > 0
+        |    and case when t_w_firstyear.year_total > 0 then t_w_secyear.year_total / t_w_firstyear.year_total else null end
+        |      > case when t_s_firstyear.year_total > 0 then t_s_secyear.year_total / t_s_firstyear.year_total else null end
+        | order by 1, 1, 1
+        | limit 100
+       """.stripMargin.trim,
+       """
+        |SELECT gen_subquery_1.`customer_id`, gen_subquery_1.`customer_first_name`, gen_subquery_1.`customer_last_name` 
+        |FROM
+        |  (SELECT gen_subsumer_0.`customer_id` AS `customer_id`, gen_subsumer_0.`year_total_74` AS `year_total` 
+        |  FROM
+        |    (SELECT customer.`c_customer_id` AS `customer_id`, customer.`c_first_name` AS `customer_first_name`, customer.`c_last_name` AS `customer_last_name`, customer.`c_preferred_cust_flag` AS `customer_preferred_cust_flag`, customer.`c_birth_country` AS `customer_birth_country`, customer.`c_login` AS `customer_login`, customer.`c_email_address` AS `customer_email_address`, date_dim.`d_year` AS `dyear`, date_dim.`d_date` AS `ddate`, date_dim.`d_month_seq`, sum((CAST((((CAST(store_sales.`ss_ext_list_price` AS DECIMAL(8,2)) - CAST(store_sales.`ss_ext_wholesale_cost` AS DECIMAL(8,2))) - CAST(store_sales.`ss_ext_discount_amt` AS DECIMAL(8,2))) + CAST(store_sales.`ss_ext_sales_price` AS DECIMAL(8,2))) AS DECIMAL(12,2)) / 2.00BD)) AS `year_total`, makedecimal(sum(unscaledvalue(store_sales.`ss_net_paid`))) AS `year_total_74`, 's' AS `sale_type` 
+        |    FROM
+        |      customer
+        |      INNER JOIN store_sales ON (store_sales.`ss_customer_sk` = customer.`c_customer_sk`)
+        |      INNER JOIN date_dim ON (store_sales.`ss_sold_date_sk` = date_dim.`d_date_sk`)
+        |    GROUP BY customer.`c_customer_id`, customer.`c_first_name`, customer.`c_last_name`, customer.`c_preferred_cust_flag`, customer.`c_birth_country`, customer.`c_login`, customer.`c_email_address`, date_dim.`d_year`, date_dim.`d_date`, date_dim.`d_month_seq`) gen_subsumer_0 
+        |  WHERE
+        |    (gen_subsumer_0.`dyear` IN (2001, 2002)) AND (gen_subsumer_0.`dyear` = 2001)
+        |  GROUP BY gen_subsumer_0.`customer_id`, gen_subsumer_0.`customer_first_name`, gen_subsumer_0.`customer_last_name`, gen_subsumer_0.`dyear`
+        |  HAVING (gen_subsumer_0.`year_total_74` > 0.00BD)
+        |  UNION ALL
+        |  SELECT customer.`c_customer_id` AS `customer_id`, makedecimal(sum(unscaledvalue(web_sales.`ws_net_paid`))) AS `year_total` 
+        |  FROM
+        |    customer
+        |    INNER JOIN web_sales ON (customer.`c_customer_sk` = web_sales.`ws_bill_customer_sk`)
+        |    INNER JOIN date_dim ON (date_dim.`d_year` IN (2001, 2002)) AND (web_sales.`ws_sold_date_sk` = date_dim.`d_date_sk`)
+        |  WHERE
+        |    false
+        |  GROUP BY customer.`c_customer_id`, customer.`c_first_name`, customer.`c_last_name`, date_dim.`d_year`
+        |  HAVING (`year_total` > 0.00BD)) gen_subquery_0 
+        |  INNER JOIN (SELECT gen_subsumer_1.`customer_id` AS `customer_id`, gen_subsumer_1.`customer_first_name` AS `customer_first_name`, gen_subsumer_1.`customer_last_name` AS `customer_last_name`, gen_subsumer_1.`year_total_74` AS `year_total` 
+        |  FROM
+        |    (SELECT customer.`customer_id` AS `customer_id`, customer.`customer_first_name` AS `customer_first_name`, customer.`customer_last_name` AS `customer_last_name`, customer.`customer_preferred_cust_flag` AS `customer_preferred_cust_flag`, customer.`customer_birth_country` AS `customer_birth_country`, customer.`customer_login` AS `customer_login`, customer.`customer_email_address` AS `customer_email_address`, date_dim.`dyear` AS `dyear`, date_dim.`ddate` AS `ddate`, date_dim.`d_month_seq`, sum((CAST((((CAST(store_sales.`ss_ext_list_price` AS DECIMAL(8,2)) - CAST(store_sales.`ss_ext_wholesale_cost` AS DECIMAL(8,2))) - CAST(store_sales.`ss_ext_discount_amt` AS DECIMAL(8,2))) + CAST(store_sales.`ss_ext_sales_price` AS DECIMAL(8,2))) AS DECIMAL(12,2)) / 2.00BD)) AS `year_total`, makedecimal(sum(unscaledvalue(store_sales.`ss_net_paid`))) AS `year_total_74`, 's' AS `sale_type` 
+        |    FROM
+        |      customer
+        |      INNER JOIN store_sales ON (store_sales.`ss_customer_sk` = customer.`c_customer_sk`)
+        |      INNER JOIN date_dim ON (store_sales.`ss_sold_date_sk` = date_dim.`d_date_sk`)
+        |    GROUP BY customer.`customer_id`, customer.`customer_first_name`, customer.`customer_last_name`, customer.`customer_preferred_cust_flag`, customer.`customer_birth_country`, customer.`customer_login`, customer.`customer_email_address`, date_dim.`dyear`, date_dim.`ddate`, date_dim.`d_month_seq`) gen_subsumer_1 
+        |  WHERE
+        |    (gen_subsumer_1.`dyear` IN (2001, 2002)) AND (gen_subsumer_1.`dyear` = 2002)
+        |  GROUP BY gen_subsumer_1.`customer_id`, gen_subsumer_1.`customer_first_name`, gen_subsumer_1.`customer_last_name`, gen_subsumer_1.`dyear`
+        |  UNION ALL
+        |  SELECT customer.`c_customer_id` AS `customer_id`, customer.`c_first_name` AS `customer_first_name`, customer.`c_last_name` AS `customer_last_name`, makedecimal(sum(unscaledvalue(web_sales.`ws_net_paid`))) AS `year_total` 
+        |  FROM
+        |    customer
+        |    INNER JOIN web_sales ON (customer.`c_customer_sk` = web_sales.`ws_bill_customer_sk`)
+        |    INNER JOIN date_dim ON (date_dim.`d_year` IN (2001, 2002)) AND (web_sales.`ws_sold_date_sk` = date_dim.`d_date_sk`)
+        |  WHERE
+        |    false
+        |  GROUP BY customer.`c_customer_id`, customer.`c_first_name`, customer.`c_last_name`, date_dim.`d_year`) gen_subquery_1  ON (gen_subquery_1.`customer_id` = gen_subquery_0.`customer_id`)
+        |  INNER JOIN (SELECT gen_subsumer_2.`customer_id` AS `customer_id`, gen_subsumer_2.`year_total_74` AS `year_total` 
+        |  FROM
+        |    (SELECT customer.`c_customer_id` AS `customer_id`, customer.`c_first_name` AS `customer_first_name`, customer.`c_last_name` AS `customer_last_name`, customer.`c_preferred_cust_flag` AS `customer_preferred_cust_flag`, customer.`c_birth_country` AS `customer_birth_country`, customer.`c_login` AS `customer_login`, customer.`c_email_address` AS `customer_email_address`, date_dim.`d_year` AS `dyear`, date_dim.`d_date` AS `ddate`, date_dim.`d_month_seq`, sum((CAST((((CAST(store_sales.`ss_ext_list_price` AS DECIMAL(8,2)) - CAST(store_sales.`ss_ext_wholesale_cost` AS DECIMAL(8,2))) - CAST(store_sales.`ss_ext_discount_amt` AS DECIMAL(8,2))) + CAST(store_sales.`ss_ext_sales_price` AS DECIMAL(8,2))) AS DECIMAL(12,2)) / 2.00BD)) AS `year_total`, makedecimal(sum(unscaledvalue(store_sales.`ss_net_paid`))) AS `y

<TRUNCATED>

[36/50] [abbrv] carbondata git commit: [CARBONDATA-2475] Support Modular Core for Materialized View DataMap for query matching and rewriting

Posted by ma...@apache.org.
http://git-wip-us.apache.org/repos/asf/carbondata/blob/2881c6bb/integration/spark-common-test/src/test/resources/tpch/supplier.csv
----------------------------------------------------------------------
diff --git a/integration/spark-common-test/src/test/resources/tpch/supplier.csv b/integration/spark-common-test/src/test/resources/tpch/supplier.csv
new file mode 100644
index 0000000..0f20c29
--- /dev/null
+++ b/integration/spark-common-test/src/test/resources/tpch/supplier.csv
@@ -0,0 +1,1000 @@
+1|Supplier#000000001| N kD4on9OM Ipw3,gf0JBoQDd7tgrzrddZ|17|27-918-335-1736|5755.94|each slyly above the careful|
+2|Supplier#000000002|89eJ5ksX3ImxJQBvxObC,|5|15-679-861-2259|4032.68| slyly bold instructions. idle dependen|
+3|Supplier#000000003|q1,G3Pj6OjIuUYfUoH18BFTKP5aU9bEV3|1|11-383-516-1199|4192.40|blithely silent requests after the express dependencies are sl|
+4|Supplier#000000004|Bk7ah4CK8SYQTepEmvMkkgMwg|15|25-843-787-7479|4641.08|riously even requests above the exp|
+5|Supplier#000000005|Gcdm2rJRzl5qlTVzc|11|21-151-690-3663|-283.84|. slyly regular pinto bea|
+6|Supplier#000000006|tQxuVm7s7CnK|14|24-696-997-4969|1365.79|final accounts. regular dolphins use against the furiously ironic decoys. |
+7|Supplier#000000007|s,4TicNGB4uO6PaSqNBUq|23|33-990-965-2201|6820.35|s unwind silently furiously regular courts. final requests are deposits. requests wake quietly blit|
+8|Supplier#000000008|9Sq4bBH2FQEmaFOocY45sRTxo6yuoG|17|27-498-742-3860|7627.85|al pinto beans. asymptotes haggl|
+9|Supplier#000000009|1KhUgZegwM3ua7dsYmekYBsK|10|20-403-398-8662|5302.37|s. unusual, even requests along the furiously regular pac|
+10|Supplier#000000010|Saygah3gYWMp72i PY|24|34-852-489-8585|3891.91|ing waters. regular requests ar|
+11|Supplier#000000011|JfwTs,LZrV, M,9C|18|28-613-996-1505|3393.08|y ironic packages. slyly ironic accounts affix furiously; ironically unusual excuses across the flu|
+12|Supplier#000000012|aLIW  q0HYd|8|18-179-925-7181|1432.69|al packages nag alongside of the bold instructions. express, daring accounts|
+13|Supplier#000000013|HK71HQyWoqRWOX8GI FpgAifW,2PoH|3|13-727-620-7813|9107.22|requests engage regularly instructions. furiously special requests ar|
+14|Supplier#000000014|EXsnO5pTNj4iZRm|15|25-656-247-5058|9189.82|l accounts boost. fluffily bold warhorses wake|
+15|Supplier#000000015|olXVbNBfVzRqgokr1T,Ie|8|18-453-357-6394|308.56| across the furiously regular platelets wake even deposits. quickly express she|
+16|Supplier#000000016|YjP5C55zHDXL7LalK27zfQnwejdpin4AMpvh|22|32-822-502-4215|2972.26|ously express ideas haggle quickly dugouts? fu|
+17|Supplier#000000017|c2d,ESHRSkK3WYnxpgw6aOqN0q|19|29-601-884-9219|1687.81|eep against the furiously bold ideas. fluffily bold packa|
+18|Supplier#000000018|PGGVE5PWAMwKDZw |16|26-729-551-1115|7040.82|accounts snooze slyly furiously bold |
+19|Supplier#000000019|edZT3es,nBFD8lBXTGeTl|24|34-278-310-2731|6150.38|refully final foxes across the dogged theodolites sleep slyly abou|
+20|Supplier#000000020|iybAE,RmTymrZVYaFZva2SH,j|3|13-715-945-6730|530.82|n, ironic ideas would nag blithely about the slyly regular accounts. silent, expr|
+21|Supplier#000000021|81CavellcrJ0PQ3CPBID0Z0JwyJm0ka5igEs|2|12-253-590-5816|9365.80|d. instructions integrate sometimes slyly pending instructions. accounts nag among the |
+22|Supplier#000000022|okiiQFk 8lm6EVX6Q0,bEcO|4|14-144-830-2814|-966.20| ironically among the deposits. closely expre|
+23|Supplier#000000023|ssetugTcXc096qlD7 2TL5crEEeS3zk|9|19-559-422-5776|5926.41|ges could have to are ironic deposits. regular, even request|
+24|Supplier#000000024|C4nPvLrVmKPPabFCj|0|10-620-939-2254|9170.71|usly pending deposits. slyly final accounts run |
+25|Supplier#000000025|RCQKONXMFnrodzz6w7fObFVV6CUm2q|22|32-431-945-3541|9198.31|ely regular deposits. carefully regular sauternes engage furiously above the regular accounts. idly |
+26|Supplier#000000026|iV,MHzAx6Z939uzFNkq09M0a1 MBfH7|21|31-758-894-4436|21.18| ideas poach carefully after the blithely bold asymptotes. furiously pending theodoli|
+27|Supplier#000000027|lC4CjKwNHUr6L4xIpzOBK4NlHkFTg|18|28-708-999-2028|1887.62|s according to the quickly regular hockey playe|
+28|Supplier#000000028|GBhvoRh,7YIN V|0|10-538-384-8460|-891.99|ld requests across the pinto beans are carefully against the quickly final courts. accounts sleep |
+29|Supplier#000000029|658tEqXLPvRd6xpFdqC2|1|11-555-705-5922|-811.62|y express ideas play furiously. even accounts sleep fluffily across the accounts. careful|
+30|Supplier#000000030|84NmC1rmQfO0fj3zkobLT|16|26-940-594-4852|8080.14|ias. carefully silent accounts cajole blithely. pending, special accounts cajole quickly above the f|
+31|Supplier#000000031|fRJimA7zchyApqRLHcQeocVpP|16|26-515-530-4159|5916.91|into beans wake after the special packages. slyly fluffy requests cajole furio|
+32|Supplier#000000032|yvoD3TtZSx1skQNCK8agk5bZlZLug|23|33-484-637-7873|3556.47|usly even depths. quickly ironic theodolites s|
+33|Supplier#000000033|gfeKpYw3400L0SDywXA6Ya1Qmq1w6YB9f3R|7|17-138-897-9374|8564.12|n sauternes along the regular asymptotes are regularly along the |
+34|Supplier#000000034|mYRe3KvA2O4lL4HhxDKkkrPUDPMKRCSp,Xpa|10|20-519-982-2343|237.31|eposits. slyly final deposits toward the slyly regular dependencies sleep among the excu|
+35|Supplier#000000035|QymmGXxjVVQ5OuABCXVVsu,4eF gU0Qc6|21|31-720-790-5245|4381.41| ironic deposits! final, bold platelets haggle quickly quickly pendin|
+36|Supplier#000000036|mzSpBBJvbjdx3UKTW3bLFewRD78D91lAC879|13|23-273-493-3679|2371.51|ular theodolites must haggle regular, bold accounts. slyly final pinto beans bo|
+37|Supplier#000000037|cqjyB5h1nV|0|10-470-144-1330|3017.47|iously final instructions. quickly special accounts hang fluffily above the accounts. deposits|
+38|Supplier#000000038|xEcx45vD0FXHT7c9mvWFY|4|14-361-296-6426|2512.41|ins. fluffily special accounts haggle slyly af|
+39|Supplier#000000039|ZM, nSYpEPWr1yAFHaC91qjFcijjeU5eH|8|18-851-856-5633|6115.65|le slyly requests. special packages shall are blithely. slyly unusual packages sleep |
+40|Supplier#000000040|zyIeWzbbpkTV37vm1nmSGBxSgd2Kp|22|32-231-247-6991|-290.06| final patterns. accounts haggle idly pas|
+41|Supplier#000000041|G 1FKHR435 wMKFmyt|18|28-739-447-2525|6942.67|odolites boost across the furiously regular fo|
+42|Supplier#000000042|1Y5lwEgpe3j2vbUBYj3SwLhK62JlwEMtDC|22|32-698-298-6317|6565.11| fluffily even requests cajole blithely fu|
+43|Supplier#000000043|Z5mLuAoTUEeKY5v22VnnA4D87Ao6jF2LvMYnlX8h|12|22-421-568-4862|7773.41|unts. unusual, final asymptotes |
+44|Supplier#000000044|kERxlLDnlIZJdN66zAPHklyL|7|17-713-930-5667|9759.38|x. carefully quiet account|
+45|Supplier#000000045|LcKnsa8XGtIO0WYSB7hkOrH rnzRg1|9|19-189-635-8862|2944.23|iously according to the ironic, silent accounts.|
+46|Supplier#000000046|e0URUXfDOYMdKe16Z5h5StMRbzGmTs,D2cjap|24|34-748-308-3215|3580.35|gular, regular ideas across th|
+47|Supplier#000000047|3XM1x,Pcxqw,HK4XNlgbnZMbLhBHLA|14|24-810-354-4471|2958.09|sly ironic deposits sleep carefully along t|
+48|Supplier#000000048|jg0U FNPMQDuyuKvTnLXXaLf3Wl6OtONA6mQlWJ|14|24-722-551-9498|5630.62|xpress instructions affix. fluffily even requests boos|
+49|Supplier#000000049|Nvq 6macF4GtJvz|24|34-211-567-6800|9915.24|the finally bold dependencies. dependencies after the fluffily final foxes boost fluffi|
+50|Supplier#000000050|rGobqSMMYz0ErrPhCGS|9|19-561-560-7437|4515.87|warhorses. ironic, regular accounts detect slyly after the quickly f|
+51|Supplier#000000051|rDkBXb01POIKjOwrij62uM8O4|9|19-475-537-1368|7241.40|ges nag at the blithely busy instructions. fluffy packages wake quickly. even, ironic ideas boost b|
+52|Supplier#000000052|WCk XCHYzBA1dvJDSol4ZJQQcQN,|19|29-974-934-4713|287.16|dolites are slyly against the furiously regular packages. ironic, final deposits cajole quickly|
+53|Supplier#000000053|i9v3 EsYCfLKFU6PIt8iihBOHBB37yR7b3GD7Rt|7|17-886-101-6083|6177.35|onic, special deposits wake furio|
+54|Supplier#000000054|J1s,Wxb5pg|12|22-966-435-7200|2733.69|blithely pending dolphins. quickly regular theodolites haggle slyly|
+55|Supplier#000000055|OqdYSiOQeG4eGi636Tj|24|34-876-912-6007|7162.15|kages. blithely even foxes cajole special, final accounts. blithely even dependencies r|
+56|Supplier#000000056|fUVtlUVal GiHBOuYoUQ XQ9NfNLQR3Gl|16|26-471-195-5486|-632.16| sleep special deposits. unusual requests wake blithely slyly regular ideas. |
+57|Supplier#000000057|bEWqUVRR f0mb2o18Y|17|27-681-514-6892|-831.07|detect according to the furiously br|
+58|Supplier#000000058|01dEADIZoCULZXg|16|26-659-969-5586|92.44|refully final foxes are. even, express courts according to the b|
+59|Supplier#000000059|N8lKbYjMnVlEHmTPRmBgtLiX8rrJx|17|27-249-395-9123|586.16|ffily along the even decoys. final instructions abov|
+60|Supplier#000000060|cb08ntDTARo47WmnBcYXu|8|18-550-360-2464|4515.80|thely express ideas use blithely |
+61|Supplier#000000061|Oz0M1qBR9I|8|18-396-489-9719|6096.58|s the slyly regular ideas shall |
+62|Supplier#000000062|bSmlFYUKBeRsqJxwC9 zS6xpFdEf5jNTb|19|29-603-653-2494|9202.57|ts. furiously ironic pinto beans are permanently after the bold ideas. regular, express f|
+63|Supplier#000000063|NlV0OQyIoPvPkw5AYuWGomX,hgqm1|5|15-781-401-3047|5742.03|ar deposits. blithely bold accounts against the slyly final pinto beans sleep about the exp|
+64|Supplier#000000064|w80JjnIP lGoLdUjRutbv81gGlqqpW4PQBeOtSYU|24|34-278-790-7004|1309.70|uickly regular requests use. carefully i|
+65|Supplier#000000065|BsAnHUmSFArppKrM|22|32-444-835-2434|-963.79|l ideas wake carefully around the regular packages. furiously ruthless pinto bea|
+66|Supplier#000000066|qYdruFJQJYYiKvnNVmYfCVydVB8bcW,AW,U6SOV3|23|33-300-836-9529|2455.98|ar requests. express orbits de|
+67|Supplier#000000067|7YrEKJncHFk5D W7ZaqfAXV|4|14-563-538-1657|3576.55|ray slyly final foxes. furio|
+68|Supplier#000000068|Ue6N50wH2CwE4PPgTGLmat,ibGYYlDoOb3xQwtgb|21|31-267-327-4328|5119.38|inal requests. ruthlessly ironic packages cajole|
+69|Supplier#000000069|T2Dl9,f97e333eRuMi2z |4|14-491-707-8310|8466.50|! carefully ironic instructions nag quickly pending requests. fluffily even deposits sleep a|
+70|Supplier#000000070|INWNH2w,OOWgNDq0BRCcBwOMQc6PdFDc4|6|16-821-608-1166|9508.37|ests sleep quickly express ideas. ironic ideas haggle about the final T|
+71|Supplier#000000071|YFo8an7P6wi Q|1|11-743-919-7272|8179.68| final accounts. bold, final escapades must have to cajole about the special platelets. fu|
+72|Supplier#000000072|mKpAJojtawk2alqV4 ZEbJ3PH3wfYqy AM8rGq1|18|28-113-898-6643|7014.50| theodolites sublate furiously about the regularly e|
+73|Supplier#000000073|HBZA1NHvrswQCxTTjg 5XrfSOGgMRKNCe2ovE|16|26-758-310-7496|3793.13|. never pending asymptotes a|
+74|Supplier#000000074|uM3yV5NOc6b5wNdpxF69CW 8QvDxqvKubRJtA|20|30-166-486-1559|4170.51|carefully along the quickly regular sentiments. ironic accounts sleep. regular deposits are blith|
+75|Supplier#000000075|7f3gN4rP1livII|18|28-716-704-8686|-224.84|eans. even, silent packages c|
+76|Supplier#000000076|JBhSBa3cLYvNgHUYtUHmtECCD|14|24-228-763-7840|2971.10|, even instructions. furiously unusual deposits wake slyly about the ev|
+77|Supplier#000000077|wVtcr0uH3CyrSiWMLsqnB09Syo,UuZxPMeBghlY|7|17-281-345-4863|4186.95|the slyly final asymptotes. blithely pending theodoli|
+78|Supplier#000000078|9y3OZ2CV hGrsrQxzB7V3zTtygHVHlG3SD6yrz|5|15-670-998-6860|1044.10|, regular packages wake quickly bold requests. carefully unusual requests about the unusual request|
+79|Supplier#000000079|p0u3tztSXUD2J8vFfLNFNKsrRRv7qyUtTBTA|14|24-402-227-9600|1191.94|nto beans integrate slyly across the fluffily pending multipliers. carefully ste|
+80|Supplier#000000080|cJ2MHSEJ13rIL2Wj3D5i6hRo30,ZiNUXhqn|21|31-646-289-1906|-40.45|ackages. blithely bold requests wake quickly. carefully regular foxes are slyly instructions. caref|
+81|Supplier#000000081|SLlacbhgpKmVa,gF3saYv12e0|12|22-535-310-6971|166.32|oost carefully quickly regular packages. carefully final excuses sleep blithely slyly pendi|
+82|Supplier#000000082|WyTKA7ZpF15t1aCNlT3|18|28-177-572-9691|-724.31|xpress multipliers wake furiously even foxes. furiously iro|
+83|Supplier#000000083|WRJUkzCn050seVz57oAfrbCuw|14|24-529-559-2461|1467.77|ly pending courts would cajole enticingly even deposits. slyly express|
+84|Supplier#000000084|DcYjWMiZGQqEKOJi4wAmIV08ikx|24|34-869-118-7803|4780.93|even depths. regular foxes use slyly. theod|
+85|Supplier#000000085|Ckls9RtlzKSF|7|17-167-806-8199|7174.74|egular packages. bold pinto beans wake fur|
+86|Supplier#000000086|J1fgg5QaqnN|19|29-903-665-7065|1883.37|cajole furiously special, final requests: furiously spec|
+87|Supplier#000000087|WCw7URDj8zoZ7tqC3cpm7|24|34-860-229-1674|4746.66|all are quickly after the ironic platelets. pending dolphins are. final the|
+88|Supplier#000000088|yOshY8wwzMgS|11|21-191-938-9469|3086.13|furiously special excuses aff|
+89|Supplier#000000089|fhtzZcSorhud1|9|19-259-876-1014|1638.02|en instructions across the slyly ironic requests engage|
+90|Supplier#000000090|bPE6Uhz1f2m3gwSGMrnRt,g,3gq37r5kxgphqss1|6|16-380-123-9217|6201.77|hely fluffily regular theodoli|
+91|Supplier#000000091|YV45D7TkfdQanOOZ7q9QxkyGUapU1oOWU6q3|3|13-604-986-9056|6255.87|nstructions use carefully according to the special packages: quickly silent th|
+92|Supplier#000000092|n48Wy4QI3lml8T217rk|2|12-701-432-8346|2470.84| even theodolites wake against the blithely fluffy packages|
+93|Supplier#000000093|wd1djjKXT,4zBm|16|26-528-528-1157|368.76|yly final accounts could are carefully. fluffily ironic instruct|
+94|Supplier#000000094|lK,pLPjAMVGJOXN80zPZuNQjpChliE|4|14-728-888-8882|2766.80|usly. furiously slow theodolites could haggle carefully fina|
+95|Supplier#000000095|p2 2hBfH5TD|19|29-142-539-1403|2588.83|sual instructions cajole slyly with the final ac|
+96|Supplier#000000096|Lbxp3WIipye o2wZme1i9iJx,xTt1Mp|12|22-888-654-7193|3437.24|efully unusual excuses wake fluffily regular theodo|
+97|Supplier#000000097|MrCQha2G6ndX1fp6CA|4|14-618-678-1789|6642.22|s, regular accounts. furiously bold ideas u|
+98|Supplier#000000098|ogHn8dpXB5Q|21|31-914-775-1978|5873.07|esias use slyly under the ironic foxes. re|
+99|Supplier#000000099|4SxkTHG28nZrtT0,MnVF9H|18|28-272-909-1617|3671.34|nic dependencies dazzle flu|
+100|Supplier#000000100|rIlN li8zvW22l2slbcx ECP4fL|21|31-351-324-5062|3191.70|es. regular instructions sleep carefully. slyly ironic packages across the foxes boost |
+101|Supplier#000000101|8KUTzVw32Pw3PD7 h,YJ0ysOoBtz9JfZI|11|21-970-795-5691|4901.77|ar foxes. carefully final ideas are. blithely regular deposits about the carefully regular package|
+102|Supplier#000000102|pDmsZ3V8W7NR1wW|8|18-884-693-5785|2980.07|ly even notornis nod furiously evenly regular platelets. thinly pending pat|
+103|Supplier#000000103|hBpQ4GYblzpHKTgATLPAS6ph3|2|12-442-523-5043|1050.66| accounts detect slyly bo|
+104|Supplier#000000104|Dcl4yGrzqv3OPeRO49bKh78XmQEDR7PBXIs0m|6|16-434-972-6922|1381.97|gular ideas. bravely bold deposits haggle through the carefully final deposits. slyly unusual idea|
+105|Supplier#000000105|cB YSy5Bla|4|14-951-800-2742|3754.75|sual requests haggle slyly. theodolites|
+106|Supplier#000000106|50EV3vyfAsWJAjTbT4qwU|21|31-810-990-4600|8091.65|eas affix carefully fluffily silent packages. regular deposits bo|
+107|Supplier#000000107|fqniA2vC1VZU5DZG2TBiN|16|26-958-723-2164|1378.93|thely special foxes nag carefully. requests cajole along the quickly ironic pinto b|
+108|Supplier#000000108|eL47lfhfMP7zRw|24|34-341-415-7878|7683.71|, regular packages! bold re|
+109|Supplier#000000109|4 63llZBLxtWRa A|21|31-405-311-8656|9202.82|al courts. unusual escapades cajol|
+110|Supplier#000000110|ehv9ObpyN0|15|25-824-874-9077|9804.10|eposits nag thinly furiously even accounts; permanent ideas nag fluf|
+111|Supplier#000000111|uv56H9j8cNa4qnflVSYbLNN|4|14-256-573-2660|6578.65|ing theodolites are about the slowly pending|
+112|Supplier#000000112|vdWe5lfgvisRCxdd85DTOZHqAzcuq7f7KKThA|12|22-617-876-1402|4332.95|es eat fluffily bold deposits. furiously unusual i|
+113|Supplier#000000113|5 YOpqbaHs7dR gG4EmXrI7XtA7DcnRMsWPU1z2D|11|21-211-117-1937|1882.05|blithely regular courts wake quickly even pl|
+114|Supplier#000000114|Uvz iykaBYOC|20|30-998-334-7841|4559.18|jole slyly blithely regular requests-- even requests haggle alongsid|
+115|Supplier#000000115|nJ 2t0f7Ve,wL1,6WzGBJLNBUCKlsV|23|33-597-248-1220|9192.10|es across the carefully express accounts boost caref|
+116|Supplier#000000116|gABGPfNYwB 9g1rMYWSAhpo 4|13|23-188-305-1829|5751.39|xcuses wake quickly above the regular packages; s|
+117|Supplier#000000117|ZnlLNC,8YN10T4mjI8eq5bJ|18|28-470-879-3141|4589.18|ymptotes. blithely regular theodolites are slyly according to the |
+118|Supplier#000000118|BYtvNtFpQAHHoBFWF|0|10-475-868-5521|7317.43|y ironic theodolites. furiously bold ideas use along t|
+119|Supplier#000000119|4CxBrM0o4yt6LYFxZIyZ89Xnf8LZNn6KcYc|18|28-558-264-1202|2060.13|ctions: quickly final courts wake quietly foxes. packages hag|
+120|Supplier#000000120|TNxoTw0SiMmQzGfZY9fTSkL2kmtPwvtR2L|7|17-645-761-5674|5575.40| beans cajole of the sly, dogged courts. |
+121|Supplier#000000121|CWGri,tKI 7gDcDsI|6|16-275-849-2485|5733.61|against the ironic, permanent pinto beans. doggedly pending deposits sleep agai|
+122|Supplier#000000122|2RUSHHspScCVTWC6z vw2XVR|16|26-432-258-4986|2732.95|ackages. carefully special accounts use slyly. slyly silent i|
+123|Supplier#000000123|IqRn20xsj5ibqAQjb6YNQf0xah|19|29-602-688-1506|5726.19|nts x-ray quickly according to t|
+124|Supplier#000000124|vmVadCZ xHPbQQA2fLxr68T1YhmjVSuRUJKCrcq6|8|18-313-889-3713|-941.38|ular excuses after the fluffily pending pinto |
+125|Supplier#000000125|XG eO4Xb4TSF7rj4R6WRQ1v2seTlyga3tvFZaC|2|12-419-430-3983|5157.25|ven accounts. fluffily ironic deposits are carefully. s|
+126|Supplier#000000126|CaO4YuZ oSkzemn|14|24-728-670-3468|6829.86|unts. carefully regular dolphins s|
+127|Supplier#000000127|VEqo3HZJ,0ggcaxvqZnW7Rq7l,cPqfKgX2bIRavM|1|11-265-565-1616|2502.95|yly above the fluffily ironic accounts.|
+128|Supplier#000000128|u,I0ayLPI2HKL|23|33-957-200-2556|1091.04|ng requests. slyly final ideas affix slyly even requests. fluffily regular theo|
+129|Supplier#000000129|9kWUk5K 7TAR40dW5nVeg i60Fy|15|25-314-164-7014|9001.17|even excuses wake even accounts. slyly bold excuses |
+130|Supplier#000000130|Rnt93MAs0EpPZjPYQIlGOWNUjk|23|33-322-101-6260|8708.17| blithely regular packages sublate. closely idle accounts are fluffily final, brav|
+131|Supplier#000000131|u3mTHMgBC0yJTLufr01TuHImgflQUXv|14|24-293-181-3975|1301.20|to the regular, pending instructions detect against the p|
+132|Supplier#000000132|sU92Jd3aQSbV|5|15-776-909-1326|-812.17|al, final accounts cajole requests; fluffily pending instruction|
+133|Supplier#000000133|QT,semYwdx|4|14-731-952-5158|9754.60|he quickly express instructions |
+134|Supplier#000000134|Nv7dxj4FGWrdcP56RmNci|6|16-184-952-7778|-329.16|c deposits haggle. dinos a|
+135|Supplier#000000135|F4Uy ZQNU6ESTmO3mrL,mI|7|17-290-812-8855|9767.99|courts wake slyly instructions. furiously silent requests cajol|
+136|Supplier#000000136|SyWVom9ZFrTA6BMAS|8|18-175-739-8397|4623.48|requests. boldly regular deposits are. packages are sometimes! c|
+137|Supplier#000000137|j81M1VQCvKcIVnpMnIUnBNVUOm0XYJuJY|18|28-108-440-4890|837.27|he blithely bold theodolites cajole blithely among the |
+138|Supplier#000000138|utbplAm g7RmxVfYoNdhcrQGWuzRqPe0qHSwbKw|19|29-533-434-6776|906.07|ickly unusual requests cajole. accounts above the furiously special excuses |
+139|Supplier#000000139| 2mQLQsVJ8WLBSnl0R bXrcyTgqXKrplgxb|22|32-788-265-2743|2217.93|arefully ironic ideas: slyly regular deposits about the furiously ironic requests|
+140|Supplier#000000140|mdfYYe8U sSb|16|26-379-377-5829|6727.48|sly final pinto beans affix furiously about the packages. even, bold accounts affix permanently fi|
+141|Supplier#000000141|5IN1dvjqFhyfKxoslkY3UL7CrNVPCZmq|10|20-557-547-7976|8842.14|quests detect blithely even ideas. unusual, regular accounts sleep blithely carefully regu|
+142|Supplier#000000142|HvfHnP57pz0w6l|16|26-302-532-7958|8991.41|dly ironic packages cajole amon|
+143|Supplier#000000143|a3v,6RZMN9p FzRTdV,fm7ehoVgEhifejGnrNY4H|12|22-916-300-5765|9658.99|s lose slyly about the blithely unusual asymptotes. blithely silent instructions cou|
+144|Supplier#000000144|f8tddEKps816HHqNwsKdn3|20|30-726-423-7363|9806.29| carefully even pinto beans n|
+145|Supplier#000000145|pSsVRBOlNKVuvXcjAEKxxy0hD8kA aZf|6|16-136-582-9756|-685.94|he carefully silent requests. quickly careful deposits are quickly about the bold, r|
+146|Supplier#000000146|rBDNgCr04x0sfdzD5,gFOutCiG2|22|32-792-619-3155|8271.39|s cajole quickly special requests. quickly enticing theodolites h|
+147|Supplier#000000147|oLdl SQpf,ZXebBw5F3g9|23|33-828-583-6731|8387.76|its. carefully pending packages after the ironically special frays wake above the special deposits. |
+148|Supplier#000000148|bkCBZzewuerw8xHv|7|17-648-666-9156|6089.75|ckly regular grouches. carefully ironic deposits cajole acc|
+149|Supplier#000000149|pVyWsjOidpHKp4NfKU4yLeym|6|16-660-553-2456|4518.31|ts detect along the foxes. final Tiresias are. idly pending deposits haggle; even, blithe pin|
+150|Supplier#000000150|kZajmmtJB4g,nNlaHxUvXLfZW0hVtnR35LKA|5|15-169-420-7828|4161.22|l instructions sleep quickly regular requests. final orbits use never furiously ironic reque|
+151|Supplier#000000151|2hd,3OAKPb39IY7 XuptY|22|32-960-568-5148|8561.72|hely final packages. ironic pinto beans haggle qu|
+152|Supplier#000000152|jTZy8PjLJqfLxgHm7,fDoJu7ZMs luO2YmN63|24|34-659-493-1274|3846.60|lyly even platelets cajole. fluffil|
+153|Supplier#000000153|qkCHAU1v9CtEBOGXhdFXJsx5L5gViVm5k,|9|19-346-843-5260|850.55|ress quickly. even accounts are around the carefully bold packages. slyly regular pa|
+154|Supplier#000000154|gB51OPlY yleFDn,pnDRbsDqy1gYM,SNRbo|13|23-471-808-2661|4155.67| special ideas haggle carefully ironic pack|
+155|Supplier#000000155|NFoZlgq90N33cC4HbLcfIGb1uqIZy85l42qa6|22|32-417-987-3690|3401.43|es! slyly ironic requests cajole above the daring account|
+156|Supplier#000000156|,KXbCcjqZNjmyHzeqaL4|5|15-106-692-4998|9780.74|sits cajole carefully instead of the final, ironic requests. furiously ironic deposits abo|
+157|Supplier#000000157|,mEGorBfVIm|3|13-776-259-5994|-963.19|ove the silent deposits. carefully pending packages cajole furiously final packa|
+158|Supplier#000000158| fkjbx7,DYi|7|17-873-902-6175|1596.44|cuses sleep after the pending, final |
+159|Supplier#000000159|xftQu5vkiD6BF|4|14-606-224-3002|326.36| blithely quickly ironic platel|
+160|Supplier#000000160|LG6VM3F8MhGnmoZUpyqHgsV0IP6gOagh|4|14-471-505-8811|5067.64| after the furiously express deposi|
+161|Supplier#000000161|fsteD4OulIaUNa IPXVesILV|0|10-604-123-7428|7810.78|doze among the slyly even platelets. packages believe blithely furiously final ac|
+162|Supplier#000000162|hf2fnryOkI9VjFN8R8i5rRWguXzdlMu3o|9|19-905-888-6020|-170.22|sits. even frets believe fluffily. fluffily regular accounts |
+163|Supplier#000000163|1MhydNirC ,cuLIZezbkUxJhAC53ii,B|12|22-980-516-1217|7999.27|es sleep about the fluffily express platelets. even multipliers must have to sublate. bli|
+164|Supplier#000000164|7H,0hAZkls5qVS 6sjbnVnQtpjMnGEv3gKXVS|2|12-414-446-6598|-264.20| the stealthy accounts. quickly stealthy warthogs detect. final deposits|
+165|Supplier#000000165|iPso5qCxSnxaNsRe9AU05Vl9hWm5oHIS|14|24-906-333-8640|1550.62|ions sleep silently. furiously unusual requests are slyly. express requests hind|
+166|Supplier#000000166|zCr2Z0hHyLjSz|13|23-117-751-3240|258.33|s. regular, unusual requests about the bold packages grow careful|
+167|Supplier#000000167|FEDldnEfBHVGOmKdFevemmG2|20|30-725-194-1727|5731.87|rding to the slyly bold deposits haggle furiously furiously regular accounts. blithely bold pac|
+168|Supplier#000000168|NNvNhn6dWzr80Igr|9|19-985-776-2090|6559.35|y final packages! express, ironic foxes snooze carefully above the furiously daring theodolites|
+169|Supplier#000000169|ycymrfB5JV1vU,swPXggAt|13|23-698-509-1073|-927.50|ss, even accounts. ironic packages sleep blithely after the slyly regular d|
+170|Supplier#000000170|RtsXQ,SunkA XHy9|23|33-803-340-5398|7392.78|ake carefully across the quickly|
+171|Supplier#000000171|eRpB8T GdtMBvJOo|10|20-229-200-1299|3549.54|es are. blithely final packages are unusual requests. fluffily expre|
+172|Supplier#000000172|NckigAXBRUXbJI|22|32-481-329-1585|2077.39|efully ironic packages x-ray thinly. slyly pending hockey players haggle slyly. sly|
+173|Supplier#000000173|OqQzF6rfxDvkjpMXVCwGfQzj4oTHBHyW5kC5Gjxd|12|22-640-545-4690|9583.11|ly regular escapades use among the express deposits. unusual, silent deposits wake. ins|
+174|Supplier#000000174|e1NMjwAq6RdyIKeA|22|32-840-184-9487|-561.50|s. blithely special dugouts integrate furiously. furiously bold accounts haggle th|
+175|Supplier#000000175|Lgv gYbBdu S9|12|22-392-226-3266|9845.98|leep. quickly blithe theodolites wake slyly. furiously pending ideas haggle after the slyly|
+176|Supplier#000000176|OLVnGuOx8m6NfApzODj4 JP01JJIm,qI53BChmgQ|10|20-970-245-2712|6387.89|s according to the carefully expr|
+177|Supplier#000000177|IUZ 7G x212nzZY5aQS|12|22-246-174-4465|-741.79|fully after the always ironic theodolites|
+178|Supplier#000000178|VJ9DInoVjbDg|16|26-471-122-2582|4693.27|hely final foxes instead of the express, expres|
+179|Supplier#000000179|d3le3XaTUC|19|29-560-587-5604|7727.83|kages solve carefully alongside of the furiously regular patterns. blithe|
+180|Supplier#000000180|JJzFp5wZcS0KpMLM95tYmq5Pv526UBfT8vrfwBk|7|17-600-237-1665|2753.77|ic deposits wake furiously even, express accounts. slyly express packages detect doggedly|
+181|Supplier#000000181|7g8adZQXXuHAYdoULLDVKYRtBhdddGqYj7pMzva|14|24-682-737-4806|3665.07|usual ideas. silent requests boost across the quickly regular instructi|
+182|Supplier#000000182|KuSXGdsAdYdkhzVLmgo4Xs|7|17-825-333-7344|7872.30|gular instructions. blithely regular sentiments around the slyly silent dependencies inte|
+183|Supplier#000000183|zAAIv68BEXvllrfgsW,i8e|0|10-842-403-7954|-192.51| slyly final dependencies alongside of the fluffily iro|
+184|Supplier#000000184|VTbbcJp9vdqn,tJA5pG5V,596mud6ZdspXgpUWX |1|11-802-685-1889|7448.53|special deposits cajole quickly even asymptotes. quickly ironic d|
+185|Supplier#000000185|F4C0AWRPk3|17|27-586-282-7422|5435.91| pinto beans. quickly express packages |
+186|Supplier#000000186|g huGEW5nrQ0Lmd6|1|11-398-253-5445|4397.48|g along the pending deposits. slyly final foxes sleep fluffily above th|
+187|Supplier#000000187|oMtyTl6hTyLQhiBwrUaa42zKOjfNobEq|18|28-127-320-9230|8517.04|riously along the carefully silent instructions. |
+188|Supplier#000000188|boP,dP6PjNCADoMv3FaXKRREikgs4J7cYng|20|30-573-811-4001|1020.57|ual requests cajole. final deposits|
+189|Supplier#000000189|SlPFDFJnn4gtrol|9|19-482-239-6669|1636.09|y regular courts. furiously express deposits haggle slyl|
+190|Supplier#000000190|qpXGL8oM1 wzQd|10|20-995-359-7606|6705.44|regular deposits haggle across the final, si|
+191|Supplier#000000191|X dZg6Dtv17X7|18|28-437-699-1621|1445.00|en packages haggle blithely regular requests. silently ironic packages n|
+192|Supplier#000000192|Tub1t4UlJwZ5U|15|25-585-189-5975|7031.84|ccounts use blithely. unusual, regular ideas use qu|
+193|Supplier#000000193|0HvoBt,qEF EaKzYNCl|5|15-872-804-8448|9095.64| packages. regular ideas sleep about the fluffily even deposits! special dolphins nag sly|
+194|Supplier#000000194|MDIkT8cHs7|9|19-741-822-3512|1487.08|nusual platelets cajole according to the|
+195|Supplier#000000195|xWy21YlUy3R6L01|2|12-395-261-9720|-767.31|e unusual foxes. express, final ideas sleep! carefully regular ideas around the quickly expres|
+196|Supplier#000000196|TQX4fMwB5tXoz4Fi4CEhALhi6|18|28-430-406-1127|4710.62|ke fluffily regular requests. furiously final|
+197|Supplier#000000197|YC2Acon6kjY3zj3Fbxs2k4Vdf7X0cd2F|3|13-999-250-8664|2398.30|ithely final ideas within the furiously ironic requests cajole|
+198|Supplier#000000198|ncWe9nTBqJETno|6|16-355-298-7120|2187.91|ts are blithely stealthily unusual asymptotes: blithely final excuses wake. ca|
+199|Supplier#000000199|k,8F8FGDuN 3udblO|22|32-268-147-8879|8151.22|requests. slyly express foxes across the blithe|
+200|Supplier#000000200|MNqafnV52UKvwuk3jFCn,AJkL|11|21-962-509-4762|1589.13| beans x-ray enticingly express accounts. blithely final deposits run; regular packages are|
+201|Supplier#000000201|e3lZO QY9QpCVdc0HXrqXB5uxXnOi r|18|28-782-585-7326|9537.73|kly above the special ideas. even requests nag carefully. quickly pending t|
+202|Supplier#000000202|NALZjSfea6SY zB1,I09OJYGrA8bwR4pU|23|33-549-918-5721|6739.52|courts cajole bold, special accounts. bold packages haggle re|
+203|Supplier#000000203|wvdhblw9JkIe52z0gnGSDrDBsMsI3Aidqk|1|11-257-649-3327|2150.78|ar warhorses detect. carefully final requests since the unusual, ironic deposits b|
+204|Supplier#000000204| 7bts1RIiMsy35F6V7 lC|15|25-718-760-9193|172.87|cording to the furiously even pinto bean|
+205|Supplier#000000205|rF uV8d0JNEk|3|13-272-651-7242|-670.30| beans cajole regular, pending packages|
+206|Supplier#000000206|hva5xXEn5j7H27CdKcjFqi,QnunUZyt8,a|19|29-156-330-8311|7136.21|quickly pending packages. regular, quiet packag|
+207|Supplier#000000207|0QmBvfdQPh7Xy09txlnJcv|2|12-245-833-1389|3285.77|xes. pending, regular deposits x-ray against the unusual deposits. final, bold platel|
+208|Supplier#000000208|kr uyD ,K95lEvq77tuHVjOJM57|4|14-932-450-9228|1938.69| instructions boost blithely along the|
+209|Supplier#000000209|fpTboatC6ogrozMPApz2DXmlukC9YVzCLCgQOtgU|18|28-134-789-6780|-395.27|ly ironic ideas sleep carefully even courts. slyly speci|
+210|Supplier#000000210|eC5 e7DXYBWi8XlnFtBxF,tSWPyQAzqb8gAZ|2|12-385-448-5157|9657.88|ccounts haggle after the carefully pending acc|
+211|Supplier#000000211|acmexYmhAZhFyM|2|12-150-553-5979|2080.07|quests sleep final pinto beans. blithely bold theodoli|
+212|Supplier#000000212|ZutN4kNIsv7sPWIV9,7|7|17-617-724-5874|-335.47|ular requests cajole furiously against the spe|
+213|Supplier#000000213|1s7 4odatD2BWz1QjXR2 2SnFhc,Rvr2Icxh1m7f|18|28-317-938-6979|2616.54|, regular packages. request|
+214|Supplier#000000214|B3uLKyb, xkfHbTSUBe6HwwaBPdCvhiOqO4y|14|24-114-624-8961|7089.25|omise blithely regular packages|
+215|Supplier#000000215|YmZQvVCVanip2E|12|22-333-479-3656|6125.89|fully upon the slyly bold excuses. blithely regular deposits promise across the slyl|
+216|Supplier#000000216|K83M7iWDJx N Y|14|24-182-902-2539|6902.54|luffily final theodolites haggle slyly pending platelets. speci|
+217|Supplier#000000217|UVrZj7M2mMd7JLjbc tvFmQad14d1oid|15|25-113-702-9259|3084.60|arefully final accounts. slyly even ideas haggle along|
+218|Supplier#000000218|c7CBVglgEM0XU,8bOU76GjEr5L5EXu9Uxa7U|10|20-180-767-8426|8843.88| fluffily. furiously regular instructions sleep slyly furiously regular packa|
+219|Supplier#000000219|f0siVrn1T,dOIJgYU|9|19-708-346-1944|5222.69|odolites use carefully about the reg|
+220|Supplier#000000220|uvWVvuY3tHcE2W34jcO538wy6|19|29-197-645-8102|6746.19|. furiously regular foxes solve blithely. ironic requests mold carefully. blithely unu|
+221|Supplier#000000221|aU oCBZs0CUUTU|11|21-402-925-9045|4283.34|onic dolphins nag furiously across the silent deposits. carefully even ideas snoo|
+222|Supplier#000000222|2JQCRHT8coRlrMria2|15|25-899-779-7536|4173.23|ctions x-ray against the s|
+223|Supplier#000000223|MCS3 zWF3Py8UZK PHdI6LsQD98phHJ7|2|12-790-796-4808|1518.42|y fluffy packages solve amon|
+224|Supplier#000000224|iD7eF,uwQxxPCAFTmdXYV7N|5|15-446-447-2944|9036.79|atelets. regular, ironic gifts eat furiously across the permanently silent instructions. reg|
+225|Supplier#000000225|6Ez0sxpUfmtjqFDO|9|19-787-123-9863|2461.11|d packages. unusual requests after the furiously bold packages wa|
+226|Supplier#000000226|83qOdU2EYRdPQAQhEtn GRZEd|3|13-768-844-6969|1906.46| deposits run fluffily: excuses breach closely against the slyly regular dolp|
+227|Supplier#000000227|Qo959Dll Bd7xvfq3ELtCq|4|14-215-994-7949|7896.01|ng accounts solve furiously at the furiously express packages. carefully bold packages sl|
+228|Supplier#000000228|pyTY uocaSasIUlrHUbBwM,r,|14|24-920-216-6514|2291.35|s above the final deposits detect along the furiously idle packages. b|
+229|Supplier#000000229|ycjgLrk,w8DcakfwTS1SO5kVch|14|24-392-655-8913|8751.47|ly according to the carefully regular |
+230|Supplier#000000230|TgT146Clu9ODbYYBx4Wd8oe|1|11-819-222-2128|2975.98| regular dependencies. packages after the accounts are blithely blithely bold packages. furiously |
+231|Supplier#000000231|uZfFnyAs9oFJ0No97rtQ OIV1AOYi|5|15-551-432-9958|9287.33|mptotes sleep above the regular dep|
+232|Supplier#000000232|90YJjotHlfwyieaTfuBJ8kohU5Oc83bESout,p|7|17-478-427-3811|300.80| asymptotes lose slyly evenly ironic asymptotes-- blithely bol|
+233|Supplier#000000233|Pu9mqssUjJNoqiGG4vfLTn3ENJnNPVrBNnXJM5Eb|6|16-283-737-6972|4544.18|kly pending platelets sleep blithely along the ironic, |
+234|Supplier#000000234|iMrk7HUD87at3IIh4rBi|24|34-187-193-2209|9957.00|nag furiously pending dependencies. unusual deposits integrate fur|
+235|Supplier#000000235|W89jjgy458|8|18-835-352-6531|2424.93|ackages run blithely above the fluffily even dolphins. dep|
+236|Supplier#000000236|dZExtd1dlMyrPdSqDC3|15|25-250-525-2436|9844.00|lar platelets. blithely ironic packages cajole blithely special ins|
+237|Supplier#000000237|NzEXFiPN5tnrZzs1E,uGeoEqolOlDYE7oQHnQWg|23|33-976-749-2413|6170.98|regular ideas. carefully express ins|
+238|Supplier#000000238|xjSZNq AIqmrV UfxyGcS04RYOzW|8|18-950-354-3488|5763.17|s. carefully final courts impress furiously among the slyly regular deposits. ideas thrash furi|
+239|Supplier#000000239|XO101kgHrJagK2FL1U6QCaTE ncCsMbeuTgK6o8|22|32-396-654-6826|4672.25|arls wake furiously deposits. even, regular depen|
+240|Supplier#000000240|yMzL86zw28z6sMa|10|20-843-630-4161|6537.07|sly. final, regular pinto beans unwind slyl|
+241|Supplier#000000241|K3sXvTEnHT9yQSbzZmbPNY0,iPQsmySI|8|18-213-514-9357|9594.49|arefully final foxes. regular, pending requests haggle busily special asymptote|
+242|Supplier#000000242|cpZMlI77TRq|11|21-489-286-5908|3736.64|nic accounts sleep furiously according to the quickly pending requests; slyly bold deposits haggle. |
+243|Supplier#000000243|8aQ3HGeOXxgYeMAXZQe B5y2RKEF5jdmN3Qb|6|16-554-376-5494|747.88|kly silent requests among the blithely regular foxes use fu|
+244|Supplier#000000244| c6fBN9a 6EOcB1ZjbImMBAQMwI BKScDNVRP8|1|11-556-986-9638|5489.08|d blithely. pinto beans boost quickly. blithely bold accounts affix.|
+245|Supplier#000000245|8qUceq1TSsNaTol3Ul6TDNSu9yRddanHEL|8|18-739-793-2096|6943.28|ual deposits x-ray slyly. carefully regular epitaphs about the |
+246|Supplier#000000246|BatWQwH8DgkBs|4|14-278-988-3903|5740.87|ealms are. even theodolites use regular, unusual ideas. ironic, final ideas sublate|
+247|Supplier#000000247|0bkES oiL2joJGmxdGwPfVCOL,pIQ4JNZBPnOR|23|33-695-935-2388|3758.64|final requests. final accounts affix. express accounts about the furio|
+248|Supplier#000000248|vDPBiu4L7hPZxwaEoNW u,zHgXVHvBXFBdfuek|6|16-646-663-7202|7487.64|ss packages are idly about |
+249|Supplier#000000249|ktpns6Bvu4fP8Wzzlc8|10|20-241-221-7700|2800.60| regular excuses about the blithely pen|
+250|Supplier#000000250|JkzOvHRi1rEdkZ|9|19-347-403-1571|9681.99| packages cajole blithely pending theodolites. ironic, silent requests |
+251|Supplier#000000251|Uqi3s, iqzLxI4duoRfgkciiN4XuCvITGIUf|21|31-869-288-1674|283.61|eep blithely even, ironic requests. carefully pending courts above th|
+252|Supplier#000000252|xcaKgadrug|11|21-860-636-2585|1523.14|tain slyly fluffily bold ideas. furiously ironic ideas nag furiously slyly even requests.|
+253|Supplier#000000253|DbryhLi6gCv8A P9FkjNp56mLP4KnV9Do|21|31-811-659-6792|352.61|ng courts wake quickly against the slyly careful packages. even ideas nag caref|
+254|Supplier#000000254|c6h4mizJAVT0Oz|7|17-100-212-8737|6230.48|nos. bold ideas wake carefully among the furiously |
+255|Supplier#000000255|qx16XyCEUh9OawVeQWOlGlhAU32iHFPNkO|18|28-629-327-4139|4663.08|s boost. ironic pinto beans along the slyly unusual foxes haggle regular, final asymptotes. reque|
+256|Supplier#000000256|ma15idU1PzO7itP67W1Rd|11|21-370-961-2170|6469.78|sly pending deposits haggle|
+257|Supplier#000000257|BjFRji2XvSyOVe MHMP9r5vC2eP4kfF GXwBe |16|26-806-563-6761|630.74|odolites use slyly finally silent requests. package|
+258|Supplier#000000258|z8ImnYbuBbohh1 WT|7|17-820-568-1141|5736.09|press packages. final dolphins sleep sly|
+259|Supplier#000000259|2zzYBzG12K|10|20-901-530-6532|439.97|ts wake slyly along the express dep|
+260|Supplier#000000260|TQGxRpJe08nPRqPYDIMTnst87mC0HZJ,rlnCQTF|23|33-226-804-7400|597.64|silent, stealthy instructions. furiously final tithes hinder regular requests. expres|
+261|Supplier#000000261|vUT2UDI,GAqIA|0|10-547-567-3297|-466.40|kages. furiously express requests are. carefully silent deposits at|
+262|Supplier#000000262| 3kCm2eN3Jra2UzvWWBm,P04b|20|30-952-865-8647|2138.97| ironic escapades. furiously even ideas affix blithely. silent accounts thrash beneath the|
+263|Supplier#000000263|7Vdou,WjHE|14|24-203-272-3139|-316.81|s quickly regular foxes. stealthily ironic p|
+264|Supplier#000000264|Oeq2ei9wRqt6yOuAAb0KoMRyoH4v|19|29-870-178-3535|8327.34|e busily ironic theodolites. quick deposits after the unusual dependencies print sly|
+265|Supplier#000000265|eHF4Edu,B8,NgBSSEV4xNC37i1q08WCNKyOe6jP|19|29-734-865-6334|2638.54|le evenly besides the fluffily fina|
+266|Supplier#000000266|LGWx4Xcq0GwQVwTR|23|33-256-678-2321|2874.22|y express foxes. special, pending deposits are slyly-- packages detect blithely ironic se|
+267|Supplier#000000267|,E9NoXeK5qOwVRoutS,X8Gr|10|20-501-962-2896|4068.14|riously close asymptotes are. carefully bold p|
+268|Supplier#000000268|ApLM6aebtP79LIp|23|33-863-692-9039|6261.99|nic frays use furiously among the final, ironic the|
+269|Supplier#000000269|LxOgEClzbXDEYHYG2ZMFoWHrD,C|2|12-988-623-6829|6331.84|lar foxes wake quickly about the carefully ironic asymptotes. final, regular do|
+270|Supplier#000000270|PM P65mhBoeFWkFMi9,|23|33-682-295-4727|3339.18|ss slyly after the fluffily regular packages-- quickly express pinto beans nag blithely|
+271|Supplier#000000271|U0BSsnILvbk7mL1cim,ZX70QIiTz|15|25-270-292-6296|8576.95|its nag above the silently final escapades. final, even inst|
+272|Supplier#000000272|ywrDqLLTfKUF93|7|17-209-164-6836|6858.73|of the furiously final accounts. bold dependencies after the fluffily ironic asymptotes hag|
+273|Supplier#000000273|O0TyoQXlzJx|15|25-247-243-3871|3708.18| pending ideas. carefully silen|
+274|Supplier#000000274|usxbl9KSW41DTE6FAglxHU|21|31-571-345-4549|6296.15|ecial courts. express asymptotes must have to sleep theodo|
+275|Supplier#000000275|Mh9eZVjmBPZc5I2KHcMcfZOYbpFqrlVOLR|21|31-446-410-5215|8477.48|packages. ironic packages are. pending deposits are according to th|
+276|Supplier#000000276|KdVDs6EGfWVsPdjuCh9iep|6|16-752-344-8255|7144.78|cial, ironic theodolites against the decoys cajole slyly ironic foxes. carefull|
+277|Supplier#000000277|REebuerbQmMxlbCXnl2ihK,VyItkTNxU36  c|12|22-758-939-2357|4300.15|final deposits. unusual pinto beans after the even accounts affix idle, final |
+278|Supplier#000000278|gBQmkVmclUGbjwHVtrdmCk9Dwqd2Dez1|9|19-563-887-6379|665.89|deposits against the final foxes use carefully express ins|
+279|Supplier#000000279|aVGVO0XNwTEkFwH0OhaQMJC7un0EizPpq|13|23-617-962-7353|2613.77|accounts. quickly special packages nag at the unusual requests-- special d|
+280|Supplier#000000280|MZAJPsr3TS62ocxtRc|11|21-536-349-7369|4540.53|nticing platelets. ironic theodolites about the regularly final accounts sleep accoun|
+281|Supplier#000000281|A2sesSQAAj6wvPPKL X4caRp,O|0|10-553-675-3578|3717.19|ooze fluffily express, pending pinto beans. regular instructions haggle ironic|
+282|Supplier#000000282|Gh,k4vQQPzOyIA9ac9re5IjaV5LyXDtZX9O1kNG|8|18-420-459-8366|3751.04|into beans. carefully special excuses cajole furiously. regular d|
+283|Supplier#000000283|MWLpGjjs1WoofKSoN7Lm0DXcSY5H4E9CeneF67ZQ|17|27-409-203-4267|785.26|ess instructions. slyly unusual deposits according to the accounts run past the slowly even package|
+284|Supplier#000000284|7NF0OkJazPIJ7l,LBSw3abv|15|25-955-684-2997|6866.81|quests nag carefully according to the furi|
+285|Supplier#000000285|Br7e1nnt1yxrw6ImgpJ7YdhFDjuBf|3|13-907-873-9658|7487.27|hely? slyly bold patterns doze carefully according to the fluffily even packages. bol|
+286|Supplier#000000286|o80iAPvmwqM3WOA93pqBHT4Dsgy1rwG|10|20-459-893-8984|7517.31|iously regular pinto beans sleep carefully slyly even accounts. packages cajol|
+287|Supplier#000000287|7a9SP7qW5Yku5PvSg|3|13-484-626-4797|6541.85|nic epitaphs are slyly above the foxes. ideas after the quickly bold pin|
+288|Supplier#000000288|8fUJhFrKTMmi5L|10|20-850-734-7789|1884.76|ual deposits wake furiously i|
+289|Supplier#000000289|6Tua0IHqN0sod1Tpdax5hD0|16|26-396-901-4544|6144.47|equests can wake: furiously final accounts lose regular, final requests. special somas affix. fi|
+290|Supplier#000000290|6Bk06GVtwZaKqg01|6|16-675-286-5102|167.56| the theodolites. ironic, ironic deposits above |
+291|Supplier#000000291|0qDDQst1b1bznHQh5jsmOq8nxf8Pz1Kn|0|10-404-519-2270|9132.12|wake silent, pending accounts. blithely final as|
+292|Supplier#000000292|POEdp8d ug5bKQa,w4SAMaM2xna1ZtVk4z9loa|21|31-933-112-8506|9598.62|ular excuses. furiously even requests sleep carefully around the fur|
+293|Supplier#000000293|H2JnUWv1X3s0yI7i2tY5Vd0kd4f0|9|19-143-962-9484|7114.81|t the instructions. daringly bol|
+294|Supplier#000000294|20vnacv,dtZ0mr5kukNf|18|28-299-788-1836|6700.75|en accounts. silent, regular instructio|
+295|Supplier#000000295|gpm7fahY9j6YyTr Dozul|23|33-998-989-3147|-435.02|en requests according to the |
+296|Supplier#000000296|g,WJbekrbjAcpNtn2QRsWtYx2RNVk 9aY|14|24-250-680-5383|1549.59|through the blithely regular depo|
+297|Supplier#000000297|gfVOI9aT2roNGEgc|1|11-562-493-7660|4954.57|al, express epitaphs? furiously ironic ideas haggle regular, ironic instructions. carefully special|
+298|Supplier#000000298|oIB13yXjsqGSW|5|15-790-635-6692|-530.05|egularly unusual accounts |
+299|Supplier#000000299|kvWv3MYn,Q2SqJlckBfhpWYmnSeRwg6|10|20-180-187-8847|-561.38|lar asymptotes integrate darin|
+300|Supplier#000000300|YU QZvXHJC7,ZspUPGwaIOa|2|12-468-732-4623|811.42|furiously even theodolites haggle along the final, ironic foxes. bold deposits are.|
+301|Supplier#000000301|YPFTsQOPRAGIlBw|4|14-434-699-9741|6472.62|express foxes sleep carefully even packages. carefully special ideas cajole slyly. carefully r|
+302|Supplier#000000302|ow8Io1JHkPnIAGCQYCWC|18|28-734-845-8630|4422.77|tructions after the foxes cajole regularly ironic tithes. ruthless forges across the slyly express |
+303|Supplier#000000303|MCFk0WJH4O9Fj,m3as3bdIgnP4pe 8qPj,wfo|8|18-932-912-3102|4500.90|ously carefully final foxes. fluffily silent requests across the fi|
+304|Supplier#000000304|b4rSMq4y hBDMJgcNp|11|21-261-644-8980|8564.16|endencies across the bold deposits nag|
+305|Supplier#000000305|pKYTaixBtQ3AGJLwndRNxUlLUkXN667JT|17|27-607-328-4914|974.24|uriously across the requests; ironic requests serve bl|
+306|Supplier#000000306|T5EUviO4mqrGO6ruLcIoo29M5S|16|26-954-209-5690|3979.54| wake fluffily. furiously final|
+307|Supplier#000000307|3wL9YHFIvddxzh3mwy6SSrpfmzKvwAGmXK|14|24-499-938-5607|2168.65|s above the unusual theodolites was quickly quickly perma|
+308|Supplier#000000308|mVY7rtoxj9uSqzAJIp TOWPsv080hbPbOStGZx|8|18-360-691-8400|3513.89|tructions hang carefully according to the furiously close requests. fur|
+309|Supplier#000000309|gT84r,AFSrZQpiSCnE02B3QK|2|12-950-744-5100|7875.48|riously final deposits boost blithely about the even foxes|
+310|Supplier#000000310|I5Mw,rGgWQOFVotMHUmVjH|0|10-119-372-1062|9312.63|ccording to the carefully express dolphins! furiously pending dependencies integrate s|
+311|Supplier#000000311|yjGDnCKi4Wmtim H3n9p|22|32-445-679-8585|7431.00|uriously final requests integrate. sheaves against the furiously final accounts are evenly abo|
+312|Supplier#000000312|8XYqBq S,OWbSp9Y2qGBWEV4hH l7ywxk|13|23-829-479-5202|7844.41| furiously requests. always bold accounts solve |
+313|Supplier#000000313|9xrpKrwr9,Jgvm5q rGkZePkpsH5xEXZnuLJug|9|19-648-945-5128|4114.68| beans. special deposits use carefully after the blithely even |
+314|Supplier#000000314|CW6uXfPdJ6lmcJ|22|32-389-335-6770|7456.49|s dazzle regular, final dolphins. furiously unusu|
+315|Supplier#000000315|HRLhxvqBP,f,NjeUiT|2|12-574-691-2832|7315.00|eas. blithely pending packages cajole blithely ironic packa|
+316|Supplier#000000316|uXJ13cI7fXByGLBoQ8VU9AT|0|10-382-551-5105|5189.82|ding to the slyly unusual frets. accounts use carefully slyly silent theodolites. slyly ironic depen|
+317|Supplier#000000317|EwuMPFkb3IrSqitX8ddgxy QWhXxeM4RoOu2d|17|27-131-276-7910|4277.34| packages according to the deposits sleep carefully regular hockey players. quietly bold |
+318|Supplier#000000318|Cm5fnQ4M6VeJF17mBbV6iB0XoQL31STrb3Su|24|34-466-917-6708|2268.02|ly express accounts cajole blithely brave instructions. furiously even |
+319|Supplier#000000319|tcES8Ay3zcHQER9K6gAKFWlDvcg nrbPl|18|28-288-466-7847|1578.18|ts are furiously even, pending pinto beans. fluffily idle courts cajole bl|
+320|Supplier#000000320|v2FlHgKKiGiDwR7uoJNWF|21|31-984-744-5308|1485.45|ns nag carefully carefully pending platelets. pending deposits dazzle. requests above the i|
+321|Supplier#000000321|pLngFl5yeMcHyov|19|29-573-279-1406|4941.88|y final requests impress s|
+322|Supplier#000000322|lB2qcFCrwazl7Qa|6|16-803-605-4129|6102.62| to wake. slyly even ideas breach furiously. ironic foxes haggle carefully. ironic asy|
+323|Supplier#000000323|0LEOmcTTomY1F0y|22|32-563-275-6438|704.83|accounts. unusual requests haggle slyly special packages. always silent instructions e|
+324|Supplier#000000324|QDsgOSozg jniYR2HzxofLZyk0qGdJe|18|28-637-452-5085|8042.43|ithely slyly special waters. |
+325|Supplier#000000325|bQgx8ABSanNFNfCHY,2 uPvGfafaMC5|8|18-671-443-5744|-786.95|unwind carefully among the ironic, final requests! quietly unusual foxes hagg|
+326|Supplier#000000326|9kFiCwhcBldg4xwm|7|17-390-604-7483|4680.75|quests could use furiously across the ironic, even f|
+327|Supplier#000000327|MoC7Jc7oThpZ34HmJPKuUbOZwOyPOb1ksGlvT8o|0|10-519-344-7532|6408.52|final accounts poach quickly slyly regular requests-- furiously careful requests cajole car|
+328|Supplier#000000328|SMm24d WG62|7|17-231-513-5721|5069.27|he unusual ideas. slyly final packages a|
+329|Supplier#000000329|zPg8 aqmUQ4wHGC 8nO39C50AOMB1VxTsJWmpYyz|17|27-571-511-5321|2166.85|endencies. boldly silent pinto beans cajole. carefully unusua|
+330|Supplier#000000330|MUxmICc8xS41A|22|32-845-516-3236|6199.85|ix bold requests! final deposits against the ironic ideas boost across the bl|
+331|Supplier#000000331|VkdMTxXHy22069CWmK658|22|32-337-220-5366|6929.91|special theodolites use furious|
+332|Supplier#000000332|OpvOrxiHWJ6QM|9|19-777-809-6033|1106.07|ve the furiously pending warthogs. slyly special ideas haggle carefully. express|
+333|Supplier#000000333|MaVf XgwPdkiX4nfJGOis8Uu2zKiIZH|22|32-508-202-6136|8702.02|oss the deposits cajole carefully even pinto beans. regular foxes detect alo|
+334|Supplier#000000334|NHMGXoP 8cWsNXvV46AkZZJuptUm4mFo8|13|23-767-548-6051|7848.40|efully regular requests: final requests solve quickly. pending deposits across the blithely express|
+335|Supplier#000000335|JSwzRFY7dDNKfBe8ebMtm|16|26-741-643-2945|5119.09|ong the regular ideas haggle along the close ideas. furiously ironic ideas alongside of the fluff|
+336|Supplier#000000336|orYe2VXtABdK sUvMCOZ9ZMhBJTPp7W3pffWmjZi|1|11-866-373-2323|3603.62|refully even packages above the unusual accounts detect |
+337|Supplier#000000337|IRrbCdIS,GB4YYhr|12|22-951-643-8793|9029.85|en theodolites-- special, final deposits should have to boost ca|
+338|Supplier#000000338|2b6Gj,rSxqeIiAOMquj6c03p GmAzIog BrF05|16|26-565-914-7207|6835.16|fully silent requests cajole idly. even dugouts wake f|
+339|Supplier#000000339| ESOHA53rEW5G7Z75w5hJ|5|15-967-506-6094|3589.64|ts snooze carefully express accounts. foxes nag carefully |
+340|Supplier#000000340|Pk5F9dPqsa3k16I3UR282gY|22|32-849-350-3343|5001.23|along the packages. blithely final foxes cajole. unusua|
+341|Supplier#000000341|sdb9yPRPun Z awDuDPUId2NH0Yrz7dnJ1GBDc|14|24-320-626-2836|2755.92| express deposits against the dependencies use blithel|
+342|Supplier#000000342|YgcA0MYOSB1ou|2|12-829-736-2942|6791.04|s wake above the quickly pending attainments. furiously special re|
+343|Supplier#000000343|LTRj tcLNq34je60R7LkZtNrEwmry00DIXguSxMS|4|14-617-526-7362|-905.27|ckages sleep among the slyly express excuses. special, regular pinto beans are carefully: express d|
+344|Supplier#000000344|F mMVT6PuRj5S|11|21-808-924-2361|7082.37|inal courts nod fluffily for the care|
+345|Supplier#000000345|ZYuibETx2zArwg|18|28-503-710-4886|351.98|lar dolphins. carefully unusual packages according to the furio|
+346|Supplier#000000346|9vl7c3azrqt9wITrcglQhIGfwea|8|18-148-870-6674|6710.10|ecial accounts. quickly pending packages print. slyly bold pinto beans detect slyly unus|
+347|Supplier#000000347|zg0m5HrQtl D|8|18-932-818-1269|2674.43|special escapades promise pending, regular deposits. blithel|
+348|Supplier#000000348|ZNEHF5c7kP5tPGdQ ZrJZNRdQnu0M9LVyO urMm|8|18-338-824-3860|1172.37|eas. frays nag furiously final accounts. fluffily unusual theodolites use furiously above the slyly|
+349|Supplier#000000349|21Sp51XIZ9vTChQBWv0CA1o1P,26clhjR|13|23-532-708-7267|1033.10|lithe packages. carefully final accounts on the carefully final accounts sleep slyly |
+350|Supplier#000000350|KIFxV73eovmwhh|7|17-113-181-4017|3294.68|e slyly special foxes. furiously unusual deposits detect carefully carefully ruthless foxes. quick|
+351|Supplier#000000351|ZLWTvVCSmwsKfElT7K 2O1Ui|12|22-508-407-2628|8684.60|ithely ironic theodolites play. decoys sleep slyly against the deposits. s|
+352|Supplier#000000352|LsVcltEi9NYu10ByH 5grND|16|26-911-452-7918|1189.55|ickly. platelets sleep slyly blithely slow escapades. special requests boost furiously. slyly enti|
+353|Supplier#000000353|Bo,HYtujUMsMvE,|15|25-153-823-7261|3646.16|ular packages. deposits nag slyly abo|
+354|Supplier#000000354|w8fOo5W,aS|3|13-796-527-4255|8965.72|blithely regular accounts around the special foxes kindle blithely across the even dependencies?|
+355|Supplier#000000355|DuCQn,7qi1KL a99mTWSY4Z9eC53dslWPABGj7|16|26-389-592-6792|6929.22|ackages cajole according to the slyly ironic theo|
+356|Supplier#000000356|Lefi6RDDtvaVXqvhlbMuUrVm45oJbtkZM,Mup|11|21-198-523-7929|397.74|carefully blithely ironic excuses. enticingly blithe packages along the attainments haggle carefu|
+357|Supplier#000000357|Vf7Hi5DuzZ6RJ,mfaXBVNqx0|20|30-147-268-1519|5724.04|e slyly among the furious|
+358|Supplier#000000358|V3yxhHhHSXqOoc5UPv5TwkVDGKQsG|3|13-831-247-2584|6974.74|ans. ironicCustomer  requests cajole carefullyComplaintsy regular reque|
+359|Supplier#000000359|J5HqPZnfLcFMtlgwCnZPUI|24|34-121-923-9858|6476.58|ons. furiously unusual accounts above the blithe|
+360|Supplier#000000360|3avxIDL4YPrZVHie4rOiPrK8Z IJwEPqZaioHK|5|15-671-472-7761|4956.01|uickly regular asymptotes. packages ar|
+361|Supplier#000000361|f8IUYRmdVXhQC9qJQjWknCXmzhe38vCbk6|3|13-192-383-9438|1678.56|deposits. carefully final deposits cajole carefully after the furiously regular ideas. |
+362|Supplier#000000362|XdtN0U5Qm2Z|23|33-445-749-9918|5571.81|e furiously. slowly regular accounts sleep furiously. carefully bo|
+363|Supplier#000000363|sYpqZxYin8GKkPtNWKOnJMTLm9f5e0lZ61N8wp|1|11-220-343-2951|505.69|express requests cajole furiously blithely final deposits. quickly special foxes breach|
+364|Supplier#000000364|OhfGUPn8U9oBx5|9|19-653-706-8164|5105.84|regular dolphins promise after the special deposits. blithely final pinto be|
+365|Supplier#000000365|SZaykm40Np0vOKp|23|33-253-791-9564|901.98|s. deposits use slyly except the slyly final instr|
+366|Supplier#000000366|AtIdvjsMt9peVyEbpoDerNTteRF|12|22-648-291-8851|-535.40|ts. slyly special asymptotes c|
+367|Supplier#000000367|E Sv9brQVf43Mzz|22|32-458-198-9557|8488.53|ages. carefully final excuses nag finally. carefully ironic deposits abov|
+368|Supplier#000000368|3o5w6T5HzjFmSf1|0|10-694-873-8364|5783.96| nag fluffily alongside of the silent depo|
+369|Supplier#000000369|XKLa3tQT7,TgtuLi2Vme8vGyx|7|17-381-930-4614|2094.34|cording to the special, regular pinto|
+370|Supplier#000000370|yyNSJAG9UXcWit4SeMkEIrNcdVq5|0|10-602-768-3758|8515.99|ound the unusual foxes sleep finally within the furiously unusual requests. sl|
+371|Supplier#000000371|7kc0KqnPxrJuGZdrrec7Cn,wrCPdxPemNPZQ|19|29-501-449-3837|5358.50| among the ironic accounts-- regular accounts nod slyly |
+372|Supplier#000000372|Bdhu5NV4VfPYBxsCmK,YnkoHIaW|5|15-246-325-3001|1311.15|ltipliers. blithely regular deposits was above the furiously even accounts. q|
+373|Supplier#000000373|oQAQ3UNvyJW|16|26-656-301-9303|1461.85| asymptotes wake quickly around the slyly regular dependencies. regular attainments haggle along th|
+374|Supplier#000000374|svrrNWiqg1f3tEXZdVbFK CAtLDsW1CidtyS|2|12-699-158-6062|4263.58|ithely. ironic pinto beans use furiously abou|
+375|Supplier#000000375|3CIBgjwAjB A1uxkiJNjc 7pI9AKhvnr1BHV9|11|21-250-668-2735|1962.02|ully regular pinto beans acros|
+376|Supplier#000000376|L8OWL3jXMCR3Gh|16|26-752-731-5943|6579.41|usual dependencies haggle above the carefully regular platelets. never regular foxes detec|
+377|Supplier#000000377|L4SF6EzZ xhyZCQ59onlADR4|16|26-634-598-9185|6113.96|ly express accounts wake enticingly special, express frays. furiously|
+378|Supplier#000000378|FfbhyCxWvcPrO8ltp9|3|13-930-567-5190|4429.27| among the furiously pending excuses. fluffily express deposits except the slyly final packages|
+379|Supplier#000000379|jyGWzIJTAdI0Iko2o|20|30-202-917-6929|3698.31|sleep? express packages are quietly around the slyly even ideas. express accoun|
+380|Supplier#000000380|LiXmikqsO6R40FKovSUZpl|20|30-245-487-4913|5421.70|re. grouches against the blithely regular asymptotes sleep slyly daringly p|
+381|Supplier#000000381|NfyIKLJrXusWgmgkkCS6Gn3RRra|6|16-117-297-1825|7476.55| packages haggle blithely express tithes. blithely final deposits wake bli|
+382|Supplier#000000382|wNasxwKglHa|15|25-531-651-1411|3486.56|furiously final deposits-- even foxes haggle carefully about the fur|
+383|Supplier#000000383|pGEPbc70IKUZuTiTaigKuizjt7Y5oUoDL3Bq1|22|32-792-648-3774|-484.12|nic excuses. carefully even requests alongside of the regular p|
+384|Supplier#000000384|zMr51gtJ0Vu83Dk|7|17-554-428-8511|1342.17|taphs cajole furiously blithely final |
+385|Supplier#000000385|4RDwKCNc6 yBY|18|28-730-866-8837|5836.17|frays. requests sleep ironic theodolites. carefully even requests doubt furious|
+386|Supplier#000000386|zxFf8YWdD9ltSzw0NOTFpKcBH8zJrzif9|13|23-535-472-3290|-404.12| foxes wake carefully dependencies. slyly fluffy depen|
+387|Supplier#000000387|EEmqrW2gNAbuJjKuTPgA8kmKA0bZcQSmV|7|17-797-328-7624|3854.14|ld, unusual packages alongside of the carefully final deposit|
+388|Supplier#000000388|n27XQohXrXlJRLdsyXNoljPS|18|28-306-827-7902|6540.34|rate around the regular accounts. furiously special pinto beans use bli|
+389|Supplier#000000389|FW96liSdq3noHJpwM|24|34-885-883-5717|9467.35|nag ironic packages. ironic pinto beans would sleep furiously. regular realms wake across the |
+390|Supplier#000000390|8Qv7RjLQSFfyt5JpH8fsTf0|7|17-821-610-9726|868.36| to doze along the foxes. final requests are furiously. furiously express accounts use bl|
+391|Supplier#000000391|HBkwkigT2P9bU2wXBrPnQ|15|25-736-211-2793|6475.66|ckly furious dolphins about the furiously even d|
+392|Supplier#000000392|5YSB73Q4LMC9648IF1GGJAP|17|27-961-813-1153|527.38|ckages outside the furiously silent deposits sleep within the fin|
+393|Supplier#000000393|hxGMP2tFry WHEeI5cmEFVF027E|5|15-120-912-1594|6686.84|t the furiously regular deposits. excuses about the ruthless, regular |
+394|Supplier#000000394|5mGD3d,LeKycAyeYbVlrGMlzmT|9|19-517-731-4139|7685.82| accounts play quickly carefully unusual requests. blithely pe|
+395|Supplier#000000395|vYD9APwEz6R1BFWJ,GDJ7yCUZJm|18|28-723-165-7801|688.37|xcuses. regular deposits across the blithely final packages haggle slyly u|
+396|Supplier#000000396|WMtmXuQwBKESTTFH4ru1f|10|20-236-453-5458|6420.86|ts. quickly unusual accounts are fluffily above the express requests. daring, stealthy pi|
+397|Supplier#000000397|kFrhzYG1SR8aWrHsftcptN,im88pImSkxxFu|24|34-880-360-3462|3425.62|ependencies about the regular pinto beans haggle quickly about the s|
+398|Supplier#000000398|WTV,rO0S1KZFKlhLsUpAH|10|20-536-426-8920|9354.75|mong the courts nag slyly special foxes. furiously regular theodolites w|
+399|Supplier#000000399|UCzZPQfZXnRhGZcXfnnvkfnqLVEOc|20|30-814-148-6803|345.97| haggle furiously about the close theodolites. foxes along the bra|
+400|Supplier#000000400|QACx8vfYzPsZHCet2Yjst4e2XzjOXF|21|31-514-285-7013|4624.87|eas sleep furiously among the regular ideas; slyly bold excuses alon|
+401|Supplier#000000401|9 zStaJ sD|10|20-586-179-6994|2233.53|ages. dolphins integrate blithely final waters. carefully unusual accounts are fluf|
+402|Supplier#000000402|i9Sw4DoyMhzhKXCH9By,AYSgmD|3|13-109-731-3195|4943.01|around the carefully pending dolp|
+403|Supplier#000000403|TTVlcRcFrglnhCffA11iw l6bZyyaI9xcRF|9|19-513-268-3908|3102.18|he regularly ironic packages. idly final excuses integrate according to the |
+404|Supplier#000000404|RQwxP4tpScystYCqOJ,XpCWr4Kox4|7|17-943-161-3434|7311.61|. carefully silent instructions affix. blithely even requests unwind. final dolphins de|
+405|Supplier#000000405|dJIijVTX n7M0NDEQvCA |24|34-728-545-7374|3213.18|er the foxes. special warhorses nag fluffily|
+406|Supplier#000000406|zMhU58CDF4aHTeodxg9IgRZgq|21|31-926-216-4352|2867.41|o the even accounts. fluffily ironic asympto|
+407|Supplier#000000407|WliGC47Vto2nh7mj|11|21-112-803-6707|-459.62|ar asymptotes. carefully regular req|
+408|Supplier#000000408|qcor1u,vJXAokjnL5,dilyYNmh|22|32-858-724-2950|6173.87|blithely pending packages cajole furiously slyly pending notornis. slyly final |
+409|Supplier#000000409|LyXUYFz7aXrvy65kKAbTatGzGS,NDBcdtD|7|17-719-517-9836|-820.89|y final, slow theodolites. furiously regular req|
+410|Supplier#000000410|6V,FO4xJPwvxGzReYzVj6dwTSIechnSSCyz9iY|19|29-822-375-4854|6234.92| beans sleep outside the thin instructions. thinly even soma|
+411|Supplier#000000411|G9H53XVrdbhRgvQwho1AS|18|28-518-787-9625|2250.74|ial foxes. furiously permanent packa|
+412|Supplier#000000412|S2onAA,jGtQ3qfpN|12|22-351-499-2131|8183.66| the packages! quickly even warhorses haggle slyly along the final, expre|
+413|Supplier#000000413|GAufsRQQE P,dVCZWIMEUAsm,7|11|21-875-836-5585|7788.45| the fluffily even pinto beans. closely regular asympt|
+414|Supplier#000000414|FkmlHgU9pqCboQ32Lcx|21|31-118-322-1371|9547.00|. blithely unusual packages might cajole blithely regular requests.|
+415|Supplier#000000415|ibzrtLp NIBzzQVh2mc6M7GJj3V2Z5uKlIDw,z|20|30-297-706-6489|8575.44|inal deposits against the ironic Tiresias wake according to th|
+416|Supplier#000000416|F,9zQapGlzjmqRhVTj1DR|11|21-245-879-3004|3550.06|ic orbits according to the furiously f|
+417|Supplier#000000417|b3CbQxCMWWu,YyeQU 51fccuv7Mt|6|16-563-597-5520|-113.45|equests hinder quiet courts. carefully|
+418|Supplier#000000418|l07dIg BFdcW|24|34-689-611-9130|5229.01|re of the carefully final courts. ironic pearls haggle slyly along the bold, regular d|
+419|Supplier#000000419|FpWtqjkbqEXn|6|16-510-433-1061|2899.03|of the carefully express accounts. even tithe|
+420|Supplier#000000420|kLWtAMtbSn|12|22-971-269-4753|2968.22|eas cajole around the regular accounts. evenly even escapades|
+421|Supplier#000000421|tXZPR dOYjjbGjarXxKPn,1|8|18-360-757-8604|-128.86|c dependencies. quick, express deposits cajole quickly. fo|
+422|Supplier#000000422|iu4c7rkFFNOvmfx,aSs62I|10|20-645-417-6790|-755.64|ly even theodolites. blithely c|
+423|Supplier#000000423|VCgMjClu4IDaVVMwMW0ARf1ho|24|34-577-174-3894|2937.16|quests; bold deposits lose pending deposits-- slyly pending packages play slyly. regular, ironic mul|
+424|Supplier#000000424|uOdFKME6fSAI,rvLcpTL|22|32-406-948-7901|5881.52|es. furiously pending ideas against the fluffily si|
+425|Supplier#000000425|a KnEGf,bqEnGd2Wd9Tl|0|10-262-132-6639|2805.77|ular pinto beans are among the fluffily bold acco|
+426|Supplier#000000426|zjIHPRMAI8vF|23|33-576-289-4702|8621.42| requests nag. slyly regular ideas |
+427|Supplier#000000427| Hnd2bzQ95, Adq bg7BQAbFVmRV9rQ,jY98|20|30-315-782-5162|6223.17|he regular requests haggle blithely about the forges|
+428|Supplier#000000428|v,lOR2jikAbT0hNyPuYhhJODDs2VBPp|17|27-542-634-4350|2890.48|ly final packages. silent depo|
+429|Supplier#000000429|6ITML8w7yXMd5wzp4xUYXX7rb|23|33-283-246-6716|2267.98|l ideas sleep. furiously ironic|
+430|Supplier#000000430|SMdrDaNv,2XyFOL6oVEfvH|1|11-541-204-3496|939.22|? silent pinto beans are abo|
+431|Supplier#000000431|WoeV,NINojE6hJjauAdrl5fGcdxX5JUPLnrim|3|13-269-548-1399|9477.34| according to the bravely quick dolphins. deposit|
+432|Supplier#000000432|ZkBaoMg9n7nXd1fyn |10|20-401-350-6978|3812.16|ven deposits sleep slyly after the blithely busy decoys. slyly ironic deposits |
+433|Supplier#000000433|At103qyX,VicINJGCOU51mQyfdYBB44Cg0S|14|24-141-780-8375|4988.55|r carefully according to the furiously regu|
+434|Supplier#000000434|endL6N 85uU0NemLv4L3mSEH4LT2BF|5|15-763-277-6054|9522.03|n, final packages. furiously pending c|
+435|Supplier#000000435|xKvU,V2SZj3OqEwdlgXs01K jSbJRjYYF|8|18-874-271-6733|6776.54|nic theodolites. blithely dogged accounts haggle furiously express pinto beans|
+436|Supplier#000000436|MV8Xu3m,93IINpPlE|20|30-253-200-6170|8396.49|. carefully regular request|
+437|Supplier#000000437|HuqIk0sK4yC6x5be,cTlPrFqqBCHYf|7|17-652-134-3031|9807.53|usly bold deposits about the pending|
+438|Supplier#000000438|cePboEvTZ6IfUAG 8asHxVbEmZnLSph9z01|24|34-502-705-5611|9291.35|hin deposits. blithely pending deposits sleep slyly. slyl|
+439|Supplier#000000439|dTnCcwPBKS J WRmt,Emi KnILcwcR9YOrdLuWD2|9|19-415-856-7881|1871.86|ithely ironic packages use special foxes. carefully even packages snooze quickly|
+440|Supplier#000000440|s4UNoE4WDs9vXLZFJjsCVxZ0W8XU YCgqGBxVX|15|25-475-341-4841|9290.23|uses. requests cajole among the quickly regular fo|
+441|Supplier#000000441|fvmSClCxNTIEspspva|14|24-252-393-5381|5008.40| the even, unusual ideas. slyly even plat|
+442|Supplier#000000442|PJSCHXMAsqyRr5aPD9lp4tUl1B1WytbosVY8EdNZ|11|21-463-951-7051|2257.13|es wake. accounts sleep slyly. slyly ironic platelets haggle. slyly|
+443|Supplier#000000443|nbs9cmnC63bi|10|20-238-345-8589|5569.82|sleep even dolphins. enticing deposits wake. furiously regular deposits acc|
+444|Supplier#000000444|mHr2VcUpRkvyQ9rjKMaPkeWbVZmEIhxhb8F|21|31-256-493-5644|-12.78|riously final requests sublate slyly. furiously ironic packages |
+445|Supplier#000000445|WqzJKhnLnF05It4 5TDkGkUwVYszIko|20|30-686-270-2150|3065.22|r the stealthy theodolites. bold, unusual attainmen|
+446|Supplier#000000446|na LX4kqDQbh|8|18-902-583-3116|2141.08|usual warhorses-- carefully unusual dep|
+447|Supplier#000000447|UHeJiRfImFw1r4MTrBk0vcwUx9|5|15-104-804-3139|2108.30|deas thrash blithely. unusual packages in|
+448|Supplier#000000448|cYzWCXDovaNR ,S3PICo3KYKAG3bYm0YKyqaZVu|24|34-817-553-5356|1209.30|e thinly slyly even warthogs; final asymptotes boost unusual pinto b|
+449|Supplier#000000449|kOYLYC4JQ5tBVlul15gdo6smU,VdIObtXyC|10|20-757-629-3940|5701.21|gular deposits are carefully about the furiously ir|
+450|Supplier#000000450|t3hxOMnv5AFdpM4|23|33-257-936-2585|8199.71|oss the gifts. final accounts cajole. sometimes special asymptotes are carefully along the package|
+451|Supplier#000000451|cqMKQiLjokvIFG|6|16-328-146-7253|2503.45|cial packages. pinto beans |
+452|Supplier#000000452|6bT4efJCWyxEtXmA1ZdwmqfrPGK|17|27-445-799-5245|9524.84|ions wake slyly alongside of the carefully ironic theo|
+453|Supplier#000000453|bpt98PxU5HSQt61bVB695JPjBmJKUv hNzQeHvC|21|31-882-179-6015|-905.25| quickly until the ironic accounts. pending requests doubt bl|
+454|Supplier#000000454|K8p1uXD3L,L|0|10-453-843-1585|7337.45|ronic requests haggle furiously furiously regular accounts. stealthy asymptotes sh|
+455|Supplier#000000455|de1QmawQjYipd|9|19-822-816-5632|8563.10|le. slyly even requests thrash blithely across the flu|
+456|Supplier#000000456|iSqve6KC7t69,jX6,HF B8Hbat11O|5|15-951-880-7133|5154.37|e along the slyly bold theodolites. packages detect. regular requests sleep furiously b|
+457|Supplier#000000457|Jld2rUj,Xw3u,lLq2EevCRQVYwSnkCT1K7nY1if|8|18-721-125-2213|2458.18|lyly across the slyly even courts. quickly silent|
+458|Supplier#000000458|IFNkUK1H53HwUHabiONkMFAUDb|21|31-318-754-9316|7654.94|blithely enticing deposits are. furiously final accounts about the regular requests h|
+459|Supplier#000000459|w12ixcTkh6AtG1LvWxX8El8Nf4vEGQiZrrpy|18|28-295-883-6516|9569.85|arefully even deposits. furiously final requests sleep. packages sleep. q|
+460|Supplier#000000460|lGEBJPLJaDwOhZpc7DQMY,PTUEv6BVBUsOGK0oF|20|30-851-458-4942|7619.85|ar theodolites use carefully about|
+461|Supplier#000000461|VLYMztlQim7tjPGSK0xPZXnb91a8,9wqVRwk62BP|22|32-897-799-8437|2922.33|foxes. ironic, ironic packages lose furiously regular accounts. carefully slow excu|
+462|Supplier#000000462|Bg,C2gIsljPAG|5|15-387-146-3147|9497.29|uriously unusual courts. blithely express deposits could run|
+463|Supplier#000000463|XOb4DatMUyqMuFM92ZRaapwsEQ|0|10-178-678-7353|9091.71|lay. deposits breach instructions. sl|
+464|Supplier#000000464|XwZyuXCVeO5wb5izvhfeX|5|15-653-204-7643|1448.94|ly. ideas are slyly after the ironic, regular accounts. platelets among t|
+465|Supplier#000000465|YOOuLmTfTFNFiipLtt iL7HQ fj lf0xOKDjnu|17|27-586-454-8554|7267.03|ly regular accounts nag. slyly regular deposits run furiously slyly final requests. accoun|
+466|Supplier#000000466|HEW3DIL,Aw0Ud|22|32-408-942-6024|2051.26|requests. closely final pinto beans according to the quickly ironic instructions breach b|
+467|Supplier#000000467|aibBbBkbtmDJ988LnMNkCAi|12|22-666-307-4101|-467.16|ackages. even, final dependencies sleep quickly. carefully regular deposits cajole furi|
+468|Supplier#000000468|T,pwE,skbYjr5DCAD2EfmEHNychqFKw1loF|6|16-494-568-3545|449.82|nusual dependencies. blithely even packages are blit|
+469|Supplier#000000469|G4Xq2 RFlLP7uDadWjZ96 uyaGNk8 216c6|9|19-527-692-4725|994.99|ely express foxes. carefully unusual packages nod furiously. blithely unusual pinto beans cajole at |
+470|Supplier#000000470|XckbzsAgBLbUkdfjgJEPjmUMTM8ebSMEvI|19|29-165-289-1523|727.89|gular excuses. furiously regular excuses sleep slyly caref|
+471|Supplier#000000471|Fc4 FkVkaA8zsUVr,bT3PcTko0n|12|22-925-324-7167|-414.45|hely ironic accounts. ironic dependencies sleep furiously about the bold requests-- q|
+472|Supplier#000000472|NlJV2a0ovbomfosgHUBx6sgT|20|30-976-134-3524|6238.12|to the quickly even deposits print slyly ironic requests. sp|
+473|Supplier#000000473|x1skh3uebekXL4BIKGgIGDUfTk CDn5FIJGaq2|15|25-658-329-5141|1094.74|old, unusual grouches. furiou|
+474|Supplier#000000474|USHBMdX8iFodU|0|10-327-319-7717|5226.21| wake. even pinto beans sleep quickly about the slyly special theodolites. courts |
+475|Supplier#000000475|xw4V6,4QQW LI5Qg EOKy4JD B4Cq1tjzaOma9Y|3|13-397-755-1516|-115.01|among the slyly regular deposits cajole after the even theodolites. carefully unusua|
+476|Supplier#000000476|ZvT qI2gMbh|0|10-219-531-3554|980.32|o the silent hockey players hang quickly around the furiously special theodolites. carefully bold d|
+477|Supplier#000000477|VtaNKN5Mqui5yh7j2ldd5waf|7|17-180-144-7991|7205.20| excuses wake express deposits. furiously careful asymptotes according to the carefull|
+478|Supplier#000000478|4jV maCw9SEt8jyLUsjej60bmMhP6zBv ajTk|11|21-580-334-3987|7901.42| regular asymptotes: fluffily unusual accounts integrate |
+479|Supplier#000000479|tdLkV2Ks0wBP1VlwdnPUxZnWADmxlbmRVE0a6h|18|28-851-500-5156|5283.98|s. blithely final asymptotes haggle fluffily. regular ideas |
+480|Supplier#000000480|q8,LH5UQiP3Tv60slOsFzX,HM0JPcwM0rD7eg d|14|24-645-644-2970|2927.68|ular deposits according to the furiously even asymptotes use among the bold deposits. quickly|
+481|Supplier#000000481|VqFS2DPW Ci2TpkfD|9|19-951-947-8520|539.24|refully stealthy instructions hang blithely ironic pinto beans. ironi|
+482|Supplier#000000482|LkVra4orMCs|14|24-516-940-6953|7978.18|ages. final ideas need to wake quickly fina|
+483|Supplier#000000483|ncWfnroE1n639qMrW8|2|12-443-228-5035|8366.89|refully regular ideas. furiously express theodolites across the sl|
+484|Supplier#000000484|WwSH FFzB2lViwrWli6Z4QVV AN1KH2G8|24|34-506-254-3252|7350.40|t the pending, even instructions. blithely|
+485|Supplier#000000485|ULR12B9vkQg4g0nFMaW|1|11-779-798-5195|6282.72|tegrate across the pending, special instructions. furiously regular di|
+486|Supplier#000000486|nCwPqLXZwjH20OVRy,fCQskTo3it2JHEGn7l|20|30-632-817-3179|2709.83|nusual, pending deposits wake blithely ironic deposits. |
+487|Supplier#000000487|BvDBcJa,jQPslM|5|15-551-730-1265|2124.86|ly about the requests. accounts could use blithely? furiously pending accounts nag regular, even ide|
+488|Supplier#000000488|AzfObar4VYwnQvsGbISGCshVM AIWYq9|13|23-609-606-3265|4546.13|ly ironic packages use quickly about the ironic theodolites! blithel|
+489|Supplier#000000489|y9NMoYGxDUPfrB1GwjYhLtCeV7pOt|10|20-375-500-2226|9836.43| quickly carefully pending accounts. fina|
+490|Supplier#000000490|JNqlktPWJ4|2|12-619-779-5325|10.55|ng to the packages. carefully final|
+491|Supplier#000000491|mTbDcJHQ7d|0|10-361-729-1693|-836.47| to the blithely even deposits. fluffily silent waters sleep blithely above th|
+492|Supplier#000000492|8wEulEYM zGvMXfDNNEw4B|14|24-875-296-5180|8368.06|y. slyly express deposits alongside of the accounts nag fluffily after the evenl|
+493|Supplier#000000493|7tdI3AtlDll57sj5K48WLX j5RDbc|11|21-252-702-2543|4999.17|gular foxes. slyly pending requests hang along|
+494|Supplier#000000494|6hAiQHDGTy6,8bjpxI i3f|13|23-200-629-1426|1080.57|pending packages. slyly even deposits wake closely. specia|
+495|Supplier#000000495|p086j79twIlC25BD6A|12|22-881-968-9019|9097.65|y regular theodolites shall cajole. requests cajole slyly|
+496|Supplier#000000496|be4auZxyqAgF5ysH3nXWcc7bDsNgdZ|20|30-810-880-3654|2266.32|theodolites. slyly regular |
+497|Supplier#000000497|iLAqlCKDLUGqHrjuOcId7 uYoTmpA|12|22-895-454-2151|5980.87|es nag about the furious |
+498|Supplier#000000498|4jvUQrC4acOQ82EFM vLNHG|17|27-772-853-6514|485.25|. slyly ironic ideas cajole slyly quickly ironic deposits. blithely even theodolites boo|
+499|Supplier#000000499|NbcQeBiDiN2tFiVxHIaWU03BVFIuxt |18|28-243-638-7646|2518.34|al, express deposits hang furiously. regular, unusual pinto beans wake a|
+500|Supplier#000000500|jLfNCVrj7X5h31yfSR02Z4x7K|20|30-229-226-6452|6738.72|ly. carefully final packages boost|
+501|Supplier#000000501|PSStC43vWlQQpmTku4s|24|34-141-983-6520|-264.89|s nag quickly. platelets haggle quickly above the furiously silent packages|
+502|Supplier#000000502|AutSetu5u6moXK6Y3rpoWREh|4|14-678-262-5636|963.33|al excuses haggle furiously iro|
+503|Supplier#000000503|OtYqMbur3v7nfzYgFYmMrJvq5YTj1MtXgefj|20|30-263-152-1630|7643.78|players are across the slyly silent requests: even, r|
+504|Supplier#000000504|P8k2mjRiRUFCJfxw7KrEdRpNNQPDxiI|10|20-322-544-5770|9050.12|y final pinto beans. blithely regular instructions wake abo|
+505|Supplier#000000505|aqcYZYQD5TYlLDgIxhKZyFCzL3Ch5qKOxj|9|19-480-691-1853|6399.78| requests engage slyly regular ideas. fina|
+506|Supplier#000000506|hnXNVHB1ao5rlGLkrS64kBz5C5rx7 R4dqO5CNv|23|33-582-741-7991|1202.98|riously even accounts sleep furiously ironic foxes. quickly final requests haggle fu|
+507|Supplier#000000507|aF2w4JF8qV aaqApYqzTFtIXtKV57Na|9|19-981-569-8699|9464.26|p carefully besides the furiou|
+508|Supplier#000000508|F9,suuHYbe6kCRCPZaeSHSPAFBk9vOcFX8TUx|14|24-179-400-2422|3878.22|sits. blithely furious requests boost slyly about the quickly even packages. closely|
+509|Supplier#000000509|SF7dR8V5pK|6|16-298-154-3365|4315.15|ronic orbits are furiously across the requests. quickly express ideas across the special, bold|
+510|Supplier#000000510|VmXQl ,vY8JiEseo8Mv4zscvNCfsY|19|29-207-852-3454|-314.06| bold deposits. carefully even d|
+511|Supplier#000000511|RWNdTEe,VJFarN4Pu7Xl|23|33-476-213-8045|-455.12|t the quickly even deposits. carefully careful ideas sleep slyly bold deposits. unusual, even accoun|
+512|Supplier#000000512|MoNOuMC4QMOnBgD5uLcvtHCcfzf9cW|15|25-617-226-9364|4762.66|special accounts. daring foxes nag quickly silent, special packages. silent, unusual a|
+513|Supplier#000000513|YMhcTaVkhw0nO9B,|4|14-481-495-8505|8075.30| final requests. slyly final reque|
+514|Supplier#000000514|Q4ErX,NN,Z2UDP|14|24-677-367-2786|1669.85|arefully regular ideas: pinto beans detect across the slyly pending pinto b|
+515|Supplier#000000515|5TOuZXAb9df7m3BCW2 TeHMY1Zdf46DqpT2,0t|17|27-470-220-5233|2127.89| beans sleep after the final frays. special ideas across the carefully regular instructi|
+516|Supplier#000000516|z5Mm65PAP4m|5|15-165-647-2301|371.38|s. idly final theodolites ha|
+517|Supplier#000000517|NkRQYLe9d8vEXNO KKk8rxK502OI2 |8|18-577-641-3805|2781.03|ing asymptotes. carefully fin|
+518|Supplier#000000518|g2buDv7WzbkNDU63IN5af0i6SAdUxihirS2X|1|11-853-939-1266|-199.77|egular dependencies integrate carefully among the enticingly f|
+519|Supplier#000000519|0zgIDeAmk5976RzKiXZi,kobff8IxQn|19|29-133-225-9811|6361.20|onic requests cajole blithely furiously pending ideas. quickly quick accounts sle|
+520|Supplier#000000520|RHuqyeshPnOa6gwEiV3zDhP0o2aYvZ9glQURu1w|12|22-654-808-2429|1682.84|aggle among the final, pending realms. carefully regular escapades woul|
+521|Supplier#000000521|jQ648xqiuJaHLQjwrq5b|1|11-887-652-3799|507.99|carefully final asymptotes: carefully regular epitaphs about the blithely u|
+522|Supplier#000000522|joLGRuiXIsVWk|8|18-355-956-2843|-336.14|tect slyly final instructions. fluffily ironic ideas after the final, fina|
+523|Supplier#000000523|zzfDhdtZcvmVzA8rNFU,Yctj1zBN|18|28-458-231-8410|2492.06|e, regular deposits eat. fluffily express foxes haggle a|
+524|Supplier#000000524|rTYNcqhFosNU,4|13|23-993-423-3788|5093.35|c theodolites integrate quickly ironic deposits. furiously even de|
+525|Supplier#000000525|GtHZ9ooyeKe|11|21-247-956-8546|-79.52|y bold theodolites; express requests across the packages haggle slyly carefully final pinto b|
+526|Supplier#000000526|x8r7,jgpBRjq6Ns|5|15-539-980-8303|3477.18|as kindle ideas. blithely bold requests |
+527|Supplier#000000527|udKczd6U1Bm79UVDkA8P2Xa1VY qv9mvsXo|18|28-894-961-1780|6938.43| sublate blithely pending instructions. blithely final packages nag blithe|
+528|Supplier#000000528|QsnGjo7irxCIbN3|2|12-961-772-2408|1285.81|the furiously final tithes are across the pending ideas. car|
+529|Supplier#000000529|Fj4vm8y2 Tlgd|10|20-521-276-3787|4581.80|wake across the furiously regular excuses. express dolphins are q|
+530|Supplier#000000530|0qwCMwobKY OcmLyfRXlagA8ukENJv,|3|13-747-781-9694|4327.86|fily pending dependencies wake slyly final deposits. platelets against the slyly ironic requests na|
+531|Supplier#000000531|fN0ix827c112YajETqxxuofrfl7v VndWB38n|22|32-950-377-4573|4258.42| the carefully ironic gifts. carefully unusual multipliers sleep slyly amo|
+532|Supplier#000000532|ep92hT7VLaVlDKM7lgbj02kIL|3|13-595-401-8233|3278.71| fluffily fluffily express ideas; blithely special instructions wake quickly among th|
+533|Supplier#000000533|WF9wtTxzbBa4kv FAeer9I1pQJ0Qe,uJf3f w|7|17-404-617-2581|4213.95|e ironic foxes believe fluffily across the u|
+534|Supplier#000000534|JBhF3gZcQiNWGxh8DuoAhmVi|9|19-198-519-8383|3930.79|after the furiously even requests haggle thinly a|
+535|Supplier#000000535|aiq9Honllr6hFt, YJ6|10|20-121-889-4500|7839.46| foxes. carefully thin dependencies sublate furiously. regular instruction|
+536|Supplier#000000536|dzf PbgzKpWBDim5S1BSPLBNzxFpxZNUE|17|27-749-678-1361|8797.40|cajole blithely slyly even dependencies. carefu|
+537|Supplier#000000537|KsYYPIw2kWP|13|23-671-207-6720|5046.81| forges breach upon the bold ideas. final foxes nag frets. final instructions eat fluffily|
+538|Supplier#000000538|KBZ0RSDGTVJQPbWaU6x|17|27-799-369-5739|9669.24|e fluffily regular theodolites. special packages are into the careful|
+539|Supplier#000000539|GG5N3GIdNmmvhKs52Y|1|11-844-496-3836|5976.60|otes; carefully ironic deposits sleep idly along|
+540|Supplier#000000540|Tmyis ,xX7XjU2E|16|26-891-481-8993|5704.81|. blithely final instructions shall cajol|
+541|Supplier#000000541|Nxggufcm ,hR|17|27-914-557-6989|2830.62|blithely ironic accounts poach blithely alongside of the carefu|
+542|Supplier#000000542|3yWRklEDbAvfVuidQPgOOe,x7f,4 J5lSp4v|5|15-378-700-5884|8142.81|g requests believe carefully dolphins. quickly pending|
+543|Supplier#000000543|P10rl2 o A0jtJQDcB|15|25-433-303-6328|3222.71| against the carefully ironic excuses boost bli|
+544|Supplier#000000544|pUS3drDXbPeNqvI kUNlnz5GDSU5,aLI|1|11-471-707-9878|9042.70| sleep ironic accounts. fluffily even dependen|
+545|Supplier#000000545|D8SE9UgTdgq3oNH8RzaxDKpbsA BlRKsf|2|12-876-408-4086|2723.99|onic packages use slyly about the theodolites. final platelets are. finall|
+546|Supplier#000000546|YznQGwWvZbNA0O9ZV|22|32-679-789-3863|9036.47|es haggle. blithely final theodolites wake blithely. carefully regular packages |
+547|Supplier#000000547|MaV373lvwj|8|18-600-279-8954|7937.31| the quickly ironic asymptotes nag carefully |
+548|Supplier#000000548|btRiQsq qEK0qg0T|12|22-790-987-6339|3456.36|ly even tithes sleep alongside of the asymptotes. blithely ironic requests are |
+549|Supplier#000000549|oy89mLRUwTVCoU|0|10-377-880-8280|8948.84|iously final ideas. carefully unusual ideas wake fluffily special platelets. furiously unusual pin|
+550|Supplier#000000550|QQavssDXnYHbvOrg|12|22-648-743-9295|9238.79|en, bold ideas. ironic, unusual deposits boost carefully quick accounts. slyly e|
+551|Supplier#000000551|ZNiqP1w6Z SGZsLllIhaicTnLCCuAepdNbkm6pJ|8|18-297-775-8421|9364.67|lithely even instructions poach quickly. furiously bold accounts sleep final, final accoun|
+552|Supplier#000000552|JP3ebtH5epPwpU2lVQqKNZC3NTvnTlrhLy5eeGN|12|22-861-905-2401|2114.99|uickly about the deposits. furiously pending accounts wake. packages are slyly bold de|
+553|Supplier#000000553|a,liVofXbCJ|6|16-599-552-3755|3526.53|lar dinos nag slyly brave|
+554|Supplier#000000554|FAEEZyNtDGEhZ|7|17-365-531-4630|335.69|enticing accounts against the deposits use q|
+555|Supplier#000000555|TfB,a5bfl3Ah 3Z 74GqnNs6zKVGM|3|13-607-927-7514|7896.01|uriously regular theodolites according to the carefully f|
+556|Supplier#000000556|g3QRUaiDAI1nQQPJLJfAa9W|6|16-951-842-4482|2253.90| silent deposits haggle quickly ironic, final theodolites. boldly bold accou|
+557|Supplier#000000557|jj0wUYh9K3fG5Jhdhrkuy ,4|3|13-704-788-7706|6343.15|ckages. unusual, regular attainments|
+558|Supplier#000000558|T9hNjrRRRQmkbZomdaeLKDOqmmUcJpAJzXOxq|9|19-494-404-8664|6404.51|s against the carefully pending packages cajole al|
+559|Supplier#000000559|SkKThClbkbH8mIv|11|21-205-567-6566|3646.46| ideas cajole alongside of the carefully ironic packages. regular pint|
+560|Supplier#000000560|rYTPbri8qJ49rRfFmChtnDIQ|17|27-261-214-5284|3009.57|slow platelets. quickly pending ideas are requests. even theodolites may nag about the regular,|
+561|Supplier#000000561|1V3DMQWQpfjPJybZYAP|22|32-547-343-1231|1824.47|d packages. carefully bold ideas are quickly across the platelets. final, express pinto b|
+562|Supplier#000000562|8TXCtnRQzByqjie|23|33-782-496-5965|4848.52|he furiously special accounts hag|
+563|Supplier#000000563|Rc7U1cRUhYs03JD|7|17-108-537-2691|-942.73|slyly furiously final decoys; silent, special realms poach f|
+564|Supplier#000000564|IufyqhG4fmo VkgQT w BF4|2|12-702-995-1506|4410.70|epitaphs. even attainments cajole slyly regular packages. final deposits cajole. furiously final|
+565|Supplier#000000565|,oYB9wlD3mtL lj3PJC67a RGXaqh69sHK5G4e|20|30-487-989-9411|-334.52|s haggle never furiously special deposits. final attainments breach special pains. fl|
+566|Supplier#000000566|hBRvnhCUVIiXQK6dyAZYN,TNZItOlBvsVYCisb,O|24|34-396-766-5348|7705.73|rs! special packages cajole-- furiously final packages maintain slyly around the blithely spe|
+567|Supplier#000000567|fvuRpAap0MvoBguGKBfp|1|11-390-878-2811|5264.91|ke fluffily furiously ironic ideas. qu|
+568|Supplier#000000568|z70Hj8qVi8jQu|10|20-162-593-6831|1367.90|furiously fluffy instructions about the regular |
+569|Supplier#000000569|jjFjVCjK91yy2B dj|15|25-803-734-8127|9166.95|al frays until the slyly ironic requests cajole furiously about the quietly final foxes. furiously p|
+570|Supplier#000000570|8VkNpvXFgKgbKY2ypMKyIOBlK|17|27-658-225-4655|922.72|! regular platelets sleep furiously idly silent foxes. even courts sleep slyly. regular, reg|
+571|Supplier#000000571|GMifmfVJba|9|19-223-236-6710|7132.44|nto beans haggle carefully after the furiously regular reques|
+572|Supplier#000000572|J,RTwd9mNOTralFovrCrH99 f9rbvsDf3|19|29-444-247-7800|6039.27|its. carefully even requests along the quickly unusual pinto beans inte|
+573|Supplier#000000573|83GRV1s,yGFwl1NClLSXnJVRlh0xS8YW8|12|22-306-652-6853|3107.46|icing accounts. carefully regular sauternes according to the accounts wake after the fina|
+574|Supplier#000000574|2O8 sy9g2mlBOuEjzj0pA2pevk,|22|32-866-246-8752|8096.98|ully after the regular requests. slyly final dependencies wake slyly along the busy deposit|
+575|Supplier#000000575|J24LOV AQiHuYuR|19|29-201-935-5958|2918.54| special packages along the carefully e|
+576|Supplier#000000576|Plc2DypORn4qNOTpZ|12|22-316-723-5789|817.26|e of the final deposits. regular, unusual requests wake slyly. furio|
+577|Supplier#000000577|kn5oGAnFD1CQjet8awWorC,UMf37MP71yNcVD|13|23-973-363-7797|5593.17|olites along the quick accounts cajole throughout the regular asymptotes. accounts maintain|
+578|Supplier#000000578|bn5J0A4426DpcW7m rQ9,qxqJ1KN|20|30-105-334-1726|7428.76|carefully about the slyly regular warthogs. special packages above the regular pa|
+579|Supplier#000000579|U6sejT6kSPi5p1FUcUxjdJ|19|29-854-341-3857|3144.98|ly regular pinto beans. furiously regular ideas against the accounts nag blithely final pinto b|
+580|Supplier#000000580|MuRScZH74veaM2|6|16-732-277-6239|614.57|packages. furiously final theodolites integrate according to the carefully silent braids. caref|
+581|Supplier#000000581|X pNyEcNqxYwiP0gJ7FzkJ,haGkn|18|28-320-345-9799|8998.40|cross the quick packages wake carefully except the accounts?|
+582|Supplier#000000582|o6h0Bjjwags0FDRSAoKJPCWyt|4|14-341-851-2277|6178.48|ly unusual packages. regular, pending foxes are blithely. fluffily |
+583|Supplier#000000583|9st8mjB5G7J|15|25-760-126-2928|-339.15| dolphins across the carefully regular instructions hagg|
+584|Supplier#000000584|XvDYsHYpmY5AkX60fj0bZo4WW|18|28-223-704-2186|6912.86|e requests haggle carefully even ideas. express, bold requests integrate quickly furiously |
+585|Supplier#000000585|DQZTWEfNYL9UDlMqcQAEThcPdbyD45PYzL|23|33-357-931-8857|433.74|ar, silent instructions i|
+586|Supplier#000000586|9tfHwYyFe2t2,6pAVpkURXAxtc2cQw4qfGKYJ|2|12-747-610-3099|5850.91|ccording to the stealthily ironi|
+587|Supplier#000000587|58,gb EuMperMCg2lv XUQ9vi4GzhO2a|7|17-128-699-9949|5322.35|thin pinto beans boost silently. ruthless deposits haggle quickly above the slyly unusual th|
+588|Supplier#000000588|e3yF5zmSj y81I|14|24-180-601-5741|9760.06|gular, permanent accounts. |
+589|Supplier#000000589|3C4,WjUCjL59QhMSxyq1|18|28-878-356-5116|3415.90|apades are final, unusual instructions. bold, unusual |
+590|Supplier#000000590|KhRtsL4Foycp2hUwg bEHkDusXAf|9|19-277-247-1833|-12.84|de of the express requests. pinto beans are |
+591|Supplier#000000591|iXlVA9y6oX4|24|34-204-742-6291|2082.42|ven instructions try to are slyly about the quickly b|
+592|Supplier#000000592|tdYqh7rm0Zc7E0etRqHakcg,m34gQX|9|19-220-707-3861|6151.79|sual, express accounts integrate fluffily. dependencies cajole slyly|
+593|Supplier#000000593|qvlFqgoEMzzksE2uQlchYQ8V|6|16-262-671-5187|2214.36|arefully even ideas sleep quickly. ironic foxes wak|
+594|Supplier#000000594|8GY0oRK64AFmY7pys51Uqm7YbMn9luO,Z|17|27-826-454-6643|1760.34|quests use fluffily quickly final packages. carefully pending pinto beans are blithely among the ca|
+595|Supplier#000000595| CURZCs4l306M2ir8rFkgeYVg|24|34-354-570-3604|4922.60|ecial instructions cajole alongside of the requests. i|
+596|Supplier#000000596|caTnKVKTsCHNEVi1xVPD|4|14-761-106-2656|1180.93|its sleep. carefully unusual somas use furiously above the|
+597|Supplier#000000597|CKt5G XZ5DBt|24|34-219-790-3864|1828.73|ecoys sleep slyly according to the furiously regular requests. furiously expres|
+598|Supplier#000000598|p9AGBjg4DZuChQbY8gAj3LtMrxpOWqMpJR|7|17-985-962-2292|-590.83|uickly unusual ideas sleep blithely after the|
+599|Supplier#000000599|R u1wkvs4 B0wlbPbT8WrSzqoXZG0CjbsuAbPpUs|4|14-208-385-3654|769.29|affix. carefully final accounts about the care|
+600|Supplier#000000600|YHyUzea88sXoNmqmCMamiEfGC54xpdX|12|22-508-410-2758|2342.35|s. fluffily ironic deposits hinder furiousl|
+601|Supplier#000000601|TS2xMrQuUs9VrgWt4,gpdcEyWNw3K6,P|8|18-528-362-8573|7906.22|atelets cajole according to the pending, ironic orbits. carefully regular packa|
+602|Supplier#000000602|xEtByOs0Pydp9y75MSgoy6T R6PT8e|20|30-106-955-5651|8924.02|tes. furiously careful pains are. quickly even platelets boost sly,|
+603|Supplier#000000603|mECtpm1pmMnqK4K0DLZ5Gtkj 5bUydzBak6|12|22-807-182-2059|-846.12|dependencies. slyly regular accounts |
+604|Supplier#000000604|B53WjrwJCSh14Bx,oCEinGgCJ3ZCc8m|24|34-390-848-6584|227.59|regular asymptotes solve accordin|
+605|Supplier#000000605|wdwiNoNT8pVHOTHQ8jhVzaOTkU|6|16-835-870-9488|6071.58|foxes poach blithely beneath the excuses: ironic multipliers haggle quickly furiously unu|
+606|Supplier#000000606|n,iOFy5X,4GFeXNrCCKBmHucz1|19|29-856-255-1441|6988.38|es haggle across the carefully even accounts: unusual instructions x-ray carefully. blit|
+607|Supplier#000000607|vgEaPkxAonSSdAUn,7usQ c4G3Ho2r0|15|25-370-994-3762|4667.27|ests are closely quickly ironic orbits. carefully regular attainments cajole furiousl|
+608|Supplier#000000608|SQ,f89cn6x6g|17|27-435-165-2250|-210.13| do was furiously above the accounts. unusual, ironic packages hang about the carefully final|
+609|Supplier#000000609|n9 nkdqilT|12|22-852-519-5068|8287.95|ronic, regular ideas nag furiously across the final ideas. bold, express do|
+610|Supplier#000000610|cRikc,rgxAM3yz0IR85OD|20|30-402-585-4900|668.12|gainst the ideas. regular instructions are. theodolites cajole furiously final, un|
+611|Supplier#000000611| 6BVljZ1HeradVcmcDm90NYkla3iHPBsTSoUJr|8|18-554-185-6487|7048.30|al courts sleep carefully about the blithely express accounts. fluffily even request|
+612|Supplier#000000612|gt9T2nnuWBiy5zcrWG2iSdZt,sAEYnD6|23|33-377-769-8060|-118.86| regular requests after the slyly regular packages belie|
+613|Supplier#000000613|DYwZjMQj26Es8D8pxn2zx|11|21-796-340-9401|2201.94|e furiously. final foxes haggle carefully quickly express theodolites. regular deposits affix bli|
+614|Supplier#000000614|DteCEt557XpSo8CejUUbFm RgTeT4FRz7bC,6l|14|24-185-488-4015|1956.55|inal theodolites shall have to boost. unusual theodolites are |
+615|Supplier#000000615|dIT3WOBBwUuakVwd965N4logoVW1A|23|33-196-233-6474|2741.54|platelets. pending, regular|
+616|Supplier#000000616|Ktao GA3 5k7oF,wkDyhc0uatR72dD65pD|6|16-738-270-6883|7119.71|al packages are carefully after the regular p|
+617|Supplier#000000617|Q4haZeO8aVzl2dXziDw3f|7|17-563-347-4748|1002.43| blithely unusual theodolites haggle furiously. even courts use quickly against the |
+618|Supplier#000000618|mPrv5o5d22wyXUgUw69x8m dtx7I3mMh|4|14-912-871-9422|4710.51| the furiously pending deposits x-ray about the fluffily unusual accounts|
+619|Supplier#000000619|CzILYciyb3fdioa9LflK,ADrP|17|27-505-962-3048|559.50|ironic asymptotes. express, final pinto beans are furiously inside the furiously regu|
+620|Supplier#000000620|5pd GQ2NTM3c2uR,gCg9NspSE|24|34-347-881-4300|7516.12|posits promise. quickly express accounts according to the regularly pending accounts lose blithely |
+621|Supplier#000000621|fjFomMNvcBWHb|6|16-492-530-5790|8436.37|ns. even, even platelets up the carefully pending platelets u|
+622|Supplier#000000622|gCQimU1jYHoQiglDmW1FkQM9wzi YC1P15pMy1|21|31-421-544-2948|9199.28|ent instructions. furiously silent packages detect regularly quickly even somas. even pearls ha|
+623|Supplier#000000623|dSSQ3dTYwThbLppbetVUeuPfBIUF|7|17-593-337-7365|5408.07|ial frays use. carefully special foxes wake carefully slyly pending deposits-- final requests a|
+624|Supplier#000000624|JlCK4aBP3PCO|1|11-487-571-9291|9497.65|the silent, final pinto be|
+625|Supplier#000000625|0zW5d Hyogg0z,sXxl1PHS0Ya,muKs4N dS7|23|33-946-363-3870|4065.25|unts. silent accounts around the quickly final r|
+626|Supplier#000000626|uaYN3Mg6sVtD|13|23-672-133-9305|1586.50|ic deposits above the blit|
+627|Supplier#000000627|k6W51QENyVPe4 6z0ajEMTkEA|11|21-632-574-8701|9318.43|l instructions serve slyly regular deposits. carefully busy excuses cajole quickly pending d|
+628|Supplier#000000628|Gk75kOa26bzFvztn3rgkiOdL M0jTMU|0|10-599-740-9848|5975.00|ccounts play along the special ideas. blithely final deposi|
+629|Supplier#000000629|SjPnzOlaF3,4u1QAyH

<TRUNCATED>

[38/50] [abbrv] carbondata git commit: [CARBONDATA-2475] Support Modular Core for Materialized View DataMap for query matching and rewriting

Posted by ma...@apache.org.
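For readers following this commit series, a minimal sketch of the feature named in the subject (materialized-view datamap creation and query rewriting) is shown below. It is illustrative only: the `CREATE DATAMAP ... USING 'mv'` syntax, the table layout, and all names are assumptions and are not taken from this patch, which only adds the TPC-H CSV test resources.

    // Minimal sketch, not part of this commit. Assumes a SparkSession with the
    // CarbonData extensions on the classpath and that the MV datamap DDL
    // (CREATE DATAMAP ... USING 'mv') is available; all names are illustrative.
    import org.apache.spark.sql.SparkSession

    object MVRewriteSketch {
      def main(args: Array[String]): Unit = {
        val spark = SparkSession.builder()
          .appName("mv-rewrite-sketch")
          .getOrCreate()

        // Base table mirroring the tpch/orders.csv resource added in this commit.
        spark.sql("DROP TABLE IF EXISTS orders")
        spark.sql(
          """CREATE TABLE orders (
            |  o_orderkey INT, o_custkey INT, o_orderstatus STRING,
            |  o_totalprice DOUBLE, o_orderdate DATE, o_orderpriority STRING,
            |  o_clerk STRING, o_shippriority INT, o_comment STRING)
            |STORED BY 'carbondata'""".stripMargin)

        // Hypothetical MV datamap over an aggregate query; once it exists and is
        // loaded, queries with the same shape can be rewritten to read it.
        spark.sql(
          """CREATE DATAMAP orders_by_priority USING 'mv' AS
            |SELECT o_orderpriority, SUM(o_totalprice) AS total
            |FROM orders GROUP BY o_orderpriority""".stripMargin)

        // Same shape as the datamap definition, so a modular-plan matcher could
        // rewrite this query against orders_by_priority instead of orders.
        spark.sql(
          "SELECT o_orderpriority, SUM(o_totalprice) FROM orders GROUP BY o_orderpriority")
          .show()
      }
    }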
http://git-wip-us.apache.org/repos/asf/carbondata/blob/2881c6bb/integration/spark-common-test/src/test/resources/tpch/orders.csv
----------------------------------------------------------------------
diff --git a/integration/spark-common-test/src/test/resources/tpch/orders.csv b/integration/spark-common-test/src/test/resources/tpch/orders.csv
new file mode 100644
index 0000000..56b3064
--- /dev/null
+++ b/integration/spark-common-test/src/test/resources/tpch/orders.csv
@@ -0,0 +1,1000 @@
+1|36901|O|173665.47|1996-01-02|5-LOW|Clerk#000000951|0|nstructions sleep furiously among |
+2|78002|O|46929.18|1996-12-01|1-URGENT|Clerk#000000880|0| foxes. pending accounts at the pending, silent asymptot|
+3|123314|F|193846.25|1993-10-14|5-LOW|Clerk#000000955|0|sly final accounts boost. carefully regular ideas cajole carefully. depos|
+4|136777|O|32151.78|1995-10-11|5-LOW|Clerk#000000124|0|sits. slyly regular warthogs cajole. regular, regular theodolites acro|
+5|44485|F|144659.20|1994-07-30|5-LOW|Clerk#000000925|0|quickly. bold deposits sleep slyly. packages use slyly|
+6|55624|F|58749.59|1992-02-21|4-NOT SPECIFIED|Clerk#000000058|0|ggle. special, final requests are against the furiously specia|
+7|39136|O|252004.18|1996-01-10|2-HIGH|Clerk#000000470|0|ly special requests |
+32|130057|O|208660.75|1995-07-16|2-HIGH|Clerk#000000616|0|ise blithely bold, regular requests. quickly unusual dep|
+33|66958|F|163243.98|1993-10-27|3-MEDIUM|Clerk#000000409|0|uriously. furiously final request|
+34|61001|O|58949.67|1998-07-21|3-MEDIUM|Clerk#000000223|0|ly final packages. fluffily final deposits wake blithely ideas. spe|
+35|127588|O|253724.56|1995-10-23|4-NOT SPECIFIED|Clerk#000000259|0|zzle. carefully enticing deposits nag furio|
+36|115252|O|68289.96|1995-11-03|1-URGENT|Clerk#000000358|0| quick packages are blithely. slyly silent accounts wake qu|
+37|86116|F|206680.66|1992-06-03|3-MEDIUM|Clerk#000000456|0|kly regular pinto beans. carefully unusual waters cajole never|
+38|124828|O|82500.05|1996-08-21|4-NOT SPECIFIED|Clerk#000000604|0|haggle blithely. furiously express ideas haggle blithely furiously regular re|
+39|81763|O|341734.47|1996-09-20|3-MEDIUM|Clerk#000000659|0|ole express, ironic requests: ir|
+64|32113|F|39414.99|1994-07-16|3-MEDIUM|Clerk#000000661|0|wake fluffily. sometimes ironic pinto beans about the dolphin|
+65|16252|P|110643.60|1995-03-18|1-URGENT|Clerk#000000632|0|ular requests are blithely pending orbits-- even requests against the deposit|
+66|129200|F|103740.67|1994-01-20|5-LOW|Clerk#000000743|0|y pending requests integrate|
+67|56614|O|169405.01|1996-12-19|4-NOT SPECIFIED|Clerk#000000547|0|symptotes haggle slyly around the furiously iron|
+68|28547|O|330793.52|1998-04-18|3-MEDIUM|Clerk#000000440|0| pinto beans sleep carefully. blithely ironic deposits haggle furiously acro|
+69|84487|F|197689.49|1994-06-04|4-NOT SPECIFIED|Clerk#000000330|0| depths atop the slyly thin deposits detect among the furiously silent accou|
+70|64340|F|113534.42|1993-12-18|5-LOW|Clerk#000000322|0| carefully ironic request|
+71|3373|O|276992.74|1998-01-24|4-NOT SPECIFIED|Clerk#000000271|0| express deposits along the blithely regul|
+96|107779|F|68989.90|1994-04-17|2-HIGH|Clerk#000000395|0|oost furiously. pinto|
+97|21061|F|110512.84|1993-01-29|3-MEDIUM|Clerk#000000547|0|hang blithely along the regular accounts. furiously even ideas after the|
+98|104480|F|69168.33|1994-09-25|1-URGENT|Clerk#000000448|0|c asymptotes. quickly regular packages should have to nag re|
+99|88910|F|112126.95|1994-03-13|4-NOT SPECIFIED|Clerk#000000973|0|e carefully ironic packages. pending|
+100|147004|O|187782.63|1998-02-28|4-NOT SPECIFIED|Clerk#000000577|0|heodolites detect slyly alongside of the ent|
+101|27998|O|124906.11|1996-03-17|3-MEDIUM|Clerk#000000419|0|ding accounts above the slyly final asymptote|
+102|716|O|164529.10|1997-05-09|2-HIGH|Clerk#000000596|0| slyly according to the asymptotes. carefully final packages integrate furious|
+103|29101|O|126990.79|1996-06-20|4-NOT SPECIFIED|Clerk#000000090|0|ges. carefully unusual instructions haggle quickly regular f|
+128|73957|F|66195.16|1992-06-15|1-URGENT|Clerk#000000385|0|ns integrate fluffily. ironic asymptotes after the regular excuses nag around |
+129|71134|F|261013.14|1992-11-19|5-LOW|Clerk#000000859|0|ing tithes. carefully pending deposits boost about the silently express |
+130|36964|F|189484.12|1992-05-08|2-HIGH|Clerk#000000036|0|le slyly unusual, regular packages? express deposits det|
+131|92749|F|130464.09|1994-06-08|3-MEDIUM|Clerk#000000625|0|after the fluffily special foxes integrate s|
+132|26395|F|144947.21|1993-06-11|3-MEDIUM|Clerk#000000488|0|sits are daringly accounts. carefully regular foxes sleep slyly about the|
+133|44000|O|114663.57|1997-11-29|1-URGENT|Clerk#000000738|0|usly final asymptotes |
+134|6199|F|200354.30|1992-05-01|4-NOT SPECIFIED|Clerk#000000711|0|lar theodolites boos|
+135|60481|O|213713.99|1995-10-21|4-NOT SPECIFIED|Clerk#000000804|0|l platelets use according t|
+160|82495|O|124132.06|1996-12-19|4-NOT SPECIFIED|Clerk#000000342|0|thely special sauternes wake slyly of t|
+161|16619|F|34439.93|1994-08-31|2-HIGH|Clerk#000000322|0|carefully! special instructions sin|
+162|14116|O|2726.45|1995-05-08|3-MEDIUM|Clerk#000000378|0|nts hinder fluffily ironic instructions. express, express excuses |
+163|87760|O|202379.28|1997-09-05|3-MEDIUM|Clerk#000000379|0|y final packages. final foxes since the quickly even|
+164|779|F|301925.76|1992-10-21|5-LOW|Clerk#000000209|0|cajole ironic courts. slyly final ideas are slyly. blithely final Tiresias sub|
+165|27238|F|188023.67|1993-01-30|4-NOT SPECIFIED|Clerk#000000292|0|across the blithely regular accounts. bold|
+166|107812|O|154451.92|1995-09-12|2-HIGH|Clerk#000000440|0|lets. ironic, bold asymptotes kindle|
+167|119402|F|71124.35|1993-01-04|4-NOT SPECIFIED|Clerk#000000731|0|s nag furiously bold excuses. fluffily iron|
+192|82570|O|167668.42|1997-11-25|5-LOW|Clerk#000000483|0|y unusual platelets among the final instructions integrate rut|
+193|79061|F|88699.17|1993-08-08|1-URGENT|Clerk#000000025|0|the furiously final pin|
+194|61724|F|176707.84|1992-04-05|3-MEDIUM|Clerk#000000352|0|egular requests haggle slyly regular, regular pinto beans. asymptote|
+195|135421|F|191542.31|1993-12-28|3-MEDIUM|Clerk#000000216|0|old forges are furiously sheaves. slyly fi|
+196|64825|F|47095.31|1993-03-17|2-HIGH|Clerk#000000988|0|beans boost at the foxes. silent foxes|
+197|32512|P|163700.57|1995-04-07|2-HIGH|Clerk#000000969|0|solve quickly about the even braids. carefully express deposits affix care|
+198|110218|O|176189.31|1998-01-02|4-NOT SPECIFIED|Clerk#000000331|0|its. carefully ironic requests sleep. furiously express fox|
+199|52970|O|112986.49|1996-03-07|2-HIGH|Clerk#000000489|0|g theodolites. special packag|
+224|2476|F|259960.36|1994-06-18|4-NOT SPECIFIED|Clerk#000000642|0|r the quickly thin courts. carefully|
+225|33031|P|220441.09|1995-05-25|1-URGENT|Clerk#000000177|0|s. blithely ironic accounts wake quickly fluffily special acc|
+226|127466|F|313134.62|1993-03-10|2-HIGH|Clerk#000000756|0|s are carefully at the blithely ironic acc|
+227|9883|O|54880.58|1995-11-10|5-LOW|Clerk#000000919|0| express instructions. slyly regul|
+228|44078|F|2749.87|1993-02-25|1-URGENT|Clerk#000000562|0|es was slyly among the regular foxes. blithely regular dependenci|
+229|111728|F|213076.69|1993-12-29|1-URGENT|Clerk#000000628|0|he fluffily even instructions. furiously i|
+230|102535|F|171318.37|1993-10-27|1-URGENT|Clerk#000000520|0|odolites. carefully quick requ|
+231|90818|F|191808.17|1994-09-29|2-HIGH|Clerk#000000446|0| packages haggle slyly after the carefully ironic instruct|
+256|124831|F|130775.45|1993-10-19|4-NOT SPECIFIED|Clerk#000000834|0|he fluffily final ideas might are final accounts. carefully f|
+257|122693|O|8649.81|1998-03-28|3-MEDIUM|Clerk#000000680|0|ts against the sly warhorses cajole slyly accounts|
+258|41861|F|294182.42|1993-12-29|1-URGENT|Clerk#000000167|0|dencies. blithely quick packages cajole. ruthlessly final accounts|
+259|43186|F|123298.34|1993-09-29|4-NOT SPECIFIED|Clerk#000000601|0|ages doubt blithely against the final foxes. carefully express deposits dazzle|
+260|104728|O|272353.52|1996-12-10|3-MEDIUM|Clerk#000000960|0|lently regular pinto beans sleep after the slyly e|
+261|46072|F|319306.86|1993-06-29|3-MEDIUM|Clerk#000000310|0|ully fluffily brave instructions. furiousl|
+262|30352|O|143146.21|1995-11-25|4-NOT SPECIFIED|Clerk#000000551|0|l packages. blithely final pinto beans use carefu|
+263|116069|F|149369.68|1994-05-17|2-HIGH|Clerk#000000088|0| pending instructions. blithely un|
+288|7093|O|266047.60|1997-02-21|1-URGENT|Clerk#000000109|0|uriously final requests. even, final ideas det|
+289|103750|O|211162.52|1997-02-10|3-MEDIUM|Clerk#000000103|0|sily. slyly special excuse|
+290|117952|F|99019.42|1994-01-01|4-NOT SPECIFIED|Clerk#000000735|0|efully dogged deposits. furiou|
+291|141050|F|108550.11|1994-03-13|1-URGENT|Clerk#000000923|0|dolites. carefully regular pinto beans cajol|
+292|22252|F|40637.14|1992-01-13|2-HIGH|Clerk#000000193|0|g pinto beans will have to sleep f|
+293|29929|F|58638.09|1992-10-02|2-HIGH|Clerk#000000629|0|re bold, ironic deposits. platelets c|
+294|50498|F|49457.90|1993-07-16|3-MEDIUM|Clerk#000000499|0|kly according to the frays. final dolphins affix quickly |
+295|18985|F|130843.69|1994-09-29|2-HIGH|Clerk#000000155|0| unusual pinto beans play. regular ideas haggle|
+320|302|O|53743.60|1997-11-21|2-HIGH|Clerk#000000573|0|ar foxes nag blithely|
+321|122591|F|86823.16|1993-03-21|3-MEDIUM|Clerk#000000289|0|equests run. blithely final dependencies after the deposits wake caref|
+322|133546|F|205623.50|1992-03-19|1-URGENT|Clerk#000000158|0|fully across the slyly bold packages. packages against the quickly regular i|
+323|39133|F|119432.07|1994-03-26|1-URGENT|Clerk#000000959|0|arefully pending foxes sleep blithely. slyly express accoun|
+324|105155|F|38451.38|1992-03-20|1-URGENT|Clerk#000000352|0| about the ironic, regular deposits run blithely against the excuses|
+325|40024|F|125113.94|1993-10-17|5-LOW|Clerk#000000844|0|ly sometimes pending pa|
+326|75986|O|327413.14|1995-06-04|2-HIGH|Clerk#000000466|0| requests. furiously ironic asymptotes mold carefully alongside of the blit|
+327|144598|P|38488.56|1995-04-17|5-LOW|Clerk#000000992|0|ng the slyly final courts. slyly even escapades eat |
+352|106456|F|28648.47|1994-03-08|2-HIGH|Clerk#000000932|0|ke slyly bold pinto beans. blithely regular accounts against the spe|
+353|1777|F|249710.43|1993-12-31|5-LOW|Clerk#000000449|0| quiet ideas sleep. even instructions cajole slyly. silently spe|
+354|138268|O|217160.72|1996-03-14|2-HIGH|Clerk#000000511|0|ly regular ideas wake across the slyly silent ideas. final deposits eat b|
+355|70007|F|99516.75|1994-06-14|5-LOW|Clerk#000000532|0|s. sometimes regular requests cajole. regular, pending accounts a|
+356|146809|F|209439.04|1994-06-30|4-NOT SPECIFIED|Clerk#000000944|0|as wake along the bold accounts. even, |
+357|60395|O|157411.61|1996-10-09|2-HIGH|Clerk#000000301|0|e blithely about the express, final accounts. quickl|
+358|2290|F|354132.39|1993-09-20|2-HIGH|Clerk#000000392|0|l, silent instructions are slyly. silently even de|
+359|77600|F|239998.53|1994-12-19|3-MEDIUM|Clerk#000000934|0|n dolphins. special courts above the carefully ironic requests use|
+384|113009|F|166753.71|1992-03-03|5-LOW|Clerk#000000206|0|, even accounts use furiously packages. slyly ironic pla|
+385|32947|O|54948.26|1996-03-22|5-LOW|Clerk#000000600|0|hless accounts unwind bold pain|
+386|60110|F|110216.57|1995-01-25|2-HIGH|Clerk#000000648|0| haggle quickly. stealthily bold asymptotes haggle among the furiously even re|
+387|3296|O|204546.39|1997-01-26|4-NOT SPECIFIED|Clerk#000000768|0| are carefully among the quickly even deposits. furiously silent req|
+388|44668|F|198800.71|1992-12-16|4-NOT SPECIFIED|Clerk#000000356|0|ar foxes above the furiously ironic deposits nag slyly final reque|
+389|126973|F|2519.40|1994-02-17|2-HIGH|Clerk#000000062|0|ing to the regular asymptotes. final, pending foxes about the blithely sil|
+390|102563|O|269761.09|1998-04-07|5-LOW|Clerk#000000404|0|xpress asymptotes use among the regular, final pinto b|
+391|110278|F|20890.17|1994-11-17|2-HIGH|Clerk#000000256|0|orges thrash fluffil|
+416|40130|F|105675.20|1993-09-27|5-LOW|Clerk#000000294|0| the accounts. fluffily bold depo|
+417|54583|F|125155.22|1994-02-06|3-MEDIUM|Clerk#000000468|0|ironic, even packages. thinly unusual accounts sleep along the slyly unusual |
+418|94834|P|53328.48|1995-04-13|4-NOT SPECIFIED|Clerk#000000643|0|. furiously ironic instruc|
+419|116261|O|165454.42|1996-10-01|3-MEDIUM|Clerk#000000376|0|osits. blithely pending theodolites boost carefully|
+420|90145|O|343254.06|1995-10-31|4-NOT SPECIFIED|Clerk#000000756|0|leep carefully final excuses. fluffily pending requests unwind carefully above|
+421|39149|F|1156.67|1992-02-22|5-LOW|Clerk#000000405|0|egular, even packages according to the final, un|
+422|73075|O|188124.81|1997-05-31|4-NOT SPECIFIED|Clerk#000000049|0|aggle carefully across the accounts. regular accounts eat fluffi|
+423|103396|O|50240.88|1996-06-01|1-URGENT|Clerk#000000674|0|quests. deposits cajole quickly. furiously bold accounts haggle q|
+448|149641|O|165954.35|1995-08-21|3-MEDIUM|Clerk#000000597|0| regular, express foxes use blithely. quic|
+449|95767|O|71120.82|1995-07-20|2-HIGH|Clerk#000000841|0|. furiously regular theodolites affix blithely |
+450|47380|P|228518.02|1995-03-05|4-NOT SPECIFIED|Clerk#000000293|0|d theodolites. boldly bold foxes since the pack|
+451|98758|O|141490.92|1998-05-25|5-LOW|Clerk#000000048|0|nic pinto beans. theodolites poach carefully; |
+452|59560|O|3270.20|1997-10-14|1-URGENT|Clerk#000000498|0|t, unusual instructions above the blithely bold pint|
+453|44030|O|329149.33|1997-05-26|5-LOW|Clerk#000000504|0|ss foxes. furiously regular ideas sleep according to t|
+454|48776|O|36743.83|1995-12-27|5-LOW|Clerk#000000890|0|dolites sleep carefully blithely regular deposits. quickly regul|
+455|12098|O|183606.42|1996-12-04|1-URGENT|Clerk#000000796|0| about the final platelets. dependen|
+480|71383|F|23699.64|1993-05-08|5-LOW|Clerk#000000004|0|ealthy pinto beans. fluffily regular requests along the special sheaves wake |
+481|30352|F|201254.08|1992-10-08|2-HIGH|Clerk#000000230|0|ly final ideas. packages haggle fluffily|
+482|125059|O|182312.78|1996-03-26|1-URGENT|Clerk#000000295|0|ts. deposits wake: final acco|
+483|34820|O|70146.28|1995-07-11|2-HIGH|Clerk#000000025|0|cross the carefully final e|
+484|54244|O|327889.57|1997-01-03|3-MEDIUM|Clerk#000000545|0|grouches use. furiously bold accounts maintain. bold, regular deposits|
+485|100561|O|192867.30|1997-03-26|2-HIGH|Clerk#000000105|0| regular ideas nag thinly furiously s|
+486|50861|O|284644.07|1996-03-11|4-NOT SPECIFIED|Clerk#000000803|0|riously dolphins. fluffily ironic requ|
+487|107825|F|90657.45|1992-08-18|1-URGENT|Clerk#000000086|0|ithely unusual courts eat accordi|
+512|63022|P|194834.40|1995-05-20|5-LOW|Clerk#000000814|0|ding requests. carefully express theodolites was quickly. furious|
+513|60569|O|105559.70|1995-05-01|2-HIGH|Clerk#000000522|0|regular packages. pinto beans cajole carefully against the even|
+514|74872|O|154735.68|1996-04-04|2-HIGH|Clerk#000000094|0| cajole furiously. slyly final excuses cajole. slyly special instructions |
+515|141829|F|244660.33|1993-08-29|4-NOT SPECIFIED|Clerk#000000700|0|eposits are furiously furiously silent pinto beans. pending pack|
+516|43903|O|21920.56|1998-04-21|2-HIGH|Clerk#000000305|0|lar, unusual platelets are carefully. even courts sleep bold, final pinto bea|
+517|9220|O|121396.01|1997-04-07|5-LOW|Clerk#000000359|0|slyly pending deposits cajole quickly packages. furiou|
+518|144382|O|355180.76|1998-02-08|2-HIGH|Clerk#000000768|0| the carefully bold accounts. quickly regular excuses are|
+519|62953|O|161219.18|1997-10-31|1-URGENT|Clerk#000000985|0|ains doze furiously against the f|
+544|93385|F|69323.15|1993-02-17|2-HIGH|Clerk#000000145|0|the special, final accounts. dogged dolphins|
+545|63143|O|28984.07|1995-11-07|2-HIGH|Clerk#000000537|0|as. blithely final hockey players about th|
+546|143224|O|23566.29|1996-11-01|2-HIGH|Clerk#000000041|0|osits sleep. slyly special dolphins about the q|
+547|98209|O|149446.88|1996-06-22|3-MEDIUM|Clerk#000000976|0|ing accounts eat. carefully regular packa|
+548|123823|F|165456.46|1994-09-21|1-URGENT|Clerk#000000435|0|arefully express instru|
+549|109921|F|196410.67|1992-07-13|1-URGENT|Clerk#000000196|0|ideas alongside of |
+550|23524|O|42415.31|1995-08-02|1-URGENT|Clerk#000000204|0|t requests. blithely |
+551|89608|O|71032.36|1995-05-30|1-URGENT|Clerk#000000179|0|xpress accounts boost quic|
+576|29518|O|26276.64|1997-05-13|3-MEDIUM|Clerk#000000955|0|l requests affix regular requests. final account|
+577|55126|F|58874.63|1994-12-19|5-LOW|Clerk#000000154|0| deposits engage stealthil|
+578|92582|O|93874.88|1997-01-10|5-LOW|Clerk#000000281|0|e blithely even packages. slyly pending platelets bes|
+579|67057|O|161591.06|1998-03-11|2-HIGH|Clerk#000000862|0| regular instructions. blithely even p|
+580|59221|O|137666.81|1997-07-05|2-HIGH|Clerk#000000314|0|tegrate fluffily regular accou|
+581|68734|O|180542.57|1997-02-23|4-NOT SPECIFIED|Clerk#000000239|0| requests. even requests use slyly. blithely ironic |
+582|49358|O|181843.80|1997-10-21|1-URGENT|Clerk#000000378|0|n pinto beans print a|
+583|47125|O|185521.56|1997-03-19|3-MEDIUM|Clerk#000000792|0|efully express requests. a|
+608|25961|O|85399.20|1996-02-28|3-MEDIUM|Clerk#000000995|0|nic waters wake slyly slyly expre|
+609|125122|F|32712.15|1994-06-01|3-MEDIUM|Clerk#000000348|0|- ironic gifts believe furiously ca|
+610|50665|O|278270.15|1995-08-02|1-URGENT|Clerk#000000610|0|totes. ironic, unusual packag|
+611|105497|F|127852.70|1993-01-27|1-URGENT|Clerk#000000401|0|ounts detect furiously ac|
+612|81430|F|212763.09|1992-10-21|3-MEDIUM|Clerk#000000759|0|boost quickly quickly final excuses. final foxes use bravely afte|
+613|138254|O|38120.35|1995-06-18|2-HIGH|Clerk#000000172|0|ts hinder among the deposits. fluffily ironic depos|
+614|133288|F|337176.45|1992-12-01|2-HIGH|Clerk#000000388|0| deposits! even, daring theodol|
+615|65407|F|50707.14|1992-05-09|5-LOW|Clerk#000000388|0|t to promise asymptotes. packages haggle alongside of the fluffil|
+640|95138|F|238520.33|1993-01-23|2-HIGH|Clerk#000000433|0|r, unusual accounts boost carefully final ideas. slyly silent theod|
+641|132805|F|169757.60|1993-08-30|5-LOW|Clerk#000000175|0|ents cajole furiously about the quickly silent pac|
+642|39907|F|38023.78|1993-12-16|3-MEDIUM|Clerk#000000357|0| among the requests wake slyly alongside of th|
+643|57772|P|254060.24|1995-03-25|2-HIGH|Clerk#000000354|0|g dependencies. regular accounts |
+644|7370|F|272383.85|1992-05-01|1-URGENT|Clerk#000000550|0| blithely unusual platelets haggle ironic, special excuses. excuses unwi|
+645|114224|F|346179.67|1994-12-03|2-HIGH|Clerk#000000090|0|quickly daring theodolites across the regu|
+646|50417|F|236726.73|1994-11-22|2-HIGH|Clerk#000000203|0|carefully even foxes. fina|
+647|142930|O|82009.52|1997-08-07|1-URGENT|Clerk#000000270|0|egular pearls. carefully express asymptotes are. even account|
+672|108085|F|117034.85|1994-04-14|5-LOW|Clerk#000000106|0|egular requests are furiously according to |
+673|79148|F|31899.02|1994-03-10|1-URGENT|Clerk#000000448|0| special pinto beans use quickly furiously even depende|
+674|33130|F|36846.48|1992-08-29|5-LOW|Clerk#000000448|0|ully special deposits. furiously final warhorses affix carefully. fluffily f|
+675|11704|O|206195.97|1997-07-31|2-HIGH|Clerk#000000168|0|ffily between the careful|
+676|37930|O|251888.78|1996-12-13|2-HIGH|Clerk#000000248|0|the final deposits. special, pending|
+677|123877|F|222824.61|1993-11-24|3-MEDIUM|Clerk#000000824|0|uriously special pinto beans cajole carefully. fi|
+678|130645|F|185606.97|1993-02-27|5-LOW|Clerk#000000530|0|. blithely final somas about the|
+679|48494|O|15157.64|1995-12-15|2-HIGH|Clerk#000000853|0|tealthy, final pinto beans haggle slyly. pending platelets about the special, |
+704|84352|O|107139.07|1996-11-21|3-MEDIUM|Clerk#000000682|0|blithely pending platelets wake alongside of the final, iron|
+705|42706|O|105761.54|1997-02-13|4-NOT SPECIFIED|Clerk#000000294|0|ithely regular dependencies. express, even packages sleep slyly pending t|
+706|147367|O|37704.79|1995-09-09|1-URGENT|Clerk#000000448|0|g the packages. deposits caj|
+707|116150|F|98435.28|1994-11-20|3-MEDIUM|Clerk#000000199|0| ideas about the silent, bold deposits nag dolphins|
+708|31534|O|133960.40|1998-07-03|3-MEDIUM|Clerk#000000101|0|lphins cajole about t|
+709|36029|O|90778.89|1998-04-21|1-URGENT|Clerk#000000461|0|ons alongside of the carefully bold pinto bea|
+710|131137|F|295703.61|1993-01-02|5-LOW|Clerk#000000026|0| regular, regular requests boost. fluffily re|
+711|63673|F|124433.21|1993-09-23|4-NOT SPECIFIED|Clerk#000000856|0|its. fluffily regular gifts are furi|
+736|46354|O|176984.57|1998-06-21|5-LOW|Clerk#000000881|0|refully of the final pi|
+737|119666|F|22759.44|1992-04-26|5-LOW|Clerk#000000233|0|ake blithely express, ironic theodolites. blithely special accounts wa|
+738|20752|F|172540.42|1993-03-02|4-NOT SPECIFIED|Clerk#000000669|0|ly even foxes. furiously regular accounts cajole ca|
+739|307|O|243259.37|1998-05-31|5-LOW|Clerk#000000900|0| against the slyly ironic packages nag slyly ironic|
+740|43417|O|96016.31|1995-07-16|3-MEDIUM|Clerk#000000583|0|courts haggle furiously across the final, regul|
+741|104491|O|66343.80|1998-07-07|2-HIGH|Clerk#000000295|0|ic instructions. slyly express instructions solv|
+742|102838|F|305886.71|1994-12-23|5-LOW|Clerk#000000543|0|equests? slyly ironic dolphins boost carefully above the blithely|
+743|78322|O|33545.04|1996-10-04|4-NOT SPECIFIED|Clerk#000000933|0|eans. furiously ironic deposits sleep carefully carefully qui|
+768|97054|O|350817.47|1996-08-20|3-MEDIUM|Clerk#000000411|0|jole slyly ironic packages. slyly even idea|
+769|79987|F|45812.47|1993-06-02|3-MEDIUM|Clerk#000000172|0|ggle furiously. ironic packages haggle slyly. bold platelets affix s|
+770|31909|O|75639.70|1998-05-23|5-LOW|Clerk#000000572|0|heodolites. furiously special pinto beans cajole pac|
+771|44542|O|169115.42|1995-06-17|1-URGENT|Clerk#000000105|0|s. furiously final instructions across the deposit|
+772|96496|F|192141.04|1993-04-17|2-HIGH|Clerk#000000430|0|s boost blithely fluffily idle ideas? fluffily even pin|
+773|131834|F|173949.22|1993-09-26|3-MEDIUM|Clerk#000000307|0|tions are quickly accounts. accounts use bold, even pinto beans. gifts ag|
+774|79582|O|221514.77|1995-12-04|1-URGENT|Clerk#000000883|0|tealthily even depths|
+775|133180|F|76542.35|1995-03-18|1-URGENT|Clerk#000000191|0|kly express requests. fluffily silent accounts poach furiously|
+800|55940|O|127353.10|1998-07-14|2-HIGH|Clerk#000000213|0|y alongside of the pending packages? final platelets nag fluffily carefu|
+801|117790|F|184316.40|1992-02-18|1-URGENT|Clerk#000000186|0|iously from the furiously enticing reques|
+802|136685|F|266057.18|1995-01-05|1-URGENT|Clerk#000000516|0|posits. ironic, pending requests cajole. even theodol|
+803|14365|O|46420.95|1997-04-29|5-LOW|Clerk#000000260|0|ic instructions. even deposits haggle furiously at the deposits-- regular de|
+804|49969|F|137127.01|1993-03-12|3-MEDIUM|Clerk#000000931|0|s. blithely final foxes are about the packag|
+805|126001|O|141519.40|1995-07-05|4-NOT SPECIFIED|Clerk#000000856|0|y according to the fluffily |
+806|130741|O|44387.52|1996-06-20|2-HIGH|Clerk#000000240|0| the ironic packages wake carefully fina|
+807|143524|F|330203.94|1993-11-24|3-MEDIUM|Clerk#000000012|0|refully special tithes. blithely regular accoun|
+832|28921|F|121528.13|1992-04-19|5-LOW|Clerk#000000495|0|xes. bravely regular packages sleep up the furiously bold accou|
+833|55240|F|82735.33|1994-02-13|3-MEDIUM|Clerk#000000437|0|ts haggle quickly across the slyl|
+834|42754|F|60758.34|1994-05-23|3-MEDIUM|Clerk#000000805|0| sleep. quickly even foxes are boldly. slyly express requests use slyly|
+835|64754|O|91841.21|1995-10-08|4-NOT SPECIFIED|Clerk#000000416|0|s about the carefully special foxes haggle quickly about the|
+836|68782|O|119120.78|1996-11-25|4-NOT SPECIFIED|Clerk#000000729|0|ely bold excuses sleep regular ideas. furiously unusual ideas wake furiou|
+837|115846|F|91495.19|1994-06-15|4-NOT SPECIFIED|Clerk#000000563|0|kages sleep slyly above the ironic, final orbits|
+838|16336|O|123076.73|1998-01-29|5-LOW|Clerk#000000213|0| slyly around the slyly even|
+839|27881|O|85422.02|1995-08-08|1-URGENT|Clerk#000000951|0|the carefully even platelets. furiously unusual fo|
+864|137729|O|113810.95|1997-08-17|1-URGENT|Clerk#000000036|0|ly after the slyly regular deposits. express, regular asymptotes nag ca|
+865|2537|F|89144.19|1993-05-04|3-MEDIUM|Clerk#000000337|0|. special packages wake after the carefully final accounts. express pinto be|
+866|38299|F|7998.02|1992-11-28|3-MEDIUM|Clerk#000000718|0|ins after the even, even accounts nod blithel|
+867|25130|F|13207.24|1993-11-16|3-MEDIUM|Clerk#000000877|0|pades nag quickly final, |
+868|103501|F|170831.61|1992-06-09|4-NOT SPECIFIED|Clerk#000000782|0|onic theodolites print carefully. blithely dogge|
+869|134617|O|68638.94|1997-01-12|2-HIGH|Clerk#000000245|0|ar sheaves are slowly. slyly even attainments boost theodolites. furiously|
+870|32185|F|48718.44|1993-06-20|4-NOT SPECIFIED|Clerk#000000123|0|blithely ironic ideas nod. sly, r|
+871|14423|O|274343.38|1995-11-15|5-LOW|Clerk#000000882|0|oss the ironic theodolites.|
+896|1646|F|285882.29|1993-03-09|1-URGENT|Clerk#000000187|0|inal packages eat blithely according to the warhorses. furiously quiet de|
+897|48931|P|77590.87|1995-03-20|1-URGENT|Clerk#000000316|0| wake quickly against |
+898|53575|F|130445.64|1993-06-03|2-HIGH|Clerk#000000611|0|. unusual pinto beans haggle quickly across |
+899|108485|O|154199.50|1998-04-08|5-LOW|Clerk#000000575|0|rts engage carefully final theodolites.|
+900|45805|F|176247.47|1994-10-01|4-NOT SPECIFIED|Clerk#000000060|0| fluffily express deposits nag furiousl|
+901|11255|O|118682.69|1998-07-21|4-NOT SPECIFIED|Clerk#000000929|0|lyly even foxes are furious, silent requests. requests about the quickly |
+902|8515|F|61110.53|1994-07-27|4-NOT SPECIFIED|Clerk#000000811|0|yly final requests over the furiously regula|
+903|10876|O|161961.88|1995-07-07|4-NOT SPECIFIED|Clerk#000000793|0|e slyly about the final pl|
+928|65630|F|331325.36|1995-03-02|5-LOW|Clerk#000000450|0|ithely express pinto beans. |
+929|82661|F|151170.57|1992-10-02|2-HIGH|Clerk#000000160|0|its. furiously even foxes affix carefully finally silent accounts. express req|
+930|130952|F|277890.79|1994-12-17|1-URGENT|Clerk#000000004|0| accounts nag slyly. ironic, ironic accounts wake blithel|
+931|102889|F|155527.98|1992-12-07|1-URGENT|Clerk#000000881|0|ss packages haggle furiously express, regular deposits. even, e|
+932|40321|O|72741.24|1997-05-16|2-HIGH|Clerk#000000218|0|ly express instructions boost furiously reg|
+933|95348|F|85813.96|1992-08-05|4-NOT SPECIFIED|Clerk#000000752|0|ial courts wake permanently against the furiously regular ideas. unusual |
+934|51295|O|32426.69|1996-07-03|1-URGENT|Clerk#000000229|0|ts integrate carefully. sly, regular deposits af|
+935|49171|O|124027.79|1997-09-24|5-LOW|Clerk#000000180|0|iously final deposits cajole. blithely even packages |
+960|34624|F|96360.51|1994-09-21|3-MEDIUM|Clerk#000000120|0|regular accounts. requests|
+961|55069|P|263327.51|1995-06-04|4-NOT SPECIFIED|Clerk#000000720|0|ons nag furiously among the quickl|
+962|35273|F|148591.41|1994-05-06|5-LOW|Clerk#000000463|0|ments nag deposits. fluffily ironic a|
+963|25271|F|76423.01|1994-05-26|3-MEDIUM|Clerk#000000497|0|uses haggle carefully. slyly even dependencies after the packages ha|
+964|75179|O|228119.17|1995-05-20|3-MEDIUM|Clerk#000000657|0|print blithely ironic, careful theodolit|
+965|68437|P|46240.32|1995-05-15|5-LOW|Clerk#000000218|0|iously special packages. slyly pending requests are carefully |
+966|13306|O|163834.51|1998-04-30|2-HIGH|Clerk#000000239|0|special deposits. furious|
+967|109789|F|316887.82|1992-06-21|3-MEDIUM|Clerk#000000167|0|excuses engage quickly bold dep|
+992|53777|O|205597.06|1997-11-11|3-MEDIUM|Clerk#000000875|0|ts. regular pinto beans thrash carefully sl|
+993|79138|O|321155.99|1995-09-10|3-MEDIUM|Clerk#000000894|0|quickly express accounts among the furiously bol|
+994|1177|F|59494.18|1994-04-20|5-LOW|Clerk#000000497|0|ole. slyly bold excuses nag caref|
+995|115975|P|184332.36|1995-05-31|3-MEDIUM|Clerk#000000439|0|deas. blithely final deposits play. express accounts wake blithely caref|
+996|70915|O|64921.33|1997-12-29|1-URGENT|Clerk#000000497|0|arefully final packages into the slyly final requests affix blit|
+997|108689|O|41657.30|1997-05-19|2-HIGH|Clerk#000000651|0|ly express depths. furiously final requests haggle furiously. carefu|
+998|31696|F|112590.31|1994-11-26|4-NOT SPECIFIED|Clerk#000000956|0|ronic dolphins. ironic, bold ideas haggle furiously furious|
+999|60163|F|222435.59|1993-09-05|5-LOW|Clerk#000000464|0|pitaphs sleep. regular accounts use. f|
+1024|3478|O|251344.36|1997-12-23|5-LOW|Clerk#000000903|0| blithely. even, express theodolites cajole slyly across|
+1025|102676|F|110501.91|1995-05-05|2-HIGH|Clerk#000000376|0|ross the slyly final pa|
+1026|71086|O|48462.19|1997-06-04|5-LOW|Clerk#000000223|0|s wake blithely. special acco|
+1027|127951|F|184860.25|1992-06-03|3-MEDIUM|Clerk#000000241|0|equests cajole. slyly final pinto bean|
+1028|68435|F|239745.62|1994-01-01|2-HIGH|Clerk#000000131|0|ts are. final, silent deposits are among the fl|
+1029|128678|F|53661.89|1994-06-21|2-HIGH|Clerk#000000700|0|quests sleep. slyly even foxes wake quickly final theodolites. clo|
+1030|133999|F|23060.31|1994-06-15|5-LOW|Clerk#000000422|0|ully ironic accounts sleep carefully. requests are carefully alongside of the |
+1031|3590|F|190992.29|1994-09-01|3-MEDIUM|Clerk#000000448|0|s; ironic theodolites along the carefully ex|
+1056|27494|F|74962.40|1995-02-11|1-URGENT|Clerk#000000125|0|t, even deposits hang about the slyly special i|
+1057|75811|F|147378.84|1992-02-20|1-URGENT|Clerk#000000124|0|cuses dazzle carefully careful, ironic pinto beans. carefully even theod|
+1058|52951|F|115175.84|1993-04-26|3-MEDIUM|Clerk#000000373|0|kly pending courts haggle. blithely regular sheaves integrate carefully fi|
+1059|126715|F|290211.77|1994-02-27|1-URGENT|Clerk#000000104|0|en accounts. carefully bold packages cajole daringly special depende|
+1060|139636|F|171268.87|1993-02-21|3-MEDIUM|Clerk#000000989|0|l platelets sleep quickly slyly special requests. furiously |
+1061|102679|O|202433.83|1998-05-15|5-LOW|Clerk#000000576|0|uests sleep at the packages. fur|
+1062|104914|O|42297.06|1997-01-15|1-URGENT|Clerk#000000152|0|eposits use blithely |
+1063|36887|F|43093.98|1994-04-02|2-HIGH|Clerk#000000024|0|deposits nag quickly regular deposits. quickl|
+1088|146881|F|86154.28|1992-05-21|5-LOW|Clerk#000000347|0|counts are blithely. platelets print. carefully |
+1089|48008|O|141081.30|1996-05-04|4-NOT SPECIFIED|Clerk#000000226|0|ns haggle ruthlessly. even requests are quickly abov|
+1090|17105|O|48809.49|1997-11-15|2-HIGH|Clerk#000000300|0| furiously regular platelets haggle along the slyly unusual foxes! |
+1091|82777|O|44655.97|1996-08-27|1-URGENT|Clerk#000000549|0| even pinto beans haggle quickly alongside of the eve|
+1092|123143|P|130255.61|1995-03-04|3-MEDIUM|Clerk#000000006|0|re quickly along the blithe|
+1093|100664|O|111594.69|1997-07-31|4-NOT SPECIFIED|Clerk#000000159|0| after the carefully ironic requests. carefully ironic packages wake fluffil|
+1094|143735|O|13365.87|1997-12-24|3-MEDIUM|Clerk#000000570|0|beans affix furiously about the pending, even deposits. finally pendi|
+1095|143288|O|261418.78|1995-08-22|3-MEDIUM|Clerk#000000709|0|sly bold requests cajole carefully according to|
+1120|139016|O|147299.61|1997-11-07|3-MEDIUM|Clerk#000000319|0|lly special requests. slyly pending platelets are quickly pending requ|
+1121|28006|O|350404.56|1997-01-13|3-MEDIUM|Clerk#000000541|0|r escapades. deposits above the fluffily bold requests hag|
+1122|119527|O|276067.44|1997-01-10|1-URGENT|Clerk#000000083|0|uffily carefully final theodolites. furiously express packages affix|
+1123|71377|O|151589.33|1996-08-03|3-MEDIUM|Clerk#000000929|0|uriously pending requests. slyly regular instruction|
+1124|79451|O|220504.35|1998-07-30|5-LOW|Clerk#000000326|0|regular pinto beans along the fluffily silent packages|
+1125|24124|F|121277.33|1994-10-27|2-HIGH|Clerk#000000510|0|ithely final requests. i|
+1126|143203|O|71808.49|1998-01-28|4-NOT SPECIFIED|Clerk#000000928|0|d slyly regular ideas: special ideas believe slyly. slyly ironic sheaves w|
+1127|57418|O|172427.38|1995-09-19|4-NOT SPECIFIED|Clerk#000000397|0|usly silent, regular pinto beans. blithely express requests boos|
+1152|48952|F|86796.49|1994-08-14|4-NOT SPECIFIED|Clerk#000000496|0|equests. deposits ab|
+1153|119551|O|313658.75|1996-04-18|5-LOW|Clerk#000000059|0| across the pending deposi|
+1154|35111|F|270074.59|1992-02-15|1-URGENT|Clerk#000000268|0|old asymptotes are special requests. blithely even deposits sleep furiously|
+1155|149660|O|190259.22|1997-10-06|2-HIGH|Clerk#000000164|0|c deposits haggle among the ironic, even requests. carefully ironic sheaves n|
+1156|131657|O|297308.42|1996-10-19|1-URGENT|Clerk#000000200|0| blithely ironic dolphins. furiously pendi|
+1157|96710|O|148961.00|1998-01-14|4-NOT SPECIFIED|Clerk#000000207|0|out the regular excuses boost carefully against the furio|
+1158|141295|O|34242.19|1996-06-30|2-HIGH|Clerk#000000549|0|integrate slyly furiously ironic deposit|
+1159|68326|F|64326.40|1992-09-18|3-MEDIUM|Clerk#000000992|0|ts may sleep. requests according to the|
+1184|88495|O|69512.57|1997-10-26|5-LOW|Clerk#000000777|0|iously even packages haggle fluffily care|
+1185|73447|F|79682.95|1992-08-24|5-LOW|Clerk#000000344|0| even escapades are. package|
+1186|58823|O|135515.15|1996-08-15|4-NOT SPECIFIED|Clerk#000000798|0|ingly regular pinto beans: instructi|
+1187|133642|F|125456.57|1992-11-20|3-MEDIUM|Clerk#000000047|0|s after the furiously final deposits boost slyly under the|
+1188|19709|O|86078.34|1996-04-11|2-HIGH|Clerk#000000256|0|ully ironic deposits. slyl|
+1189|44359|F|122883.08|1994-04-09|1-URGENT|Clerk#000000243|0|f the even accounts. courts print blithely ironic accounts. sile|
+1190|11932|O|55891.86|1997-03-16|5-LOW|Clerk#000000575|0|ccounts above the foxes integrate carefully after the |
+1191|110275|O|37286.20|1995-11-07|3-MEDIUM|Clerk#000000011|0|uests nag furiously. carefully even requests|
+1216|121996|F|92011.72|1992-12-07|5-LOW|Clerk#000000918|0|nal foxes around the e|
+1217|6329|F|47260.47|1992-04-26|4-NOT SPECIFIED|Clerk#000000538|0| foxes nag quickly. ironic excuses nod. blithely pending|
+1218|8240|F|154082.80|1994-06-20|4-NOT SPECIFIED|Clerk#000000994|0|s cajole. special, silent deposits about the theo|
+1219|27010|O|17690.25|1995-10-05|3-MEDIUM|Clerk#000000800|0|od carefully. slyly final dependencies across the even fray|
+1220|48490|O|202449.30|1996-08-29|1-URGENT|Clerk#000000712|0|inal theodolites wake. fluffily ironic asymptotes cajol|
+1221|13318|F|164228.07|1992-04-19|4-NOT SPECIFIED|Clerk#000000852|0| detect against the silent, even deposits. carefully ironic|
+1222|9703|F|72907.53|1993-02-05|3-MEDIUM|Clerk#000000811|0|theodolites use quickly even accounts. carefully final asympto|
+1223|9001|O|38985.09|1996-05-25|4-NOT SPECIFIED|Clerk#000000238|0|posits was blithely fr|
+1248|47026|F|262081.09|1992-01-02|1-URGENT|Clerk#000000890|0|t the carefully regular dugouts. s|
+1249|149620|F|53358.24|1994-01-05|1-URGENT|Clerk#000000095|0|al ideas sleep above the pending pin|
+1250|35818|F|13394.58|1992-09-29|4-NOT SPECIFIED|Clerk#000000652|0|ts after the fluffily pending instructions use slyly about the s|
+1251|37735|O|185379.58|1997-10-30|1-URGENT|Clerk#000000276|0|, brave sauternes. deposits boost fluffily.|
+1252|149900|O|148226.45|1997-08-04|5-LOW|Clerk#000000348|0|ng the slyly regular excuses. special courts nag furiously blithely e|
+1253|114421|F|158706.12|1993-01-26|1-URGENT|Clerk#000000775|0| requests sleep furiously even foxes. ruthless packag|
+1254|69800|O|133418.92|1995-12-22|1-URGENT|Clerk#000000607|0| pinto beans. carefully regular request|
+1255|121670|F|104278.55|1994-05-30|4-NOT SPECIFIED|Clerk#000000798|0|ct slyly regular accounts. quick|
+1280|95518|F|147112.51|1993-01-11|5-LOW|Clerk#000000160|0|posits thrash quickly after the theodolites. furiously iro|
+1281|61099|F|247632.44|1994-12-11|1-URGENT|Clerk#000000430|0|counts. carefully pending accounts eat |
+1282|115915|F|85655.22|1992-02-29|4-NOT SPECIFIED|Clerk#000000168|0|he quickly special packages. furiously final re|
+1283|116326|O|315573.61|1996-08-30|4-NOT SPECIFIED|Clerk#000000260|0| pinto beans boost slyly ac|
+1284|133972|O|153911.68|1996-01-07|2-HIGH|Clerk#000000492|0|s. blithely silent deposits s|
+1285|10342|F|194729.03|1992-06-01|1-URGENT|Clerk#000000423|0|cial deposits cajole after the ironic requests. p|
+1286|108556|F|293396.88|1993-05-14|4-NOT SPECIFIED|Clerk#000000939|0| deposits use carefully from the excuses. slyly bold p|
+1287|17269|F|189405.61|1994-07-05|2-HIGH|Clerk#000000288|0|ly ironic dolphins integrate furiously among the final packages. st|
+1312|110999|F|109616.14|1994-05-19|3-MEDIUM|Clerk#000000538|0|n, express accounts across the ironic|
+1313|147671|F|64234.24|1994-09-13|1-URGENT|Clerk#000000774|0|ld accounts. regular deposits cajole. ironically pending theodolites use car|
+1314|142327|F|82075.80|1994-05-13|3-MEDIUM|Clerk#000000485|0|ickly blithe packages nod ideas. furiously bold braids boost around the car|
+1315|21430|O|185692.25|1998-03-22|5-LOW|Clerk#000000840|0|final theodolites alongside of the carefu|
+1316|15902|F|227746.24|1993-12-03|1-URGENT|Clerk#000000857|0|ully bold theodolites? pending, bold pin|
+1317|99083|P|194175.54|1995-05-19|2-HIGH|Clerk#000000373|0|sts. furiously special deposits lose fur|
+1318|127780|O|130357.62|1998-06-27|3-MEDIUM|Clerk#000000581|0|s hang bold requests. pending, re|
+1319|31150|O|58398.49|1996-09-27|2-HIGH|Clerk#000000257|0|y across the ruthlessly ironic accounts. unusu|
+1344|16990|F|67233.49|1992-04-16|5-LOW|Clerk#000000178|0|omise close, silent requests. pending theodolites boost pending |
+1345|94207|F|145703.02|1992-10-28|5-LOW|Clerk#000000447|0| regular tithes. quickly fluffy de|
+1346|74749|F|212488.52|1992-06-18|2-HIGH|Clerk#000000374|0|ges sleep quickly-- even pint|
+1347|40270|O|226083.68|1997-06-20|5-LOW|Clerk#000000977|0|he furiously even foxes use carefully express req|
+1348|17345|O|143342.43|1998-04-18|5-LOW|Clerk#000000206|0|tly. quickly even deposi|
+1349|63691|O|77455.85|1997-10-26|1-URGENT|Clerk#000000543|0|yly! blithely special theodolites cajole. unusual, reg|
+1350|50701|F|82352.77|1993-08-24|1-URGENT|Clerk#000000635|0|iously about the blithely special a|
+1351|105941|O|30164.33|1998-04-20|1-URGENT|Clerk#000000012|0| cajole. regular, special re|
+1376|46036|O|35815.36|1997-05-04|4-NOT SPECIFIED|Clerk#000000730|0|der furiously final, final frets. carefull|
+1377|19717|O|141575.94|1998-04-24|4-NOT SPECIFIED|Clerk#000000625|0|lly across the blithely express accounts. ironic excuses promise carefully de|
+1378|19532|O|159202.17|1996-03-09|4-NOT SPECIFIED|Clerk#000000705|0| furiously even tithes cajole slyly among the quick|
+1379|64160|O|109341.27|1998-05-25|5-LOW|Clerk#000000861|0|y deposits are caref|
+1380|136237|O|164979.12|1996-07-07|3-MEDIUM|Clerk#000000969|0|inal deposits wake slyly daringly even requests. bold, even foxe|
+1381|126340|O|63023.63|1998-05-25|3-MEDIUM|Clerk#000000107|0|even requests breach after the bold, ironic instructions. slyly even|
+1382|132247|F|284973.82|1993-08-17|5-LOW|Clerk#000000241|0|fully final packages sl|
+1383|119570|F|49378.61|1993-04-27|2-HIGH|Clerk#000000785|0|ts. express requests sleep blithel|
+1408|53897|O|284152.85|1997-12-26|4-NOT SPECIFIED|Clerk#000000942|0|t the quickly final asymptotes. unusual|
+1409|142885|F|106709.32|1992-12-31|4-NOT SPECIFIED|Clerk#000000065|0|ructions. furiously unusual excuses are regular, unusual theodolites. fin|
+1410|112102|O|155102.95|1997-04-12|5-LOW|Clerk#000000123|0|iously along the bravely regular dolphins. pinto beans cajole furiously sp|
+1411|94411|F|238723.96|1994-12-21|2-HIGH|Clerk#000000566|0|s. furiously special excuses across the pending pinto beans haggle sp|
+1412|52877|F|95898.00|1993-03-13|4-NOT SPECIFIED|Clerk#000000083|0|uffily daring theodolit|
+1413|90539|O|109266.41|1997-06-14|3-MEDIUM|Clerk#000000342|0|, ironic instructions. carefully even packages dazzle|
+1414|76268|O|48480.38|1995-08-16|1-URGENT|Clerk#000000883|0|ccounts. ironic foxes haggle car|
+1415|78697|F|30155.90|1994-05-29|4-NOT SPECIFIED|Clerk#000000601|0|rays. blithely final ideas affix quickl|
+1440|97780|O|85519.29|1995-08-10|5-LOW|Clerk#000000956|0| pending requests. closely s|
+1441|121822|O|258458.04|1997-03-06|4-NOT SPECIFIED|Clerk#000000156|0|ter the excuses. ironic dependencies m|
+1442|110555|F|7714.98|1994-07-05|4-NOT SPECIFIED|Clerk#000000935|0|nal pinto beans. slyly ironic ideas cajol|
+1443|43822|O|77129.49|1996-12-16|5-LOW|Clerk#000000185|0|x blithely against the carefully final somas. even asymptotes are. quickly spe|
+1444|133246|F|313834.99|1994-12-06|3-MEDIUM|Clerk#000000783|0|ove the bold accounts cajole fluffily about|
+1445|113581|F|229740.46|1995-01-10|3-MEDIUM|Clerk#000000211|0|even packages wake fluffily |
+1446|40025|O|52330.27|1998-02-16|5-LOW|Clerk#000000274|0|lly regular notornis above the requests sleep final accounts! |
+1447|89999|F|154678.72|1992-10-15|2-HIGH|Clerk#000000880|0|inly against the blithely pending excuses. regular, pe|
+1472|149854|O|78018.69|1996-10-06|5-LOW|Clerk#000000303|0|y special dolphins around the final dependencies wake quick|
+1473|92225|O|118262.03|1997-03-17|3-MEDIUM|Clerk#000000960|0|furiously close accoun|
+1474|68972|F|76044.52|1995-01-09|1-URGENT|Clerk#000000438|0|detect quickly above the carefully even |
+1475|4730|O|322140.89|1997-11-12|2-HIGH|Clerk#000000972|0|cally final packages boost. blithely ironic packa|
+1476|144535|O|29828.57|1996-06-27|2-HIGH|Clerk#000000673|0|ding accounts hinder alongside of the quickly pending requests. fluf|
+1477|75490|O|372279.48|1997-08-24|5-LOW|Clerk#000000612|0|ly bold foxes. final ideas would cajo|
+1478|49784|O|35252.04|1997-08-03|2-HIGH|Clerk#000000827|0|lessly. carefully express|
+1479|14575|O|55998.69|1995-12-16|4-NOT SPECIFIED|Clerk#000000697|0|he furiously even foxes. thinly bold deposits|
+1504|1762|F|121508.90|1992-08-28|3-MEDIUM|Clerk#000000381|0|, brave deposits. bold de|
+1505|35128|F|94940.81|1992-08-21|2-HIGH|Clerk#000000544|0|s. slyly ironic packages cajole. carefully regular packages haggle |
+1506|146018|F|301881.77|1992-09-21|3-MEDIUM|Clerk#000000620|0| dependencies. accounts affix blithely slowly unusual deposits. slyly regular |
+1507|120466|F|161377.33|1993-10-14|3-MEDIUM|Clerk#000000305|0|stealthy, ironic de|
+1508|101203|O|240098.61|1998-04-10|5-LOW|Clerk#000000117|0| after the furiously regular pinto beans hang slyly quickly ironi|
+1509|62380|F|265947.58|1993-07-08|5-LOW|Clerk#000000770|0|the regular ideas. regul|
+1510|52198|O|236397.82|1996-09-17|5-LOW|Clerk#000000128|0|ld carefully. furiously final asymptotes haggle furiously|
+1511|78421|O|86846.78|1996-12-22|4-NOT SPECIFIED|Clerk#000000386|0|ts above the depend|
+1536|93112|O|8588.61|1997-01-26|3-MEDIUM|Clerk#000000117|0|ges are! furiously final deposits cajole iron|
+1537|107512|F|156883.79|1992-02-15|4-NOT SPECIFIED|Clerk#000000862|0|g to the even deposits. ironic, final packages |
+1538|28924|O|280484.51|1995-06-18|4-NOT SPECIFIED|Clerk#000000258|0| instructions. regular theod|
+1539|111724|F|68424.63|1995-03-10|5-LOW|Clerk#000000840|0|nstructions boost pa|
+1540|14125|F|206638.05|1992-08-05|2-HIGH|Clerk#000000927|0|r ideas hinder blithe|
+1541|93925|P|68374.59|1995-05-18|1-URGENT|Clerk#000000906|0|y. slyly ironic warhorses around the furiously regul|
+1542|142649|F|234541.48|1993-09-15|3-MEDIUM|Clerk#000000435|0|t the furiously close deposits do was f|
+1543|50275|O|211397.93|1997-02-20|1-URGENT|Clerk#000000398|0|unts. furiously pend|
+1568|16678|O|130479.00|1997-01-30|4-NOT SPECIFIED|Clerk#000000554|0|d notornis. carefully |
+1569|103582|O|141162.96|1998-04-02|5-LOW|Clerk#000000786|0|orbits. fluffily even decoys serve blithely. furiously furious realms nag acro|
+1570|122278|O|52434.21|1998-03-16|1-URGENT|Clerk#000000745|0|pinto beans haggle furiousl|
+1571|102628|F|202138.38|1992-12-05|2-HIGH|Clerk#000000565|0|ously furiously bold warthogs. slyly ironic instructions are quickly a|
+1572|10399|O|77690.58|1996-02-24|2-HIGH|Clerk#000000994|0|fluffily ironic accounts haggle blithely final platelets! slyly regular foxes|
+1573|147974|F|119124.49|1992-12-28|2-HIGH|Clerk#000000940|0|ess, ironic deposits use along the carefu|
+1574|133400|O|272761.32|1996-12-12|3-MEDIUM|Clerk#000000809|0| ideas hinder after the carefully unusual |
+1575|144616|O|298953.62|1995-09-13|3-MEDIUM|Clerk#000000497|0|. furiously regular dep|
+1600|92563|F|162165.04|1993-03-03|3-MEDIUM|Clerk#000000627|0|tions cajole quietly above the regular, silent requests. slyly fin|
+1601|52390|F|106227.03|1994-08-27|5-LOW|Clerk#000000469|0|ent deposits are ca|
+1602|911|F|7367.82|1993-08-05|5-LOW|Clerk#000000660|0|deposits. busily silent instructions haggle furiously. fin|
+1603|1150|F|35608.23|1993-07-31|4-NOT SPECIFIED|Clerk#000000869|0|s. slyly silent deposits boo|
+1604|112154|F|140038.40|1993-07-17|5-LOW|Clerk#000000512|0|lithely silent waters. blithely unusual packages alongside |
+1605|57578|O|216049.81|1998-04-24|4-NOT SPECIFIED|Clerk#000000616|0|sleep furiously? ruthless, even pinto beans |
+1606|52916|O|157377.77|1997-04-17|4-NOT SPECIFIED|Clerk#000000550|0|r requests. quickly even platelets breach before the ironically|
+1607|149282|O|294026.58|1995-12-16|2-HIGH|Clerk#000000498|0| bold, pending foxes haggle. slyly silent |
+1632|65858|O|233585.38|1997-01-08|3-MEDIUM|Clerk#000000351|0|onic requests are accounts. bold a|
+1633|14204|O|86203.83|1995-10-14|2-HIGH|Clerk#000000666|0|y silent accounts sl|
+1634|68857|O|217318.90|1996-09-10|1-URGENT|Clerk#000000360|0|arefully blithely ironic requests. slyly unusual instructions alongside|
+1635|3727|O|88281.27|1997-02-13|3-MEDIUM|Clerk#000000958|0|s. slyly ironic requests affix slyly |
+1636|78634|O|284967.18|1997-06-17|3-MEDIUM|Clerk#000000457|0|ding requests. slyly ironic courts wake quickl|
+1637|72703|F|296568.42|1995-02-08|4-NOT SPECIFIED|Clerk#000000189|0| final accounts. blithely silent ideas cajole bravely. carefully express |
+1638|138863|O|303176.64|1997-08-13|2-HIGH|Clerk#000000643|0|he fluffily regular asymp|
+1639|4306|O|166740.34|1995-08-20|4-NOT SPECIFIED|Clerk#000000939|0|haggle furiously. final requests detect furious|
+1664|63934|O|227292.67|1996-03-03|1-URGENT|Clerk#000000090|0|y quickly even asymptotes. furiously regular packages haggle quickly fin|
+1665|75635|F|6539.26|1994-05-08|2-HIGH|Clerk#000000920|0|ly regular packages are fluffily even ideas. fluffily final|
+1666|94096|O|206667.34|1995-10-18|1-URGENT|Clerk#000000849|0|ffily pending dependencies wake fluffily. pending, final accounts |
+1667|4570|O|199522.68|1997-10-10|2-HIGH|Clerk#000000103|0|e accounts. slyly express accounts must are a|
+1668|141415|O|236370.74|1997-07-12|4-NOT SPECIFIED|Clerk#000000148|0|eodolites. carefully dogged dolphins haggle q|
+1669|1840|O|33626.40|1997-06-09|3-MEDIUM|Clerk#000000663|0|er ironic requests detect furiously blithely sp|
+1670|24412|O|120967.89|1997-05-24|2-HIGH|Clerk#000000320|0|unusual dependencies. furiously special platelets main|
+1671|34246|O|148836.91|1996-07-27|4-NOT SPECIFIED|Clerk#000000275|0|ly. slyly pending requests was above the |
+1696|3424|O|160855.63|1998-01-08|4-NOT SPECIFIED|Clerk#000000041|0|bravely bold accounts above the quickly bold|
+1697|75703|O|173335.61|1996-10-07|1-URGENT|Clerk#000000815|0|o x-ray blithely. pl|
+1698|39499|O|222923.71|1997-04-23|2-HIGH|Clerk#000000432|0|slyly. carefully express deposit|
+1699|83479|F|98221.37|1993-12-30|1-URGENT|Clerk#000000125|0|jole blithely. furiously un|
+1700|64847|O|160108.82|1996-06-15|3-MEDIUM|Clerk#000000328|0|ely final dolphins wake sometimes above the quietly regular deposits. fur|
+1701|128353|F|108718.92|1992-05-19|2-HIGH|Clerk#000000395|0|furiously. regular, close theodoli|
+1702|65312|P|277549.66|1995-05-07|2-HIGH|Clerk#000000300|0|around the carefully final deposits cajole carefully according to the b|
+1703|133396|F|160080.39|1993-01-28|3-MEDIUM|Clerk#000000463|0| pinto beans poach. bold courts boost. regular, express deposits at|
+1728|62983|O|156964.07|1996-05-22|2-HIGH|Clerk#000000711|0|beans. slyly regular instructions sleep! slyly final packages|
+1729|131087|F|23019.91|1992-05-19|2-HIGH|Clerk#000000158|0|pending foxes wake. accounts|
+1730|123296|O|204656.14|1998-07-24|5-LOW|Clerk#000000794|0| fluffily pending deposits serve. furiously even requests wake furiou|
+1731|127777|O|317068.68|1996-01-06|1-URGENT|Clerk#000000268|0|lithely regular, final instructions. ironic, express packages are above|
+1732|145841|F|285193.99|1993-11-29|5-LOW|Clerk#000000903|0|inal requests integrate dolph|
+1733|147413|O|265070.67|1996-05-12|2-HIGH|Clerk#000000789|0|e carefully according to the accounts. furiously pending instructions sleep|
+1734|6356|F|80326.22|1994-06-11|2-HIGH|Clerk#000000722|0| final ideas haggle. blithely quick foxes sleep busily bold ideas. i|
+1735|20543|F|144256.48|1992-12-27|1-URGENT|Clerk#000000458|0|ully idle requests wake qu|
+1760|114157|O|132205.00|1996-05-17|5-LOW|Clerk#000000917|0| deposits. busily regular deposits wake blithely along the furiously even re|
+1761|104872|F|324805.88|1993-12-24|2-HIGH|Clerk#000000817|0|efully slyly bold frets. packages boost b|
+1762|76610|F|311995.07|1994-08-20|4-NOT SPECIFIED|Clerk#000000653|0|ly ironic packages. furi|
+1763|120416|O|197170.68|1996-10-29|2-HIGH|Clerk#000000321|0|es. bold dependencies haggle furiously along |
+1764|28136|F|58897.70|1992-03-25|1-URGENT|Clerk#000000182|0|. slyly final packages integrate carefully acro|
+1765|72505|O|56026.52|1995-12-03|4-NOT SPECIFIED|Clerk#000000490|0| regular excuses wake slyly|
+1766|138445|O|74166.19|1996-10-12|2-HIGH|Clerk#000000983|0|unusual deposits affix quickly beyond the carefully s|
+1767|24511|P|228388.42|1995-03-14|2-HIGH|Clerk#000000327|0|eposits use carefully carefully regular platelets. quickly regular packages al|
+1792|47098|F|147328.39|1993-11-09|5-LOW|Clerk#000000102|0|ructions haggle along the pending packages. carefully speci|
+1793|17618|F|111992.12|1992-07-12|4-NOT SPECIFIED|Clerk#000000291|0|regular packages cajole. blithely special packages according to the final d|
+1794|139519|O|246303.50|1997-09-28|1-URGENT|Clerk#000000686|0|ally silent pinto beans. regular package|
+1795|92795|F|204658.83|1994-03-19|2-HIGH|Clerk#000000815|0| quickly final packages! blithely dogged accounts c|
+1796|46240|F|64744.60|1992-11-21|2-HIGH|Clerk#000000245|0|eans use furiously around th|
+1797|124300|O|80578.62|1996-05-07|3-MEDIUM|Clerk#000000508|0|quiet platelets haggle since the quickly ironic instructi|
+1798|51518|O|85829.54|1997-07-28|1-URGENT|Clerk#000000741|0|al foxes are blithe|
+1799|60032|F|61448.12|1994-03-07|4-NOT SPECIFIED|Clerk#000000339|0|ns sleep furiously final waters. blithely regular instructions h|
+1824|48187|F|118426.52|1994-05-05|1-URGENT|Clerk#000000972|0|e blithely fluffily|
+1825|146462|F|200329.03|1993-12-05|3-MEDIUM|Clerk#000000345|0|ironic, final accou|
+1826|81313|F|179414.64|1992-04-16|4-NOT SPECIFIED|Clerk#000000718|0|the even asymptotes dazzle fluffily slyly regular asymptotes. final, unu|
+1827|105067|O|321005.97|1996-06-22|4-NOT SPECIFIED|Clerk#000000369|0|luffily even requests haggle sly|
+1828|31387|F|219732.20|1994-04-18|3-MEDIUM|Clerk#000000840|0|y quickly bold packag|
+1829|111292|F|210093.38|1994-05-08|2-HIGH|Clerk#000000537|0| accounts wake above the furiously unusual requests. pending package|
+1830|131414|F|113847.60|1995-02-23|1-URGENT|Clerk#000000045|0|according to the even,|
+1831|70549|F|79652.46|1993-12-02|1-URGENT|Clerk#000000854|0| accounts. carefully even accounts boost furiously. regular ideas engage. |
+1856|104323|F|326087.55|1992-03-20|4-NOT SPECIFIED|Clerk#000000952|0|. special pinto beans run acr|
+1857|131999|F|160202.01|1993-01-13|2-HIGH|Clerk#000000083|0|hely final ideas slee|
+1858|142186|O|61512.66|1997-12-13|1-URGENT|Clerk#000000389|0|thely. slyly final deposits sleep|
+1859|59789|O|176645.71|1997-04-11|4-NOT SPECIFIED|Clerk#000000949|0| the foxes. bravely special excuses nag carefully special r|
+1860|9563|O|17566.30|1996-04-04|3-MEDIUM|Clerk#000000556|0|osits. quickly bold deposits according to |
+1861|68447|F|138942.53|1994-01-03|3-MEDIUM|Clerk#000000847|0|r the fluffily close sauternes. furio|
+1862|32411|O|161079.07|1998-02-24|5-LOW|Clerk#000000348|0|ts snooze ironically abou|
+1863|73084|F|165092.37|1993-09-23|4-NOT SPECIFIED|Clerk#000000658|0|old sentiments. careful, |
+1888|119095|F|337294.96|1993-10-31|4-NOT SPECIFIED|Clerk#000000659|0|olites. pinto beans cajole. regular deposits affix. slyly regular|
+1889|24268|O|119505.82|1997-03-16|1-URGENT|Clerk#000000854|0|p around the regular notornis. unusual deposits|
+1890|9430|O|279232.56|1996-12-18|4-NOT SPECIFIED|Clerk#000000627|0|romise final, regular deposits. regular fox|
+1891|60905|F|84934.79|1994-12-15|5-LOW|Clerk#000000495|0|unusual foxes sleep regular deposits. requests wake special pac|
+1892|23347|F|193320.86|1994-03-26|5-LOW|Clerk#000000733|0|sts. slyly regular dependencies use slyly. ironic, spec|
+1893|124400|O|155189.57|1997-10-30|2-HIGH|Clerk#000000111|0|olites. silent, special deposits eat slyly quickly express packages; hockey p|
+1894|74996|F|56473.79|1992-03-30|1-URGENT|Clerk#000000626|0|e furiously. furiously even accounts are slyly final accounts. closely speci|
+1895|6505|F|52428.06|1994-05-30|3-MEDIUM|Clerk#000000878|0|ress accounts. bold accounts cajole. slyly final pinto beans poach regul|
+1920|109567|O|179768.99|1998-06-24|5-LOW|Clerk#000000018|0|hely; furiously regular excuses|
+1921|86111|F|100912.85|1994-01-18|3-MEDIUM|Clerk#000000293|0|counts. slyly quiet requests along the ruthlessly regular accounts are |
+1922|55513|O|21431.10|1996-07-13|3-MEDIUM|Clerk#000000984|0|side of the blithely final re|
+1923|134458|O|266428.59|1997-07-07|1-URGENT|Clerk#000000471|0| express dolphins. |
+1924|74677|O|258213.13|1996-09-07|4-NOT SPECIFIED|Clerk#000000823|0| of the ironic accounts. instructions near the final instr|
+1925|16696|F|204948.36|1992-03-05|1-URGENT|Clerk#000000986|0|e slyly regular deposits. furiously |
+1926|92971|O|149651.08|1996-01-31|2-HIGH|Clerk#000000568|0|cajole. even warhorses sleep carefully. |
+1927|139840|O|37701.43|1995-09-30|3-MEDIUM|Clerk#000000616|0|riously special packages. permanent pearls wake furiously. even packages alo|
+1952|66143|F|17300.07|1994-03-16|2-HIGH|Clerk#000000254|0| silent accounts boost |
+1953|149407|F|103041.88|1993-11-30|3-MEDIUM|Clerk#000000891|0| fluffily along the quickly even packages. |
+1954|55898|O|224803.85|1997-05-31|4-NOT SPECIFIED|Clerk#000000104|0| unusual excuses cajole according to the blithely regular theodolites.|
+1955|12065|F|141123.20|1992-04-20|1-URGENT|Clerk#000000792|0|ly special ideas. sometimes final |
+1956|125753|F|157551.61|1992-09-20|4-NOT SPECIFIED|Clerk#000000600|0|ironic ideas are silent ideas. furiously final deposits sleep slyly carefu|
+1957|30107|O|91793.95|1998-07-21|2-HIGH|Clerk#000000639|0|nding excuses about the |
+1958|52151|O|249441.38|1995-09-22|5-LOW|Clerk#000000343|0| haggle blithely. flu|
+1959|42370|O|98535.63|1997-01-13|4-NOT SPECIFIED|Clerk#000000631|0| cajole about the blithely express requests. even excuses mold bl|
+1984|50668|O|118338.24|1998-04-01|1-URGENT|Clerk#000000416|0| slyly special instructions. unusual foxes use packages. carefully regular req|
+1985|5810|F|239317.63|1994-09-02|4-NOT SPECIFIED|Clerk#000000741|0|slyly slyly even pains. slyly reg|
+1986|148409|F|55996.25|1994-05-05|2-HIGH|Clerk#000000609|0|across the theodolites. quick|
+1987|98189|F|6525.18|1994-04-30|2-HIGH|Clerk#000000652|0|gular platelets alongside |
+1988|107392|O|160700.63|1995-10-06|4-NOT SPECIFIED|Clerk#000000011|0|ly ironic dolphins serve quickly busy accounts. bu|
+1989|116147|F|76837.41|1994-03-16|4-NOT SPECIFIED|Clerk#000000747|0|ely bold pinto beans ha|
+1990|118060|F|95601.98|1994-12-16|2-HIGH|Clerk#000000114|0|e bold patterns. always regul|
+1991|18529|F|198410.02|1992-09-07|4-NOT SPECIFIED|Clerk#000000854|0|ing accounts can haggle at the carefully final Tiresias-- pending, regular|
+2016|7513|O|37641.83|1996-08-16|3-MEDIUM|Clerk#000000641|0|the carefully ironic foxes. requests nag bold, r|
+2017|100576|O|101688.15|1998-05-13|3-MEDIUM|Clerk#000000427|0|nusual requests. blit|
+2018|17590|P|27854.73|1995-04-05|4-NOT SPECIFIED|Clerk#000000920|0|gular accounts wake fur|
+2019|134029|F|65697.09|1992-10-23|1-URGENT|Clerk#000000565|0| furiously bold packages. fluffily fi|
+2020|72448|F|231691.79|1993-06-21|3-MEDIUM|Clerk#000000192|0|es. furiously regular packages above the furiously special theodolites are a|
+2021|69850|O|36978.12|1995-07-15|1-URGENT|Clerk#000000155|0|ong the furiously regular requests. unusual deposits wake fluffily inside|
+2022|61813|F|318901.90|1992-03-15|1-URGENT|Clerk#000000268|0| dependencies sleep fluffily even, ironic deposits. express, silen|
+2023|117961|F|252009.19|1992-05-06|5-LOW|Clerk#000000137|0|ular courts engage according to the|
+2048|16763|F|46866.20|1993-11-15|1-URGENT|Clerk#000000934|0|s cajole after the blithely final accounts. f|
+2049|29749|O|208461.58|1995-12-07|2-HIGH|Clerk#000000859|0|ly regular requests thrash blithely about the fluffily even theodolites. r|
+2050|27259|F|304485.79|1994-06-02|4-NOT SPECIFIED|Clerk#000000821|0|d accounts against the furiously regular packages use bli|
+2051|38071|O|116180.92|1996-03-18|4-NOT SPECIFIED|Clerk#000000333|0|ctions sleep blithely. blithely regu|
+2052|89338|F|254487.96|1992-04-13|2-HIGH|Clerk#000000767|0| requests sleep around the even, even courts. ironic theodolites affix furious|
+2053|140578|F|190120.72|1995-02-07|1-URGENT|Clerk#000000717|0|ar requests: blithely sly accounts boost carefully across t|
+2054|40088|F|246569.23|1992-06-08|4-NOT SPECIFIED|Clerk#000000103|0|l requests affix carefully about the furiously special|
+2055|96850|F|83312.64|1993-09-04|1-URGENT|Clerk#000000067|0|. warhorses affix slyly blithely express instructions? fur|
+2080|94459|F|86061.80|1993-06-18|5-LOW|Clerk#000000190|0|ironic, pending theodolites are carefully about the quickly regular theodolite|
+2081|119239|O|227153.78|1997-07-05|2-HIGH|Clerk#000000136|0|ong the regular theo|
+2082|48568|F|69210.02|1995-01-10|2-HIGH|Clerk#000000354|0|cial accounts. ironic, express dolphins nod slyly sometimes final reques|
+2083|100723|F|63345.71|1993-07-14|3-MEDIUM|Clerk#000000361|0|al patterns. bold, final foxes nag bravely about the furiously express|
+2084|79970|F|285435.84|1993-03-17|2-HIGH|Clerk#000000048|0|zle furiously final, careful packages. slyly ironic ideas amo|
+2085|48739|F|54707.54|1993-11-21|3-MEDIUM|Clerk#000000818|0|ress, express ideas haggle|
+2086|140329|F|314182.42|1994-10-19|1-URGENT|Clerk#000000046|0| permanently regular|
+2087|49189|O|70532.19|1998-01-31|2-HIGH|Clerk#000000626|0|e always regular packages nod against the furiously spec|
+2112|63289|O|19727.58|1997-02-05|2-HIGH|Clerk#000000351|0|against the slyly even id|
+2113|31867|O|100436.44|1997-11-08|2-HIGH|Clerk#000000527|0|slyly regular instruct|
+2114|78536|F|149985.64|1995-01-16|5-LOW|Clerk#000000751|0|r, unusual accounts haggle across the busy platelets. carefully |
+2115|104101|O|160667.90|1998-05-23|4-NOT SPECIFIED|Clerk#000000101|0|odolites boost. carefully regular excuses cajole. quickly ironic pinto be|
+2116|22207|F|80838.47|1994-08-26|1-URGENT|Clerk#000000197|0|efully after the asymptotes. furiously sp|
+2117|21883|O|205117.50|1997-04-26|2-HIGH|Clerk#000000887|0|ely even dependencies. regular foxes use blithely.|
+2118|133145|O|56415.21|1996-10-09|1-URGENT|Clerk#000000196|0|ial requests wake carefully special packages. f|
+2119|62027|O|60575.04|1996-08-20|2-HIGH|Clerk#000000434|0|uickly pending escapades. fluffily ir|
+2144|134623|F|196472.92|1994-03-29|3-MEDIUM|Clerk#000000546|0|t. carefully quick requests across the deposits wake regu|
+2145|133426|F|28869.91|1992-10-03|1-URGENT|Clerk#000000886|0|sts would snooze blithely alongside of th|
+2146|117520|F|282823.12|1992-09-14|4-NOT SPECIFIED|Clerk#000000476|0|ven packages. dependencies wake slyl|
+2147|98753|F|151017.45|1992-09-06|4-NOT SPECIFIED|Clerk#000000424|0| haggle carefully furiously final foxes. pending escapades thrash. bold theod|
+2148|128239|F|37616.49|1995-04-19|4-NOT SPECIFIED|Clerk#000000517|0|ross the furiously unusual theodolites. always expre|
+2149|100642|F|159589.25|1993-03-13|5-LOW|Clerk#000000555|0|nusual accounts nag furiously special reques|
+2150|81676|F|283459.87|1994-06-03|3-MEDIUM|Clerk#000000154|0|ect slyly against the even, final packages. quickly regular pinto beans wake c|
+2151|57902|O|216383.85|1996-11-11|3-MEDIUM|Clerk#000000996|0|c requests. ironic platelets cajole across the quickly fluffy deposits.|
+2176|103403|F|109875.44|1992-11-10|1-URGENT|Clerk#000000195|0|s haggle regularly accor|
+2177|134927|O|280803.60|1997-01-20|3-MEDIUM|Clerk#000000161|0|ove the blithely unusual packages cajole carefully fluffily special request|
+2178|7973|O|139116.18|1996-12-12|3-MEDIUM|Clerk#000000656|0|thely according to the instructions. furious|
+2179|40979|O|99933.09|1996-09-07|2-HIGH|Clerk#000000935|0|ounts alongside of the furiously unusual braids cajol|
+2180|74938|O|281035.92|1996-09-14|4-NOT SPECIFIED|Clerk#000000650|0|xpress, unusual pains. furiously ironic excu|
+2181|74026|O|177880.62|1995-09-13|3-MEDIUM|Clerk#000000814|0|y against the ironic, even|
+2182|22525|F|180607.07|1994-04-05|2-HIGH|Clerk#000000071|0|ccounts. quickly bold deposits across the excuses sl|
+2183|112012|O|70563.85|1996-06-22|1-URGENT|Clerk#000000287|0| among the express, ironic packages. slyly ironic platelets integrat|
+2208|67594|P|331019.93|1995-05-01|4-NOT SPECIFIED|Clerk#000000900|0|symptotes wake slyly blithely unusual packages.|
+2209|89056|F|214341.18|1992-07-10|2-HIGH|Clerk#000000056|0|er above the slyly silent requests. furiously reg|
+2210|31753|F|56433.02|1992-01-16|2-HIGH|Clerk#000000941|0| believe carefully quickly express pinto beans. deposi|
+2211|91969|F|232658.05|1994-06-30|2-HIGH|Clerk#000000464|0|ffily bold courts e|
+2212|116227|F|31599.39|1994-03-23|3-MEDIUM|Clerk#000000954|0|structions above the unusual requests use fur|
+2213|121216|F|231251.95|1993-01-15|4-NOT SPECIFIED|Clerk#000000598|0|osits are carefully reg|
+2214|114062|O|241753.27|1998-05-05|3-MEDIUM|Clerk#000000253|0|packages. fluffily even accounts haggle blithely. carefully ironic depen|
+2215|38684|O|146208.05|1996-06-16|4-NOT SPECIFIED|Clerk#000000817|0|le final, final foxes. quickly regular gifts are carefully deposit|
+2240|55367|F|276496.15|1992-03-06|4-NOT SPECIFIED|Clerk#000000622|0|accounts against the slyly express foxes are after the slyly regular |
+2241|101276|F|241266.34|1993-05-11|1-URGENT|Clerk#000000081|0|y about the silent excuses. furiously ironic instructions along the sil|
+2242|81416|O|16527.09|1997-07-20|4-NOT SPECIFIED|Clerk#000000360|0| pending multipliers. carefully express asymptotes use quickl|
+2243|47810|O|20504.42|1995-06-10|2-HIGH|Clerk#000000813|0|ously regular deposits integrate s|
+2244|126746|F|28601.86|1993-01-09|1-URGENT|Clerk#000001000|0|ckages. ironic, ironic accounts haggle blithely express excuses. |
+2245|56458|F|222770.91|1993-04-28|3-MEDIUM|Clerk#000000528|0|ake carefully. braids haggle slyly quickly b|
+2246|112325|O|107302.84|1996-05-27|4-NOT SPECIFIED|Clerk#000000739|0| final gifts sleep |
+2247|94630|F|25089.88|1992-08-02|4-NOT SPECIFIED|Clerk#000000947|0|furiously regular packages. final brai|
+2272|138202|F|205341.38|1993-04-13|2-HIGH|Clerk#000000449|0|s. bold, ironic pinto beans wake. silently specia|
+2273|134254|O|214061.54|1996-12-14|5-LOW|Clerk#000000155|0|uickly express foxes haggle quickly against|
+2274|103993|F|85225.21|1993-09-04|4-NOT SPECIFIED|Clerk#000000258|0|nstructions try to hag|
+2275|148325|F|46829.31|1992-10-22|4-NOT SPECIFIED|Clerk#000000206|0| furiously furious platelets. slyly final packa|
+2276|41390|O|202644.34|1996-04-29|4-NOT SPECIFIED|Clerk#000000821|0|ecial requests. fox|
+2277|88705|F|132036.50|1995-01-02|4-NOT SPECIFIED|Clerk#000000385|0|accounts cajole. even i|
+2278|140536|O|160639.36|1998-04-25|3-MEDIUM|Clerk#000000186|0|r pinto beans integrate after the carefully even deposits. blit|
+2279|79399|F|232058.82|1993-02-23|3-MEDIUM|Clerk#000000898|0|de of the quickly unusual instructio|
+2304|44923|F|126354.87|1994-01-07|4-NOT SPECIFIED|Clerk#000000415|0|onic platelets. ironic packages haggle. packages nag doggedly according to|
+2305|41968|F|151482.61|1993-01-26|2-HIGH|Clerk#000000440|0|ove the furiously even acco|
+2306|26710|O|398867.54|1995-07-26|2-HIGH|Clerk#000000975|0| wake furiously requests. permanent requests affix. final packages caj|
+2307|104039|F|82439.12|1993-06-29|5-LOW|Clerk#000000952|0|furiously even asymptotes? carefully regular accounts|
+2308|23890|F|93415.14|1992-10-25|4-NOT SPECIFIED|Clerk#000000609|0|ts. slyly final depo|
+2309|99505|O|219187.00|1995-09-04|5-LOW|Clerk#000000803|0|he carefully pending packages. fluffily stealthy foxes engage carefully|
+2310|30394|O|118386.20|1996-09-20|5-LOW|Clerk#000000917|0|wake carefully. unusual instructions nag ironic, regular excuse|
+2311|71605|P|257239.18|1995-05-02|2-HIGH|Clerk#000000761|0|ly pending asymptotes-- furiously bold excus|
+2336|141388|O|25556.93|1996-01-07|4-NOT SPECIFIED|Clerk#000000902|0|c, final excuses sleep furiously among the even theodolites. f|
+2337|140138|O|83996.36|1997-06-18|4-NOT SPECIFIED|Clerk#000000754|0| quickly. final accounts haggle. carefully final acco|
+2338|139369|O|29723.34|1997-09-15|2-HIGH|Clerk#000000951|0|riously final dugouts. final, ironic packages wake express, ironic id|
+2339|108352|F|105092.65|1993-12-15|5-LOW|Clerk#000000847|0| against the regular |
+2340|64462|O|55626.96|1996-01-12|1-URGENT|Clerk#000000964|0|ter the deposits sleep according to the slyly regular packages. carefully |
+2341|81514|F|74028.24|1993-05-30|5-LOW|Clerk#000000443|0|sts-- blithely bold dolphins through the deposits nag blithely carefully re|
+2342|36427|O|174855.28|1996-06-09|1-URGENT|Clerk#000000615|0|oost carefully across the regular accounts. blithely final d|
+2343|71026|O|145902.44|1995-08-21|3-MEDIUM|Clerk#000000170|0|fluffily over the slyly special deposits. quickl|
+2368|12832|F|163026.68|1993-08-20|1-URGENT|Clerk#000000830|0|t the bold instructions. carefully unusual |
+2369|109624|O|116148.42|1996-12-24|2-HIGH|Clerk#000000752|0|iously even requests are dogged, express |
+2370|140363|F|102350.31|1994-01-17|1-URGENT|Clerk#000000231|0|lyly final packages. quickly final deposits haggl|
+2371|18637|O|262602.78|1998-01-07|1-URGENT|Clerk#000000028|0|ckages haggle at th|
+2372|29062|O|150263.34|1997-11-21|5-LOW|Clerk#000000342|0|s: deposits haggle along the final ideas. careful|
+2373|26231|F|81974.16|1994-03-12|4-NOT SPECIFIED|Clerk#000000306|0| even, special courts grow quickly. pending,|
+2374|3865|F|175693.17|1993-10-29|4-NOT SPECIFIED|Clerk#000000081|0| blithely regular packages. blithely unusua|
+2375|4849|O|146143.90|1996-11-20|3-MEDIUM|Clerk#000000197|0|unusual, pending theodolites cajole carefully |
+2400|36397|O|150304.73|1998-07-25|5-LOW|Clerk#000000782|0|nusual courts nag against the carefully unusual pinto b|
+2401|147568|O|124098.72|1997-07-29|4-NOT SPECIFIED|Clerk#000000531|0|ully unusual instructions boost carefully silently regular requests. |
+2402|65717|O|102468.59|1996-09-06|4-NOT SPECIFIED|Clerk#000000162|0|slyly final sheaves sleep slyly. q|
+2403|54376|O|182351.26|1998-04-11|3-MEDIUM|Clerk#000000820|0|furiously regular deposits use. furiously unusual accounts wake along the |
+2404|76669|O|162281.61|1997-03-13|4-NOT SPECIFIED|Clerk#000000409|0|deposits breach furiously. ironic foxes haggle carefully bold packag|
+2405|71284|O|155170.08|1996-12-23|3-MEDIUM|Clerk#000000535|0|ular, regular asympto|
+2406|6635|O|248750.45|1996-10-28|5-LOW|Clerk#000000561|0|blithely regular accounts u|
+2407|53530|O|148223.74|1998-06-19|2-HIGH|Clerk#000000068|0|uests affix slyly among the slyly regular depos|
+2432|101915|O|96364.14|1996-07-13|1-URGENT|Clerk#000000115|0|re. slyly even deposits wake bra|
+2433|29900|F|209681.19|1994-08-22|4-NOT SPECIFIED|Clerk#000000324|0|ess patterns are slyly. packages haggle carefu|
+2434|23683|O|175002.80|1997-04-27|3-MEDIUM|Clerk#000000190|0|s. quickly ironic dolphins impress final deposits. blithel|
+2435|71318|F|191565.82|1993-02-21|5-LOW|Clerk#000000112|0|es are carefully along the carefully final instructions. pe|
+2436|124972|O|89745.07|1995-09-11|4-NOT SPECIFIED|Clerk#000000549|0|arefully. blithely bold deposits affix special accounts. final foxes nag. spe|
+2437|84359|F|237907.34|1993-04-21|4-NOT SPECIFIED|Clerk#000000578|0|. theodolites wake slyly-- ironic, pending platelets above the carefully exp|
+2438|12544|F|331721.20|1993-07-15|2-HIGH|Clerk#000000744|0|the final, regular warhorses. regularly |
+2439|53978|O|65698.23|1997-03-15|2-HIGH|Clerk#000000819|0|lithely after the car|
+2464|144844|O|43621.20|1997-11-23|5-LOW|Clerk#000000633|0|le about the instructions. courts wake carefully even|
+2465|32354|O|289984.52|1995-06-24|1-URGENT|Clerk#000000078|0|al pinto beans. final, bold packages wake quickly|
+2466|17968|F|230286.88|1994-03-06|1-URGENT|Clerk#000000424|0|c pinto beans. express deposits wake quickly. even, final courts nag. package|
+2467|34198|O|13411.16|1995-07-16|4-NOT SPECIFIED|Clerk#000000914|0|pades sleep furiously. sometimes regular packages again|
+2468|110837|O|222965.97|1997-06-09|4-NOT SPECIFIED|Clerk#000000260|0|ickly regular packages. slyly ruthless requests snooze quickly blithe|
+2469|122899|O|269943.76|1996-11-26|5-LOW|Clerk#000000730|0| sleep closely regular instructions. furiously ironic instructi|
+2470|56233|O|168055.11|1997-04-19|3-MEDIUM|Clerk#000000452|0|to the furiously final packages? pa|
+2471|88373|O|64926.55|1998-03-12|4-NOT SPECIFIED|Clerk#000000860|0|carefully blithely regular pac|
+2496|134117|F|196761.07|1994-01-09|2-HIGH|Clerk#000000142|0|slyly. pending instructions sleep. quic|
+2497|46906|F|276801.77|1992-08-27|1-URGENT|Clerk#000000977|0|ily ironic pinto beans. furiously final platelets alongside of t|
+2498|95870|F|54636.42|1993-11-08|5-LOW|Clerk#000000373|0|g the slyly special pinto beans. |
+2499|120994|O|181237.29|1995-09-24|1-URGENT|Clerk#000000277|0|r the quickly bold foxes. bold instructi|
+2500|132878|F|161720.91|1992-08-15|2-HIGH|Clerk#000000447|0|integrate slyly pending deposits. furiously ironic accounts across the s|
+2501|66415|O|140174.62|1997-05-25|5-LOW|Clerk#000000144|0|ickly special theodolite|
+2502|69356|F|38318.31|1993-05-28|4-NOT SPECIFIED|Clerk#000000914|0|lyly: carefully pending ideas affix again|
+2503|6538|F|257866.34|1993-06-20|3-MEDIUM|Clerk#000000294|0|ly even packages was. ironic, regular deposits unwind furiously across the p|
+2528|54986|F|115391.05|1994-11-20|1-URGENT|Clerk#000000789|0|ular dependencies? regular frays kindle according to the blith|
+2529|134836|O|6633.85|1996-08-20|2-HIGH|Clerk#000000511|0|posits across the silent instructions wake blithely across |
+2530|127075|F|85200.46|1994-03-21|3-MEDIUM|Clerk#000000291|0|ular instructions about the quic|
+2531|43354|O|238750.97|1996-05-06|4-NOT SPECIFIED|Clerk#000000095|0|even accounts. furiously ironic excuses sleep fluffily. carefully silen|
+2532|93536|O|176642.09|1995-10-11|2-HIGH|Clerk#000000498|0|the blithely pending accounts. regular, regular excuses boost aro|
+2533|49547|O|269844.93|1997-03-24|1-URGENT|Clerk#000000594|0|ecial instructions. spec|
+2534|75217|O|332451.75|1996-07-17|3-MEDIUM|Clerk#000000332|0|packages cajole ironic requests. furiously regular|
+2535|120128|F|91065.74|1993-05-25|5-LOW|Clerk#000000296|0|phins cajole beneath the fluffily express asymptotes. c|
+2560|130771|F|254183.14|1992-09-05|1-URGENT|Clerk#000000538|0|atelets; quickly sly requests|
+2561|57121|O|205987.67|1997-11-14|1-URGENT|Clerk#000000861|0|ual requests. unusual deposits cajole furiously pending, regular platelets. |
+2562|8153|F|175064.15|1992-08-01|1-URGENT|Clerk#000000467|0|elets. pending dolphins promise slyly. bo|
+2563|61123|F|259320.78|1993-11-19|4-NOT SPECIFIED|Clerk#000000150|0|sly even packages after the furio|
+2564|76079|F|4826.38|1994-09-09|2-HIGH|Clerk#000000718|0|usly regular pinto beans. orbits wake carefully. slyly e|
+2565|55304|O|293551.19|1998-02-28|3-MEDIUM|Clerk#000000032|0|x-ray blithely along|
+2566|85478|F|127333.72|1992-10-10|3-MEDIUM|Clerk#000000414|0|ructions boost bold ideas. idly ironic accounts use according to th|
+2567|69388|O|407282.71|1998-02-27|2-HIGH|Clerk#000000031|0|detect. furiously ironic requests|
+2592|100571|F|13636.27|1993-03-05|4-NOT SPECIFIED|Clerk#000000524|0|ts nag fluffily. quickly stealthy theodolite|
+2593|91253|F|147362.04|1993-09-04|2-HIGH|Clerk#000000468|0|r the carefully final|
+2594|77599|F|159213.64|1992-12-17|1-URGENT|Clerk#000000550|0|ests. theodolites above the blithely even accounts detect furio|
+2595|73696|O|269836.82|1995-12-14|4-NOT SPECIFIED|Clerk#000000222|0|arefully ironic requests nag carefully ideas. |
+2596|42328|O|124359.00|1996-08-17|1-URGENT|Clerk#000000242|0|requests. ironic, bold theodolites wak|
+2597|103456|F|39412.20|1993-02-04|2-HIGH|Clerk#000000757|0|iously ruthless exc|
+2598|111955|O|113107.22|1996-03-05|3-MEDIUM|Clerk#000000391|0| ironic notornis according to the blithely final requests should |
+2599|148282|O|102777.64|1996-11-07|2-HIGH|Clerk#000000722|0|ts. slyly regular theodolites wake sil|
+2624|51187|O|41409.87|1996-11-28|5-LOW|Clerk#000000930|0|ic, regular packages|
+2625|38047|F|54164.64|1992-10-14|4-NOT SPECIFIED|Clerk#000000386|0| final deposits. blithely ironic ideas |
+2626|137033|O|116217.91|1995-09-08|4-NOT SPECIFIED|Clerk#000000289|0|gside of the carefully special packages are furiously after the slyly express |
+2627|148424|F|41910.30|1992-03-24|3-MEDIUM|Clerk#000000181|0|s. silent, ruthless requests|
+2628|55228|F|292422.56|1993-10-22|5-LOW|Clerk#000000836|0|ajole across the blithely careful accounts. blithely silent deposits sl|
+2629|137198|O|125694.87|1998-04-06|5-LOW|Clerk#000000680|0|uches dazzle carefully even, express excuses. ac|
+2630|83941|F|192986.10|1992-10-24|5-LOW|Clerk#000000712|0|inal theodolites. ironic instructions s|
+2631|35017|F|106597.85|1993-09-24|5-LOW|Clerk#000000833|0| quickly unusual deposits doubt around |
+2656|76525|F|157915.17|1993-05-04|1-URGENT|Clerk#000000307|0|elets. slyly final accou|
+2657|24589|O|213347.28|1995-10-17|2-HIGH|Clerk#000000160|0| foxes-- slyly final dependencies around the slyly final theodo|
+2658|13384|O|267602.18|1995-09-23|3-MEDIUM|Clerk#000000400|0|bout the slyly regular accounts. ironic, |
+2659|82054|F|120789.36|1993-12-18|4-NOT SPECIFIED|Clerk#000000758|0|cross the pending requests maintain |
+2660|126238|O|29464.99|1995-08-05|5-LOW|Clerk#000000480|0|ly finally regular deposits. ironic theodolites cajole|
+2661|73883|O|126283.42|1997-01-04|3-MEDIUM|Clerk#000000217|0|al, regular pinto beans. silently final deposits should have t|
+2662|36830|O|142981.30|1996-08-21|3-MEDIUM|Clerk#000000589|0|bold pinto beans above the slyly final accounts affix furiously deposits. pac|
+2663|94012|O|42757.07|1995-09-06|1-URGENT|Clerk#000000950|0|ar requests. furiously final dolphins along the fluffily spe|
+2688|97342|F|286232.36|1992-01-24|2-HIGH|Clerk#000000720|0|have to nag according to the pending theodolites. sly|
+2689|101303|F|75066.98|1992-04-09|4-NOT SPECIFIED|Clerk#000000698|0|press pains wake. furiously express theodolites alongsid|
+2690|93499|O|329934.42|1996-03-31|3-MEDIUM|Clerk#000000760|0|ravely even theodolites |
+2691|5860|F|42354.33|1992-04-30|5-LOW|Clerk#000000439|0|es at the regular deposits sleep slyly by the fluffy requests. eve|
+2692|61717|O|35333.21|1997-12-02|3-MEDIUM|Clerk#000000878|0|es. regular asymptotes cajole above t|
+2693|17018|O|119386.42|1996-09-04|1-URGENT|Clerk#000000370|0|ndle never. blithely regular packages nag carefully enticing platelets. ca|
+2694|119089|O|156639.52|1996-03-14|5-LOW|Clerk#000000722|0| requests. bold deposits above the theodol|
+2695|56273|O|207616.18|1996-08-20|1-URGENT|Clerk#000000697|0|ven deposits around the quickly regular packa|
+2720|29966|F|259281.51|1993-06-08|1-URGENT|Clerk#000000948|0|quickly. special asymptotes are fluffily ironi|
+2721|77266|O|84787.30|1996-01-27|2-HIGH|Clerk#000000401|0| ideas eat even, unusual ideas. theodolites are carefully|
+2722|34633|F|88200.39|1994-04-09|5-LOW|Clerk#000000638|0|rding to the carefully quick deposits. bli|
+2723|59146|O|152391.00|1995-10-06|5-LOW|Clerk#000000836|0|nts must have to cajo|
+2724|136825|F|169806.15|1994-09-14|2-HIGH|Clerk#000000217|0| sleep blithely. blithely idle |
+2725|88348|F|126227.05|1994-05-21|4-NOT SPECIFIED|Clerk#000000835|0|ular deposits. spec|
+2726|5663|F|82449.45|1992-11-27|5-LOW|Clerk#000000470|0| blithely even dinos sleep care|
+2727|73849|O|5816.23|1998-04-19|4-NOT SPECIFIED|Clerk#000000879|0|sual theodolites cajole enticingly above the furiously fin|
+2752|58067|F|307274.74|1993-11-19|2-HIGH|Clerk#000000648|0| carefully regular foxes are quickly quickl|
+2753|15388|F|230201.94|1993-11-30|2-HIGH|Clerk#000000380|0|ending instructions. unusual deposits|
+2754|143743|F|39672.40|1994-04-03|2-HIGH|Clerk#000000960|0|cies detect slyly. |
+2755|117655|F|130421.41|1992-02-07|4-NOT SPECIFIED|Clerk#000000177|0|ously according to the sly foxes. blithely regular pinto bean|
+2756|116546|F|210764.12|1994-04-18|1-URGENT|Clerk#000000537|0|arefully special warho|
+2757|75499|O|135260.24|1995-07-20|2-HIGH|Clerk#000000216|0| regular requests subl|
+2758|41179|O|69686.82|1998-07-12|5-LOW|Clerk#000000863|0|s cajole according to the carefully special |
+2759|115717|F|157587.60|1993-11-25|4-NOT SPECIFIED|Clerk#000000071|0|ts. regular, pending pinto beans sleep ab|
+2784|94193|O|160491.43|1998-01-07|1-URGENT|Clerk#000000540|0|g deposits alongside of the silent requests s|
+2785|146008|O|183376.84|1995-07-21|2-HIGH|Clerk#000000098|0|iously pending packages sleep according to the blithely unusual foxe|
+2786|78760|F|245985.01|1992-03-22|2-HIGH|Clerk#000000976|0|al platelets cajole blithely ironic requests. ironic re|
+2787|102814|O|5636.84|1995-09-30|1-URGENT|Clerk#000000906|0|he ironic, regular |
+2788|123353|F|20281.75|1994-09-22|1-URGENT|Clerk#000000641|0|nts wake across the fluffily bold accoun|
+2789|36014|O|368991.44|1998-03-14|2-HIGH|Clerk#000000972|0|gular patterns boost. carefully even re|
+2790|24854|F|294381.64|1994-08-19|2-HIGH|Clerk#000000679|0| the carefully express deposits sleep slyly |
+2791|120686|F|249363.13|1994-10-10|2-HIGH|Clerk#000000662|0|as. slyly ironic accounts play furiously bl|
+2816|56695|F|54339.69|1994-09-20|2-HIGH|Clerk#000000289|0|kages at the final deposits cajole furious foxes. quickly |
+2817|39514|F|97061.86|1994-04-19|3-MEDIUM|Clerk#000000982|0|ic foxes haggle upon the daringly even pinto beans. slyly|
+2818|48157|F|186074.95|1994-12-12|3-MEDIUM|Clerk#000000413|0|eep furiously special ideas. express |
+2819|101843|F|106478.85|1994-05-05|1-URGENT|Clerk#000000769|0|ngside of the blithely ironic dolphins. furio|
+2820|18062|F|181656.19|1994-05-20|3-MEDIUM|Clerk#000000807|0|equests are furiously. carefu|
+2821|116528|F|57739.46|1993-08-09|3-MEDIUM|Clerk#000000323|0|ng requests. even instructions are quickly express, silent instructi|
+2822|78767|F|69341.69|1993-07-26|2-HIGH|Clerk#000000510|0|furiously against the accounts. unusual accounts aft|
+2823|78017|O|281301.17|1995-09-09|2-HIGH|Clerk#000000567|0|encies. carefully fluffy accounts m|
+2848|68851|F|169913.72|1992-03-10|1-URGENT|Clerk#000000256|0|ly fluffy foxes sleep furiously across the slyly regu|
+2849|45496|O|282488.42|1996-04-30|2-HIGH|Clerk#000000659|0|al packages are after the quickly bold requests. carefully special |
+2850|99137|O|207301.24|1996-10-02|2-HIGH|Clerk#000000392|0|, regular deposits. furiously pending packages hinder carefully carefully u|
+2851|144481|O|11243.46|1997-09-07|5-LOW|Clerk#000000566|0|Tiresias wake quickly quickly even|
+2852|89122|F|119836.61|1993-01-16|1-URGENT|Clerk#000000740|0|ruthless deposits against the final instructions use quickly al|
+2853|92818|F|148957.79|1994-05-05|2-HIGH|Clerk#000000878|0|the carefully even packages.|
+2854|138295|F|209070.53|1994-06-27|1-URGENT|Clerk#000000010|0| furiously ironic tithes use furiously |
+2855|47990|F|60002.03|1993-04-04|4-NOT SPECIFIED|Clerk#000000973|0| silent, regular packages sleep |
+2880|7870|F|221353.47|1992-03-15|2-HIGH|Clerk#000000756|0|ves maintain doggedly spec|
+2881|98539|F|57299.53|1992-05-10|5-LOW|Clerk#000000864|0|uriously. slyly express requests according to the silent dol|
+2882|120092|O|263220.81|1995-08-22|2-HIGH|Clerk#000000891|0|pending deposits. carefully eve|
+2883|120787|F|242467.87|1995-01-23|5-LOW|Clerk#000000180|0|uses. carefully ironic accounts lose fluffil|
+2884|91375|O|95281.29|1997-10-12|3-MEDIUM|Clerk#000000780|0|efully express instructions sleep against|
+2885|6022|F|205387.44|1992-09-19|4-NOT SPECIFIED|Clerk#000000280|0|ly sometimes special excuses. final requests are |
+2886|107435|F|177118.79|1994-11-13|4-NOT SPECIFIED|Clerk#000000619|0|uctions. ironic packages sle|
+2887|107426|O|43570.56|1997-05-26|5-LOW|Clerk#000000566|0|slyly even pinto beans. slyly bold epitaphs cajole blithely above t|
+2912|92009|F|42408.84|1992-03-12|5-LOW|Clerk#000000186|0|jole blithely above the quickly regular packages. carefully regular pinto bean|
+2913|42205|O|218390.62|1997-07-12|3-MEDIUM|Clerk#000000118|0|mptotes doubt furiously slyly regu|
+2914|107015|F|86207.41|1993-03-03|3-MEDIUM|Clerk#000000543|0|he slyly regular theodolites are furiously sile|
+2915|92489|F|114403.37|1994-03-31|5-LOW|Clerk#000000410|0|ld packages. bold deposits boost blithely. ironic, unusual theodoli|
+2916|7351|O|30455.66|1995-12-27|2-HIGH|Clerk#000000681|0|ithely blithe deposits sleep beyond the|
+2917|90730|O|161411.48|1997-12-09|4-NOT SPECIFIED|Clerk#000000061|0| special dugouts among the special deposi|
+2918|117481|O|30911.58|1996-09-08|3-MEDIUM|Clerk#000000439|0|ular deposits across th|
+2919|52543|F|192121.75|1993-12-10|2-HIGH|Clerk#000000209|0|es. pearls wake quietly slyly ironic instructions--|
+2944|13532|O|234481.13|1997-09-24|4-NOT SPECIFIED|Clerk#000000740|0|deas. permanently special foxes haggle carefully ab|
+2945|28157|O|319752.31|1996-01-03|2-HIGH|Clerk#000000499|0|ons are carefully toward the permanent, bold pinto beans. regu|
+2946|124123|O|148466.74|1996-02-05|5-LOW|Clerk#000000329|0|g instructions about the regular accounts sleep carefully along the pen|
+2947|69538|P|79951.00|1995-04-26|1-URGENT|Clerk#000000464|0|ronic accounts. accounts run furiously d|
+2948|43723|F|159855.90|1994-08-23|5-LOW|Clerk#000000701|0| deposits according to the blithely pending |
+2949|136237|F|133973.42|1994-04-12|2-HIGH|Clerk#000000184|0|y ironic accounts use. quickly blithe accou|
+2950|134485|O|297421.11|1997-07-06|1-URGENT|Clerk#000000833|0| dolphins around the furiously |
+2951|73624|O|157192.83|1996-02-06|2-HIGH|Clerk#000000680|0|gular deposits above the finally regular ideas integrate idly stealthil|
+2976|28030|F|204008.39|1993-12-10|4-NOT SPECIFIED|Clerk#000000159|0|. furiously ironic asymptotes haggle ruthlessly silently regular r|
+2977|71090|O|45116.47|1996-08-27|3-MEDIUM|Clerk#000000252|0|quickly special platelets are furio|
+2978|43750|P|242965.87|1995-05-03|1-URGENT|Clerk#000000135|0|d. even platelets are. ironic dependencies cajole slow, e|
+2979|132928|O|177712.18|1996-03-23|3-MEDIUM|Clerk#000000820|0|even, ironic foxes sleep along|
+2980|3743|O|279365.71|1996-09-14|3-MEDIUM|Clerk#000000661|0|y quick pinto beans wake. slyly re|
+2981|47389|O|58766.88|1998-07-29|5-LOW|Clerk#000000299|0|hely among the express foxes. blithely stealthy requests cajole boldly. regu|
+2982|84308|F|86439.95|1995-03-19|2-HIGH|Clerk#000000402|0|lyly. express theodolites affix slyly after the slyly speci|
+2983|61165|F|66398.29|1992-01-07|1-URGENT|Clerk#000000278|0|r the even requests. accounts maintain. regular accounts|
+3008|39310|O|190301.90|1995-11-08|3-MEDIUM|Clerk#000000701|0|ze quickly. blithely regular packages above the slyly bold foxes shall|
+3009|54398|O|188106.76|1997-02-28|1-URGENT|Clerk#000000205|0|r ideas. carefully pe|
+3010|7459|O|223510.43|1996-01-26|2-HIGH|Clerk#000000931|0| blithely final requests. special deposits are slyl|
+3011|89752|F|77704.97|1992-01-14|5-LOW|Clerk#000000515|0|onic deposits kindle slyly. dependencies around the quickly iro|
+3012|31345|F|113122.00|1993-05-05|1-URGENT|Clerk#000000414|0|ts after the regular pinto beans impress blithely s|
+3013|142238|O|269815.39|1997-02-05|5-LOW|Clerk#000000591|0|the furiously pendin|
+3014|28745|F|277661.69|1992-10-30|4-NOT SPECIFIED|Clerk#000000476|0|ep blithely according to the blith|
+3015|101527|F|162415.39|1992-09-27|5-LOW|Clerk#000000013|0|ously regular deposits affix carefully. furiousl|
+3040|110666|F|181298.05|1993-04-12|3-MEDIUM|Clerk#000000544|0|carefully special packages. blithe|
+3041|112571|O|37519.81|1997-06-03|5-LOW|Clerk#000000092|0|s. unusual, pending deposits use carefully. thinly final|
+3042|19879|F|171849.26|1994-11-21|3-MEDIUM|Clerk#000000573|0| the slyly ironic depo|
+3043|43334|F|111837.88|1992-04-25|5-LOW|Clerk#000000137|0|cajole blithely furiously fina|
+3044|52723|O|82856.88|1996-04-03|2-HIGH|Clerk#000000008|0|cajole final courts. ironic deposits about the quickly final re|
+3045|49795|O|117398.68|1995-09-27|1-URGENT|Clerk#000000405|0| express courts sleep quickly special asymptotes. |
+3046|31373|O|176436.62|1995-11-30|2-HIGH|Clerk#000000522|0|r deposits. platelets use furi|
+3047|24563|O|76626.65|1997-03-21|1-URGENT|Clerk#000000962|0|as. slyly express deposits are dogged pearls. silent ide|
+3072|22438|F|131016.14|1994-01-30|4-NOT SPECIFIED|Clerk#000000370|0|ely final deposits cajole carefully. ironic, re|
+3073|135226|F|253550.24|1994-01-08|3-MEDIUM|Clerk#000000404|0|kly slyly bold accounts. express courts near the regular ideas sleep bli|
+3074|65659|F|129096.60|1992-11-01|5-LOW|Clerk#000000546|0|yly even asymptotes shall have to haggle fluffily. deposits are|
+3075|125071|F|73647.18|1994-05-07|3-MEDIUM|Clerk#000000433|0|ackages: carefully unusual reques|
+3076|91354|F|124595.32|1993-07-23|2-HIGH|Clerk#000000099|0|busy foxes. deposits affix quickly ironic, pending pint|
+3077|120703|O|147077.46|1997-08-06|2-HIGH|Clerk#000000228|0|kly. fluffily ironic requests use qui|
+3078|47210|F|60977.04|1993-02-12|2-HIGH|Clerk#000000110|0|ounts are alongside of the blith|
+3079|98518|O|214396.25|1997-09-12|5-LOW|Clerk#000000505|0|lly ironic accounts|
+3104|68837|F|175130.65|1993-09-16|3-MEDIUM|Clerk#000000871|0|ges boost-- regular accounts are furiousl|
+3105|136471|O|179233.84|1996-11-13|4-NOT SPECIFIED|Clerk#000000772|0|s. blithely final ins|
+3106|144469|O|231617.72|1997-01-12|3-MEDIUM|Clerk#000000729|0|its use slyly final theodolites; regular dolphins hang above t|
+3107|25316|O|153692.68|1997-08-21|1-URGENT|Clerk#000000669|0|ously even deposits acr|
+3108|84331|F|120387.80|1993-08-05|1-URGENT|Clerk#000000574|0|s packages haggle furiously am|
+3109|122080|F|295217.85|1993-07-24|5-LOW|Clerk#000000936|0|bold requests sleep quickly according to the slyly final|
+3110|87293|F|157451.80|1994-12-17|2-HIGH|Clerk#000000564|0|round the fluffy instructions. carefully silent packages cajol|
+3111|132733|O|208128.46|1995-08-25|5-LOW|Clerk#000000922|0|slyly regular theodolites. furious deposits cajole deposits. ironic theodoli|
+3136|22954|F|199929.44|1994-08-10|4-NOT SPECIFIED|Clerk#000000891|0|tructions sleep slyly. pending di|
+3137|134096|O|11788.67|1995-07-26|3-MEDIUM|Clerk#000000063|0|ymptotes wake carefully above t|
+3138|138961|F|195207.34|1994-02-09|4-NOT SPECIFIED|Clerk#000000650|0|e fluffily final theodolites. even dependencies wake along the quickly ir|
+3139|16475|F|54456.91|1992-01-02|3-MEDIUM|Clerk#000000855|0|ounts against the ruthlessly unusual dolphins|
+3140|144880|F|88865.00|1992-04-09|1-URGENT|Clerk#000000670|0|carefully ironic deposits use furiously. blith|
+3141|25108|O|186498.52|1995-11-10|1-URGENT|Clerk#000000475|0|es. furiously bold instructions after the carefully final p|
+3142|7237|F|19551.67|1992-06-28|3-MEDIUM|Clerk#000000043|0|usual accounts about the carefully special requests sleep slyly quickly regul|
+3143|106750|F|204967.06|1993-02-17|1-URGENT|Clerk#000000519|0| are final, ironic accounts. ironic |
+3168|135731|F|116852.47|1992-01-30|5-LOW|Clerk#000000352|0|s sleep slyly? ironic, furious instructions detect. quickly final i|
+3169|18011|F|176508.38|1993-12-21|3-MEDIUM|Clerk#000000252|0| even pinto beans are blithely special, special multip|
+3170|4243|O|293842.55|1997-11-09|1-URGENT|Clerk#000000288|0|requests. furiously bold|


<TRUNCATED>

[40/50] [abbrv] carbondata git commit: [CARBONDATA-2475] Support Modular Core for Materialized View DataMap for query matching and rewriting

Posted by ma...@apache.org.
http://git-wip-us.apache.org/repos/asf/carbondata/blob/2881c6bb/integration/spark-common-test/src/test/resources/tpch/lineitem.csv
----------------------------------------------------------------------
diff --git a/integration/spark-common-test/src/test/resources/tpch/lineitem.csv b/integration/spark-common-test/src/test/resources/tpch/lineitem.csv
new file mode 100644
index 0000000..e792319
--- /dev/null
+++ b/integration/spark-common-test/src/test/resources/tpch/lineitem.csv
@@ -0,0 +1,1000 @@
+1|155190|7706|1|17|21168.23|0.04|0.02|N|O|1996-03-13|1996-02-12|1996-03-22|DELIVER IN PERSON|TRUCK|egular courts above the|
+1|67310|7311|2|36|45983.16|0.09|0.06|N|O|1996-04-12|1996-02-28|1996-04-20|TAKE BACK RETURN|MAIL|ly final dependencies: slyly bold |
+1|63700|3701|3|8|13309.60|0.10|0.02|N|O|1996-01-29|1996-03-05|1996-01-31|TAKE BACK RETURN|REG AIR|riously. regular, express dep|
+1|2132|4633|4|28|28955.64|0.09|0.06|N|O|1996-04-21|1996-03-30|1996-05-16|NONE|AIR|lites. fluffily even de|
+1|24027|1534|5|24|22824.48|0.10|0.04|N|O|1996-03-30|1996-03-14|1996-04-01|NONE|FOB| pending foxes. slyly re|
+1|15635|638|6|32|49620.16|0.07|0.02|N|O|1996-01-30|1996-02-07|1996-02-03|DELIVER IN PERSON|MAIL|arefully slyly ex|
+2|106170|1191|1|38|44694.46|0.00|0.05|N|O|1997-01-28|1997-01-14|1997-02-02|TAKE BACK RETURN|RAIL|ven requests. deposits breach a|
+3|4297|1798|1|45|54058.05|0.06|0.00|R|F|1994-02-02|1994-01-04|1994-02-23|NONE|AIR|ongside of the furiously brave acco|
+3|19036|6540|2|49|46796.47|0.10|0.00|R|F|1993-11-09|1993-12-20|1993-11-24|TAKE BACK RETURN|RAIL| unusual accounts. eve|
+3|128449|3474|3|27|39890.88|0.06|0.07|A|F|1994-01-16|1993-11-22|1994-01-23|DELIVER IN PERSON|SHIP|nal foxes wake. |
+3|29380|1883|4|2|2618.76|0.01|0.06|A|F|1993-12-04|1994-01-07|1994-01-01|NONE|TRUCK|y. fluffily pending d|
+3|183095|650|5|28|32986.52|0.04|0.00|R|F|1993-12-14|1994-01-10|1994-01-01|TAKE BACK RETURN|FOB|ages nag slyly pending|
+3|62143|9662|6|26|28733.64|0.10|0.02|A|F|1993-10-29|1993-12-18|1993-11-04|TAKE BACK RETURN|RAIL|ges sleep after the caref|
+4|88035|5560|1|30|30690.90|0.03|0.08|N|O|1996-01-10|1995-12-14|1996-01-18|DELIVER IN PERSON|REG AIR|- quickly regular packages sleep. idly|
+5|108570|8571|1|15|23678.55|0.02|0.04|R|F|1994-10-31|1994-08-31|1994-11-20|NONE|AIR|ts wake furiously |
+5|123927|3928|2|26|50723.92|0.07|0.08|R|F|1994-10-16|1994-09-25|1994-10-19|NONE|FOB|sts use slyly quickly special instruc|
+5|37531|35|3|50|73426.50|0.08|0.03|A|F|1994-08-08|1994-10-13|1994-08-26|DELIVER IN PERSON|AIR|eodolites. fluffily unusual|
+6|139636|2150|1|37|61998.31|0.08|0.03|A|F|1992-04-27|1992-05-15|1992-05-02|TAKE BACK RETURN|TRUCK|p furiously special foxes|
+7|182052|9607|1|12|13608.60|0.07|0.03|N|O|1996-05-07|1996-03-13|1996-06-03|TAKE BACK RETURN|FOB|ss pinto beans wake against th|
+7|145243|7758|2|9|11594.16|0.08|0.08|N|O|1996-02-01|1996-03-02|1996-02-19|TAKE BACK RETURN|SHIP|es. instructions|
+7|94780|9799|3|46|81639.88|0.10|0.07|N|O|1996-01-15|1996-03-27|1996-02-03|COLLECT COD|MAIL| unusual reques|
+7|163073|3074|4|28|31809.96|0.03|0.04|N|O|1996-03-21|1996-04-08|1996-04-20|NONE|FOB|. slyly special requests haggl|
+7|151894|9440|5|38|73943.82|0.08|0.01|N|O|1996-02-11|1996-02-24|1996-02-18|DELIVER IN PERSON|TRUCK|ns haggle carefully ironic deposits. bl|
+7|79251|1759|6|35|43058.75|0.06|0.03|N|O|1996-01-16|1996-02-23|1996-01-22|TAKE BACK RETURN|FOB|jole. excuses wake carefully alongside of |
+7|157238|2269|7|5|6476.15|0.04|0.02|N|O|1996-02-10|1996-03-26|1996-02-13|NONE|FOB|ithely regula|
+32|82704|7721|1|28|47227.60|0.05|0.08|N|O|1995-10-23|1995-08-27|1995-10-26|TAKE BACK RETURN|TRUCK|sleep quickly. req|
+32|197921|441|2|32|64605.44|0.02|0.00|N|O|1995-08-14|1995-10-07|1995-08-27|COLLECT COD|AIR|lithely regular deposits. fluffily |
+32|44161|6666|3|2|2210.32|0.09|0.02|N|O|1995-08-07|1995-10-07|1995-08-23|DELIVER IN PERSON|AIR| express accounts wake according to the|
+32|2743|7744|4|4|6582.96|0.09|0.03|N|O|1995-08-04|1995-10-01|1995-09-03|NONE|REG AIR|e slyly final pac|
+32|85811|8320|5|44|79059.64|0.05|0.06|N|O|1995-08-28|1995-08-20|1995-09-14|DELIVER IN PERSON|AIR|symptotes nag according to the ironic depo|
+32|11615|4117|6|6|9159.66|0.04|0.03|N|O|1995-07-21|1995-09-23|1995-07-25|COLLECT COD|RAIL| gifts cajole carefully.|
+33|61336|8855|1|31|40217.23|0.09|0.04|A|F|1993-10-29|1993-12-19|1993-11-08|COLLECT COD|TRUCK|ng to the furiously ironic package|
+33|60519|5532|2|32|47344.32|0.02|0.05|A|F|1993-12-09|1994-01-04|1993-12-28|COLLECT COD|MAIL|gular theodolites|
+33|137469|9983|3|5|7532.30|0.05|0.03|A|F|1993-12-09|1993-12-25|1993-12-23|TAKE BACK RETURN|AIR|. stealthily bold exc|
+33|33918|3919|4|41|75928.31|0.09|0.00|R|F|1993-11-09|1994-01-24|1993-11-11|TAKE BACK RETURN|MAIL|unusual packages doubt caref|
+34|88362|871|1|13|17554.68|0.00|0.07|N|O|1998-10-23|1998-09-14|1998-11-06|NONE|REG AIR|nic accounts. deposits are alon|
+34|89414|1923|2|22|30875.02|0.08|0.06|N|O|1998-10-09|1998-10-16|1998-10-12|NONE|FOB|thely slyly p|
+34|169544|4577|3|6|9681.24|0.02|0.06|N|O|1998-10-30|1998-09-20|1998-11-05|NONE|FOB|ar foxes sleep |
+35|450|2951|1|24|32410.80|0.02|0.00|N|O|1996-02-21|1996-01-03|1996-03-18|TAKE BACK RETURN|FOB|, regular tithe|
+35|161940|4457|2|34|68065.96|0.06|0.08|N|O|1996-01-22|1996-01-06|1996-01-27|DELIVER IN PERSON|RAIL|s are carefully against the f|
+35|120896|8433|3|7|13418.23|0.06|0.04|N|O|1996-01-19|1995-12-22|1996-01-29|NONE|MAIL| the carefully regular |
+35|85175|7684|4|25|29004.25|0.06|0.05|N|O|1995-11-26|1995-12-25|1995-12-21|DELIVER IN PERSON|SHIP| quickly unti|
+35|119917|4940|5|34|65854.94|0.08|0.06|N|O|1995-11-08|1996-01-15|1995-11-26|COLLECT COD|MAIL|. silent, unusual deposits boost|
+35|30762|3266|6|28|47397.28|0.03|0.02|N|O|1996-02-01|1995-12-24|1996-02-28|COLLECT COD|RAIL|ly alongside of |
+36|119767|9768|1|42|75043.92|0.09|0.00|N|O|1996-02-03|1996-01-21|1996-02-23|COLLECT COD|SHIP| careful courts. special |
+37|22630|5133|1|40|62105.20|0.09|0.03|A|F|1992-07-21|1992-08-01|1992-08-15|NONE|REG AIR|luffily regular requests. slyly final acco|
+37|126782|1807|2|39|70542.42|0.05|0.02|A|F|1992-07-02|1992-08-18|1992-07-28|TAKE BACK RETURN|RAIL|the final requests. ca|
+37|12903|5405|3|43|78083.70|0.05|0.08|A|F|1992-07-10|1992-07-06|1992-08-02|DELIVER IN PERSON|TRUCK|iously ste|
+38|175839|874|1|44|84252.52|0.04|0.02|N|O|1996-09-29|1996-11-17|1996-09-30|COLLECT COD|MAIL|s. blithely unusual theodolites am|
+39|2320|9821|1|44|53782.08|0.09|0.06|N|O|1996-11-14|1996-12-15|1996-12-12|COLLECT COD|RAIL|eodolites. careful|
+39|186582|4137|2|26|43383.08|0.08|0.04|N|O|1996-11-04|1996-10-20|1996-11-20|NONE|FOB|ckages across the slyly silent|
+39|67831|5350|3|46|82746.18|0.06|0.08|N|O|1996-09-26|1996-12-19|1996-10-26|DELIVER IN PERSON|AIR|he carefully e|
+39|20590|3093|4|32|48338.88|0.07|0.05|N|O|1996-10-02|1996-12-19|1996-10-14|COLLECT COD|MAIL|heodolites sleep silently pending foxes. ac|
+39|54519|9530|5|43|63360.93|0.01|0.01|N|O|1996-10-17|1996-11-14|1996-10-26|COLLECT COD|MAIL|yly regular i|
+39|94368|6878|6|40|54494.40|0.06|0.05|N|O|1996-12-08|1996-10-22|1997-01-01|COLLECT COD|AIR|quickly ironic fox|
+64|85951|5952|1|21|40675.95|0.05|0.02|R|F|1994-09-30|1994-09-18|1994-10-26|DELIVER IN PERSON|REG AIR|ch slyly final, thin platelets.|
+65|59694|4705|1|26|42995.94|0.03|0.03|A|F|1995-04-20|1995-04-25|1995-05-13|NONE|TRUCK|pending deposits nag even packages. ca|
+65|73815|8830|2|22|39353.82|0.00|0.05|N|O|1995-07-17|1995-06-04|1995-07-19|COLLECT COD|FOB| ideas. special, r|
+65|1388|3889|3|21|27076.98|0.09|0.07|N|O|1995-07-06|1995-05-14|1995-07-31|DELIVER IN PERSON|RAIL|bove the even packages. accounts nag carefu|
+66|115118|7630|1|31|35126.41|0.00|0.08|R|F|1994-02-19|1994-03-11|1994-02-20|TAKE BACK RETURN|RAIL|ut the unusual accounts sleep at the bo|
+66|173489|3490|2|41|64061.68|0.04|0.07|A|F|1994-02-21|1994-03-01|1994-03-18|COLLECT COD|AIR| regular de|
+67|21636|9143|1|4|6230.52|0.09|0.04|N|O|1997-04-17|1997-01-31|1997-04-20|NONE|SHIP| cajole thinly expres|
+67|20193|5198|2|12|13358.28|0.09|0.05|N|O|1997-01-27|1997-02-21|1997-02-22|NONE|REG AIR| even packages cajole|
+67|173600|6118|3|5|8368.00|0.03|0.07|N|O|1997-02-20|1997-02-12|1997-02-21|DELIVER IN PERSON|TRUCK|y unusual packages thrash pinto |
+67|87514|7515|4|44|66066.44|0.08|0.06|N|O|1997-03-18|1997-01-29|1997-04-13|DELIVER IN PERSON|RAIL|se quickly above the even, express reques|
+67|40613|8126|5|23|35733.03|0.05|0.07|N|O|1997-04-19|1997-02-14|1997-05-06|DELIVER IN PERSON|REG AIR|ly regular deposit|
+67|178306|824|6|29|40144.70|0.02|0.05|N|O|1997-01-25|1997-01-27|1997-01-27|DELIVER IN PERSON|FOB|ultipliers |
+68|7068|9569|1|3|2925.18|0.05|0.02|N|O|1998-07-04|1998-06-05|1998-07-21|NONE|RAIL|fully special instructions cajole. furious|
+68|175180|2732|2|46|57738.28|0.02|0.05|N|O|1998-06-26|1998-06-07|1998-07-05|NONE|MAIL| requests are unusual, regular pinto |
+68|34980|7484|3|46|88089.08|0.04|0.05|N|O|1998-08-13|1998-07-08|1998-08-29|NONE|RAIL|egular dependencies affix ironically along |
+68|94728|2256|4|20|34454.40|0.07|0.01|N|O|1998-06-27|1998-05-23|1998-07-02|NONE|REG AIR| excuses integrate fluffily |
+68|82758|5267|5|27|47000.25|0.03|0.06|N|O|1998-06-19|1998-06-25|1998-06-29|DELIVER IN PERSON|SHIP|ccounts. deposits use. furiously|
+68|102561|5072|6|30|46906.80|0.05|0.06|N|O|1998-08-11|1998-07-11|1998-08-14|NONE|RAIL|oxes are slyly blithely fin|
+68|139247|1761|7|41|52735.84|0.09|0.08|N|O|1998-06-24|1998-06-27|1998-07-06|NONE|SHIP|eposits nag special ideas. furiousl|
+69|115209|7721|1|48|58761.60|0.01|0.07|A|F|1994-08-17|1994-08-11|1994-09-08|NONE|TRUCK|regular epitaphs. carefully even ideas hag|
+69|104180|9201|2|32|37893.76|0.08|0.06|A|F|1994-08-24|1994-08-17|1994-08-31|NONE|REG AIR|s sleep carefully bold, |
+69|137267|4807|3|17|22172.42|0.09|0.00|A|F|1994-07-02|1994-07-07|1994-07-03|TAKE BACK RETURN|AIR|final, pending instr|
+69|37502|2509|4|3|4318.50|0.09|0.04|R|F|1994-06-06|1994-07-27|1994-06-15|NONE|MAIL| blithely final d|
+69|92070|7089|5|42|44606.94|0.07|0.04|R|F|1994-07-31|1994-07-26|1994-08-28|DELIVER IN PERSON|REG AIR|tect regular, speci|
+69|18504|1006|6|23|32717.50|0.05|0.00|A|F|1994-10-03|1994-08-06|1994-10-24|NONE|SHIP|nding accounts ca|
+70|64128|9141|1|8|8736.96|0.03|0.08|R|F|1994-01-12|1994-02-27|1994-01-14|TAKE BACK RETURN|FOB|ggle. carefully pending dependenc|
+70|196156|1195|2|13|16277.95|0.06|0.06|A|F|1994-03-03|1994-02-13|1994-03-26|COLLECT COD|AIR|lyly special packag|
+70|179809|7361|3|1|1888.80|0.03|0.05|R|F|1994-01-26|1994-03-05|1994-01-28|TAKE BACK RETURN|RAIL|quickly. fluffily unusual theodolites c|
+70|45734|743|4|11|18477.03|0.01|0.05|A|F|1994-03-17|1994-03-17|1994-03-27|NONE|MAIL|alongside of the deposits. fur|
+70|37131|2138|5|37|39520.81|0.09|0.04|R|F|1994-02-13|1994-03-16|1994-02-21|COLLECT COD|MAIL|n accounts are. q|
+70|55655|3171|6|19|30602.35|0.06|0.03|A|F|1994-01-26|1994-02-17|1994-02-06|TAKE BACK RETURN|SHIP| packages wake pending accounts.|
+71|61931|1932|1|25|47323.25|0.09|0.07|N|O|1998-04-10|1998-04-22|1998-04-11|COLLECT COD|FOB|ckly. slyly|
+71|65916|3435|2|3|5645.73|0.09|0.07|N|O|1998-05-23|1998-04-03|1998-06-02|COLLECT COD|SHIP|y. pinto beans haggle after the|
+71|34432|1942|3|45|61489.35|0.00|0.07|N|O|1998-02-23|1998-03-20|1998-03-24|DELIVER IN PERSON|SHIP| ironic packages believe blithely a|
+71|96645|9155|4|33|54174.12|0.00|0.01|N|O|1998-04-12|1998-03-20|1998-04-15|NONE|FOB| serve quickly fluffily bold deposi|
+71|103255|5766|5|39|49071.75|0.08|0.06|N|O|1998-01-29|1998-04-07|1998-02-18|DELIVER IN PERSON|RAIL|l accounts sleep across the pack|
+71|195635|674|6|34|58841.42|0.04|0.01|N|O|1998-03-05|1998-04-22|1998-03-30|DELIVER IN PERSON|TRUCK|s cajole. |
+96|123076|613|1|23|25278.61|0.10|0.06|A|F|1994-07-19|1994-06-29|1994-07-25|DELIVER IN PERSON|TRUCK|ep-- carefully reg|
+96|135390|5391|2|30|42761.70|0.01|0.06|R|F|1994-06-03|1994-05-29|1994-06-22|DELIVER IN PERSON|TRUCK|e quickly even ideas. furiou|
+97|119477|1989|1|13|19454.11|0.00|0.02|R|F|1993-04-01|1993-04-04|1993-04-08|NONE|TRUCK|ayers cajole against the furiously|
+97|49568|2073|2|37|56149.72|0.02|0.06|A|F|1993-04-13|1993-03-30|1993-04-14|DELIVER IN PERSON|SHIP|ic requests boost carefully quic|
+97|77699|5221|3|19|31857.11|0.06|0.08|R|F|1993-05-14|1993-03-05|1993-05-25|TAKE BACK RETURN|RAIL|gifts. furiously ironic packages cajole. |
+98|40216|217|1|28|32373.88|0.06|0.07|A|F|1994-12-24|1994-10-25|1995-01-16|COLLECT COD|REG AIR| pending, regular accounts s|
+98|109743|7274|2|1|1752.74|0.00|0.00|A|F|1994-12-01|1994-12-12|1994-12-15|DELIVER IN PERSON|TRUCK|. unusual instructions against|
+98|44706|4707|3|14|23109.80|0.05|0.02|A|F|1994-12-30|1994-11-22|1995-01-27|COLLECT COD|AIR| cajole furiously. blithely ironic ideas |
+98|167180|7181|4|10|12471.80|0.03|0.03|A|F|1994-10-23|1994-11-08|1994-11-09|COLLECT COD|RAIL| carefully. quickly ironic ideas|
+99|87114|4639|1|10|11011.10|0.02|0.01|A|F|1994-05-18|1994-06-03|1994-05-23|COLLECT COD|RAIL|kages. requ|
+99|123766|3767|2|5|8948.80|0.02|0.07|R|F|1994-05-06|1994-05-28|1994-05-20|TAKE BACK RETURN|RAIL|ests cajole fluffily waters. blithe|
+99|134082|1622|3|42|46875.36|0.02|0.02|A|F|1994-04-19|1994-05-18|1994-04-20|NONE|RAIL|kages are fluffily furiously ir|
+99|108338|849|4|36|48467.88|0.09|0.02|A|F|1994-07-04|1994-04-17|1994-07-30|DELIVER IN PERSON|AIR|slyly. slyly e|
+100|62029|2030|1|28|27748.56|0.04|0.05|N|O|1998-05-08|1998-05-13|1998-06-07|COLLECT COD|TRUCK|sts haggle. slowl|
+100|115979|8491|2|22|43889.34|0.00|0.07|N|O|1998-06-24|1998-04-12|1998-06-29|DELIVER IN PERSON|SHIP|nto beans alongside of the fi|
+100|46150|8655|3|46|50422.90|0.03|0.04|N|O|1998-05-02|1998-04-10|1998-05-22|TAKE BACK RETURN|SHIP|ular accounts. even|
+100|38024|3031|4|14|13468.28|0.06|0.03|N|O|1998-05-22|1998-05-01|1998-06-03|COLLECT COD|MAIL|y. furiously ironic ideas gr|
+100|53439|955|5|37|51519.91|0.05|0.00|N|O|1998-03-06|1998-04-16|1998-03-31|TAKE BACK RETURN|TRUCK|nd the quickly s|
+101|118282|5816|1|49|63713.72|0.10|0.00|N|O|1996-06-21|1996-05-27|1996-06-29|DELIVER IN PERSON|REG AIR|ts-- final packages sleep furiousl|
+101|163334|883|2|36|50303.88|0.00|0.01|N|O|1996-05-19|1996-05-01|1996-06-04|DELIVER IN PERSON|AIR|tes. blithely pending dolphins x-ray f|
+101|138418|5958|3|12|17476.92|0.06|0.02|N|O|1996-03-29|1996-04-20|1996-04-12|COLLECT COD|MAIL|. quickly regular|
+102|88914|3931|1|37|70407.67|0.06|0.00|N|O|1997-07-24|1997-08-02|1997-08-07|TAKE BACK RETURN|SHIP|ully across the ideas. final deposit|
+102|169238|6787|2|34|44445.82|0.03|0.08|N|O|1997-08-09|1997-07-28|1997-08-26|TAKE BACK RETURN|SHIP|eposits cajole across|
+102|182321|4840|3|25|35083.00|0.01|0.01|N|O|1997-07-31|1997-07-24|1997-08-17|NONE|RAIL|bits. ironic accoun|
+102|61158|8677|4|15|16787.25|0.07|0.07|N|O|1997-06-02|1997-07-13|1997-06-04|DELIVER IN PERSON|SHIP|final packages. carefully even excu|
+103|194658|2216|1|6|10515.90|0.03|0.05|N|O|1996-10-11|1996-07-25|1996-10-28|NONE|FOB|cajole. carefully ex|
+103|10426|2928|2|37|49447.54|0.02|0.07|N|O|1996-09-17|1996-07-27|1996-09-20|TAKE BACK RETURN|MAIL|ies. quickly ironic requests use blithely|
+103|28431|8432|3|23|31266.89|0.01|0.04|N|O|1996-09-11|1996-09-18|1996-09-26|NONE|FOB|ironic accou|
+103|29022|4027|4|32|30432.64|0.01|0.07|N|O|1996-07-30|1996-08-06|1996-08-04|NONE|RAIL|kages doze. special, regular deposit|
+128|106828|9339|1|38|69723.16|0.06|0.01|A|F|1992-09-01|1992-08-27|1992-10-01|TAKE BACK RETURN|FOB| cajole careful|
+129|2867|5368|1|46|81413.56|0.08|0.02|R|F|1993-02-15|1993-01-24|1993-03-05|COLLECT COD|TRUCK|uietly bold theodolites. fluffil|
+129|185164|5165|2|36|44969.76|0.01|0.02|A|F|1992-11-25|1992-12-25|1992-12-09|TAKE BACK RETURN|REG AIR|packages are care|
+129|39444|1948|3|33|45653.52|0.04|0.06|A|F|1993-01-08|1993-02-14|1993-01-29|COLLECT COD|SHIP|sts nag bravely. fluffily|
+129|135137|164|4|34|39852.42|0.00|0.01|R|F|1993-01-29|1993-02-14|1993-02-10|COLLECT COD|MAIL|quests. express ideas|
+129|31373|8883|5|24|31304.88|0.06|0.00|A|F|1992-12-07|1993-01-02|1992-12-11|TAKE BACK RETURN|FOB|uests. foxes cajole slyly after the ca|
+129|77050|4572|6|22|22595.10|0.06|0.01|R|F|1993-02-15|1993-01-31|1993-02-24|COLLECT COD|SHIP|e. fluffily regular |
+129|168569|3602|7|1|1637.56|0.05|0.04|R|F|1993-01-26|1993-01-08|1993-02-24|DELIVER IN PERSON|FOB|e carefully blithely bold dolp|
+130|128816|8817|1|14|25827.34|0.08|0.05|A|F|1992-08-15|1992-07-25|1992-09-13|COLLECT COD|RAIL| requests. final instruction|
+130|1739|4240|2|48|78755.04|0.03|0.02|R|F|1992-07-01|1992-07-12|1992-07-24|NONE|AIR|lithely alongside of the regu|
+130|11860|1861|3|18|31893.48|0.04|0.08|A|F|1992-07-04|1992-06-14|1992-07-29|DELIVER IN PERSON|MAIL| slyly ironic decoys abou|
+130|115635|3169|4|13|21458.19|0.09|0.02|R|F|1992-06-26|1992-07-29|1992-07-05|NONE|FOB| pending dolphins sleep furious|
+130|69130|4143|5|31|34073.03|0.06|0.05|R|F|1992-09-01|1992-07-18|1992-09-02|TAKE BACK RETURN|RAIL|thily about the ruth|
+131|167505|22|1|45|70762.50|0.10|0.02|R|F|1994-09-14|1994-09-02|1994-10-04|NONE|FOB|ironic, bold accounts. careful|
+131|44255|9264|2|50|59962.50|0.02|0.04|A|F|1994-09-17|1994-08-10|1994-09-21|NONE|SHIP|ending requests. final, ironic pearls slee|
+131|189021|1540|3|4|4440.08|0.04|0.03|A|F|1994-09-20|1994-08-30|1994-09-23|COLLECT COD|REG AIR| are carefully slyly i|
+132|140449|2964|1|18|26809.92|0.00|0.08|R|F|1993-07-10|1993-08-05|1993-07-13|NONE|TRUCK|ges. platelets wake furio|
+132|119053|9054|2|43|46098.15|0.01|0.08|R|F|1993-09-01|1993-08-16|1993-09-22|NONE|TRUCK|y pending theodolites|
+132|114419|4420|3|32|45869.12|0.04|0.04|A|F|1993-07-12|1993-08-05|1993-08-05|COLLECT COD|TRUCK|d instructions hagg|
+132|28082|5589|4|23|23231.84|0.10|0.00|A|F|1993-06-16|1993-08-27|1993-06-23|DELIVER IN PERSON|AIR|refully blithely bold acco|
+133|103432|5943|1|27|38756.61|0.00|0.02|N|O|1997-12-21|1998-02-23|1997-12-27|TAKE BACK RETURN|MAIL|yly even gifts after the sl|
+133|176279|3831|2|12|16263.24|0.02|0.06|N|O|1997-12-02|1998-01-15|1997-12-29|DELIVER IN PERSON|REG AIR|ts cajole fluffily quickly i|
+133|117350|4884|3|29|39653.15|0.09|0.08|N|O|1998-02-28|1998-01-30|1998-03-09|DELIVER IN PERSON|RAIL| the carefully regular theodoli|
+133|89855|7380|4|11|20293.35|0.06|0.01|N|O|1998-03-21|1998-01-15|1998-04-04|DELIVER IN PERSON|REG AIR|e quickly across the dolphins|
+134|641|642|1|21|32374.44|0.00|0.03|A|F|1992-07-17|1992-07-08|1992-07-26|COLLECT COD|SHIP|s. quickly regular|
+134|164645|9678|2|35|59837.40|0.06|0.07|A|F|1992-08-23|1992-06-01|1992-08-24|NONE|MAIL|ajole furiously. instructio|
+134|188252|3289|3|26|34846.50|0.09|0.06|A|F|1992-06-20|1992-07-12|1992-07-16|NONE|RAIL| among the pending depos|
+134|144002|4003|4|47|49162.00|0.05|0.00|A|F|1992-08-16|1992-07-06|1992-08-28|NONE|REG AIR|s! carefully unusual requests boost careful|
+134|35172|5173|5|12|13286.04|0.05|0.02|A|F|1992-07-03|1992-06-01|1992-07-11|COLLECT COD|TRUCK|nts are quic|
+134|133103|5617|6|12|13633.20|0.00|0.00|A|F|1992-08-08|1992-07-07|1992-08-20|TAKE BACK RETURN|FOB|lyly regular pac|
+135|108205|8206|1|47|57020.40|0.06|0.08|N|O|1996-02-18|1996-01-01|1996-02-25|COLLECT COD|RAIL|ctions wake slyly abo|
+135|198344|5902|2|21|30289.14|0.00|0.07|N|O|1996-02-11|1996-01-12|1996-02-13|DELIVER IN PERSON|SHIP| deposits believe. furiously regular p|
+135|157510|5056|3|33|51727.83|0.02|0.00|N|O|1996-01-03|1995-11-21|1996-02-01|TAKE BACK RETURN|MAIL|ptotes boost slowly care|
+135|67005|9512|4|34|33048.00|0.02|0.03|N|O|1996-01-12|1996-01-19|1996-02-05|NONE|TRUCK|counts doze against the blithely ironi|
+135|136248|1275|5|20|25684.80|0.01|0.04|N|O|1996-01-25|1995-11-20|1996-02-09|NONE|MAIL|theodolites. quickly p|
+135|115000|2534|6|13|13195.00|0.04|0.02|N|O|1995-11-12|1995-12-22|1995-11-17|NONE|FOB|nal ideas. final instr|
+160|14785|9788|1|36|61192.08|0.07|0.01|N|O|1997-03-11|1997-03-11|1997-03-20|COLLECT COD|MAIL|old, ironic deposits are quickly abov|
+160|86382|8891|2|22|30104.36|0.00|0.04|N|O|1997-02-18|1997-03-05|1997-03-05|COLLECT COD|RAIL|ncies about the request|
+160|20080|5085|3|34|34002.72|0.01|0.05|N|O|1997-01-31|1997-03-13|1997-02-14|NONE|FOB|st sleep even gifts. dependencies along|
+161|102810|341|1|19|34443.39|0.01|0.01|A|F|1994-12-13|1994-11-19|1994-12-26|DELIVER IN PERSON|TRUCK|, regular sheaves sleep along|
+162|189288|4325|1|2|2754.56|0.02|0.01|N|O|1995-09-02|1995-06-17|1995-09-08|COLLECT COD|FOB|es! final somas integrate|
+163|167545|5094|1|43|69339.22|0.01|0.00|N|O|1997-09-19|1997-11-19|1997-10-03|COLLECT COD|REG AIR|al, bold dependencies wake. iron|
+163|120702|703|2|13|22395.10|0.01|0.04|N|O|1997-11-11|1997-10-18|1997-12-07|DELIVER IN PERSON|TRUCK|inal requests. even pinto beans hag|
+163|36818|9322|3|27|47379.87|0.04|0.08|N|O|1997-12-26|1997-11-28|1998-01-05|COLLECT COD|REG AIR|ously express dependen|
+163|192642|5162|4|5|8673.20|0.02|0.00|N|O|1997-11-17|1997-10-09|1997-12-05|DELIVER IN PERSON|TRUCK| must belie|
+163|126090|8603|5|12|13393.08|0.10|0.00|N|O|1997-12-18|1997-10-26|1997-12-22|COLLECT COD|TRUCK|ly blithe accounts cajole |
+163|190825|5864|6|20|38316.40|0.00|0.07|N|O|1997-09-27|1997-11-15|1997-10-07|TAKE BACK RETURN|FOB|tructions integrate b|
+164|91309|3819|1|26|33807.80|0.09|0.04|A|F|1993-01-04|1992-11-21|1993-01-07|NONE|RAIL|s. blithely special courts are blithel|
+164|18488|3491|2|24|33755.52|0.05|0.05|R|F|1992-12-22|1992-11-27|1993-01-06|NONE|AIR|side of the slyly unusual theodolites. f|
+164|125509|3046|3|38|58311.00|0.03|0.06|R|F|1992-12-04|1992-11-23|1993-01-02|TAKE BACK RETURN|AIR|counts cajole fluffily regular packages. b|
+164|17526|28|4|32|46192.64|0.05|0.01|R|F|1992-12-21|1992-12-23|1992-12-28|COLLECT COD|RAIL|ts wake again|
+164|147505|2534|5|43|66757.50|0.06|0.01|R|F|1992-11-26|1993-01-03|1992-12-08|COLLECT COD|RAIL|y carefully regular dep|
+164|108896|8897|6|27|51432.03|0.10|0.04|R|F|1992-12-23|1993-01-16|1993-01-10|DELIVER IN PERSON|AIR|ayers wake carefully a|
+164|3037|5538|7|23|21620.69|0.09|0.04|A|F|1992-11-03|1992-12-02|1992-11-12|NONE|REG AIR|ress packages haggle ideas. blithely spec|
+165|33175|8182|1|3|3324.51|0.01|0.08|R|F|1993-03-29|1993-03-06|1993-04-12|DELIVER IN PERSON|REG AIR|riously requests. depos|
+165|161627|9176|2|43|72610.66|0.08|0.05|R|F|1993-02-27|1993-04-19|1993-03-03|DELIVER IN PERSON|TRUCK|jole slyly according |
+165|58520|6036|3|15|22177.80|0.00|0.05|R|F|1993-04-10|1993-03-29|1993-05-01|COLLECT COD|SHIP| bold packages mainta|
+165|139190|4217|4|49|60230.31|0.07|0.06|A|F|1993-02-20|1993-04-02|1993-03-10|COLLECT COD|REG AIR|uses sleep slyly ruthlessly regular a|
+165|155084|7600|5|27|30755.16|0.01|0.04|R|F|1993-04-27|1993-03-04|1993-05-13|NONE|MAIL|around the ironic, even orb|
+166|64888|9901|1|37|68556.56|0.09|0.03|N|O|1995-11-16|1995-10-17|1995-12-13|NONE|MAIL|lar frays wake blithely a|
+166|166366|6367|2|13|18620.68|0.09|0.05|N|O|1995-11-09|1995-11-18|1995-11-14|COLLECT COD|SHIP|fully above the blithely fina|
+166|99652|2162|3|41|67717.65|0.07|0.03|N|O|1995-11-13|1995-11-07|1995-12-08|COLLECT COD|FOB|hily along the blithely pending fo|
+166|45027|7532|4|8|7776.16|0.05|0.02|N|O|1995-12-30|1995-11-29|1996-01-29|DELIVER IN PERSON|RAIL|e carefully bold |
+167|101171|1172|1|28|32820.76|0.06|0.01|R|F|1993-02-19|1993-02-16|1993-03-03|DELIVER IN PERSON|TRUCK|sly during the u|
+167|171555|4073|2|27|43916.85|0.09|0.00|R|F|1993-05-01|1993-03-31|1993-05-31|TAKE BACK RETURN|FOB|eans affix furiously-- packages|
+192|97017|2036|1|23|23322.23|0.00|0.00|N|O|1998-02-05|1998-02-06|1998-03-07|TAKE BACK RETURN|AIR|ly pending theodolites haggle quickly fluf|
+192|161368|8917|2|20|28587.20|0.07|0.01|N|O|1998-03-13|1998-02-02|1998-03-31|TAKE BACK RETURN|REG AIR|tes. carefu|
+192|110252|5275|3|15|18933.75|0.09|0.01|N|O|1998-01-30|1998-02-10|1998-02-23|TAKE BACK RETURN|TRUCK|he ironic requests haggle about|
+192|196400|3958|4|2|2992.80|0.06|0.02|N|O|1998-03-06|1998-02-03|1998-03-24|COLLECT COD|SHIP|s. dependencies nag furiously alongside|
+192|82915|7932|5|25|47447.75|0.02|0.03|N|O|1998-02-15|1998-01-11|1998-03-17|COLLECT COD|TRUCK|. carefully regular|
+192|141003|3518|6|45|46980.00|0.00|0.05|N|O|1998-03-11|1998-01-09|1998-04-03|NONE|MAIL|equests. ideas sleep idea|
+193|92638|5148|1|9|14675.67|0.06|0.06|A|F|1993-09-17|1993-10-08|1993-09-30|COLLECT COD|TRUCK|against the fluffily regular d|
+193|153954|1500|2|15|30119.25|0.02|0.07|R|F|1993-11-22|1993-10-09|1993-12-05|TAKE BACK RETURN|SHIP|ffily. regular packages d|
+193|93878|6388|3|23|43053.01|0.06|0.05|A|F|1993-08-21|1993-10-11|1993-09-02|DELIVER IN PERSON|TRUCK|ly even accounts wake blithely bold|
+194|2594|5095|1|17|25442.03|0.05|0.04|R|F|1992-05-24|1992-05-22|1992-05-30|COLLECT COD|AIR| regular deposi|
+194|183523|6042|2|1|1606.52|0.04|0.06|R|F|1992-04-30|1992-05-18|1992-05-23|NONE|REG AIR| regular theodolites. regular, iron|
+194|65994|3513|3|13|25479.87|0.08|0.08|A|F|1992-05-07|1992-06-18|1992-05-10|NONE|AIR|about the blit|
+194|145146|5147|4|36|42881.04|0.00|0.05|R|F|1992-05-21|1992-05-18|1992-05-27|TAKE BACK RETURN|RAIL|pecial packages wake after the slyly r|
+194|56176|1187|5|8|9057.36|0.04|0.00|R|F|1992-07-06|1992-06-25|1992-07-11|COLLECT COD|FOB|uriously unusual excuses|
+194|148984|1499|6|16|32527.68|0.06|0.03|A|F|1992-05-14|1992-06-14|1992-05-21|TAKE BACK RETURN|TRUCK|y regular requests. furious|
+194|167828|345|7|21|39812.22|0.02|0.01|R|F|1992-05-06|1992-05-20|1992-05-07|COLLECT COD|REG AIR|accounts detect quickly dogged |
+195|84590|9607|1|6|9447.54|0.04|0.02|A|F|1994-01-09|1994-03-27|1994-01-28|COLLECT COD|REG AIR|y, even deposits haggle carefully. bli|
+195|93847|1375|2|41|75474.44|0.05|0.07|A|F|1994-02-24|1994-02-11|1994-03-20|NONE|TRUCK|rts detect in place of t|
+195|85446|7955|3|34|48668.96|0.08|0.08|R|F|1994-01-31|1994-02-11|1994-02-12|NONE|TRUCK| cajole furiously bold i|
+195|85442|7951|4|41|58525.04|0.06|0.04|R|F|1994-03-14|1994-03-13|1994-04-09|COLLECT COD|RAIL|ggle fluffily foxes. fluffily ironic ex|
+196|135052|79|1|19|20653.95|0.03|0.02|R|F|1993-04-17|1993-05-27|1993-04-30|NONE|SHIP|sts maintain foxes. furiously regular p|
+196|9852|2353|2|15|26427.75|0.03|0.04|A|F|1993-07-05|1993-05-08|1993-07-06|TAKE BACK RETURN|SHIP|s accounts. furio|
+197|98494|1004|1|39|58207.11|0.02|0.04|N|O|1995-07-21|1995-07-01|1995-08-14|TAKE BACK RETURN|AIR|press accounts. daringly sp|
+197|177103|9621|2|8|9440.80|0.09|0.02|A|F|1995-04-17|1995-07-01|1995-04-27|DELIVER IN PERSON|SHIP|y blithely even deposits. blithely fina|
+197|155829|8345|3|17|32041.94|0.06|0.02|N|O|1995-08-02|1995-06-23|1995-08-03|COLLECT COD|REG AIR|ts. careful|
+197|17936|2939|4|25|46348.25|0.04|0.01|N|F|1995-06-13|1995-05-23|1995-06-24|TAKE BACK RETURN|FOB|s-- quickly final accounts|
+197|41466|3971|5|14|19704.44|0.09|0.01|R|F|1995-05-08|1995-05-24|1995-05-12|TAKE BACK RETURN|RAIL|use slyly slyly silent depo|
+197|105880|901|6|1|1885.88|0.07|0.05|N|O|1995-07-15|1995-06-21|1995-08-11|COLLECT COD|RAIL| even, thin dependencies sno|
+198|56061|6062|1|33|33562.98|0.07|0.02|N|O|1998-01-05|1998-03-20|1998-01-10|TAKE BACK RETURN|TRUCK|carefully caref|
+198|15229|7731|2|20|22884.40|0.03|0.00|N|O|1998-01-15|1998-03-31|1998-01-25|DELIVER IN PERSON|FOB|carefully final escapades a|
+198|148058|3087|3|15|16590.75|0.04|0.02|N|O|1998-04-12|1998-02-26|1998-04-15|COLLECT COD|MAIL|es. quickly pending deposits s|
+198|10371|2873|4|35|44847.95|0.08|0.02|N|O|1998-02-27|1998-03-23|1998-03-14|TAKE BACK RETURN|RAIL|ests nod quickly furiously sly pinto be|
+198|101952|1953|5|33|64480.35|0.02|0.01|N|O|1998-03-22|1998-03-12|1998-04-14|DELIVER IN PERSON|SHIP|ending foxes acr|
+199|132072|9612|1|50|55203.50|0.02|0.00|N|O|1996-06-12|1996-06-03|1996-07-04|DELIVER IN PERSON|MAIL|essly regular ideas boost sly|
+199|133998|3999|2|30|60959.70|0.08|0.05|N|O|1996-03-27|1996-05-29|1996-04-14|NONE|TRUCK|ilent packages doze quickly. thinly |
+224|150111|112|1|16|18577.76|0.04|0.00|A|F|1994-08-01|1994-07-30|1994-08-27|DELIVER IN PERSON|MAIL|y unusual foxes |
+224|108609|1120|2|34|54998.40|0.04|0.08|R|F|1994-07-13|1994-08-25|1994-07-31|COLLECT COD|TRUCK| carefully. final platelets |
+224|189967|7522|3|41|84335.36|0.07|0.04|A|F|1994-09-01|1994-09-15|1994-09-02|TAKE BACK RETURN|SHIP|after the furiou|
+224|166377|1410|4|12|17320.44|0.08|0.06|R|F|1994-10-12|1994-08-29|1994-10-20|DELIVER IN PERSON|MAIL|uriously regular packages. slyly fina|
+224|93857|8876|5|45|83288.25|0.07|0.07|R|F|1994-08-14|1994-09-02|1994-08-27|COLLECT COD|AIR|leep furiously regular requests. furiousl|
+224|50010|7526|6|4|3840.04|0.02|0.00|R|F|1994-09-08|1994-08-24|1994-10-04|DELIVER IN PERSON|FOB|tructions |
+225|171925|1926|1|4|7987.68|0.09|0.07|N|O|1995-08-05|1995-08-19|1995-09-03|TAKE BACK RETURN|SHIP|ng the ironic packages. asymptotes among |
+225|130565|8105|2|3|4786.68|0.00|0.08|N|O|1995-07-25|1995-07-08|1995-08-17|DELIVER IN PERSON|REG AIR| fluffily about the carefully bold a|
+225|198212|3251|3|45|58959.45|0.06|0.01|N|O|1995-08-17|1995-08-20|1995-08-30|TAKE BACK RETURN|FOB|the slyly even platelets use aro|
+225|146071|8586|4|24|26809.68|0.00|0.06|N|O|1995-09-23|1995-08-05|1995-10-16|COLLECT COD|MAIL|ironic accounts are final account|
+225|7589|5090|5|31|46393.98|0.04|0.06|N|O|1995-06-21|1995-07-24|1995-07-04|TAKE BACK RETURN|FOB|special platelets. quickly r|
+225|131835|9375|6|12|22401.96|0.00|0.00|A|F|1995-06-04|1995-07-15|1995-06-08|COLLECT COD|MAIL| unusual requests. bus|
+225|141233|8776|7|44|56066.12|0.10|0.06|N|O|1995-09-22|1995-08-16|1995-10-22|NONE|REG AIR|leep slyly |
+226|96909|9419|1|4|7623.60|0.00|0.00|R|F|1993-03-31|1993-04-30|1993-04-10|NONE|TRUCK|c foxes integrate carefully against th|
+226|137802|5342|2|46|84630.80|0.06|0.01|A|F|1993-07-06|1993-04-24|1993-07-13|COLLECT COD|FOB|s. carefully bold accounts cajol|
+226|37309|4819|3|35|43620.50|0.09|0.03|A|F|1993-03-31|1993-05-18|1993-04-01|NONE|RAIL|osits cajole. final, even foxes a|
+226|40633|8146|4|45|70813.35|0.10|0.02|R|F|1993-04-17|1993-05-27|1993-05-11|DELIVER IN PERSON|AIR| carefully pending pi|
+226|117956|5490|5|2|3947.90|0.07|0.02|R|F|1993-03-26|1993-04-13|1993-04-20|TAKE BACK RETURN|SHIP|al platelets. express somas |
+226|82937|7954|6|48|92156.64|0.02|0.00|A|F|1993-06-11|1993-05-15|1993-06-19|NONE|REG AIR|efully silent packages. final deposit|
+226|117961|5495|7|14|27705.44|0.09|0.00|R|F|1993-05-20|1993-06-05|1993-05-27|COLLECT COD|MAIL|ep carefully regular accounts. ironic|
+227|165335|2884|1|19|26606.27|0.05|0.06|N|O|1995-12-10|1996-01-30|1995-12-26|NONE|RAIL|s cajole furiously a|
+227|174102|1654|2|24|28226.40|0.07|0.07|N|O|1996-02-03|1995-12-24|1996-02-12|COLLECT COD|SHIP|uses across the blithe dependencies cajol|
+228|4039|6540|1|3|2829.09|0.10|0.08|A|F|1993-05-20|1993-04-08|1993-05-26|DELIVER IN PERSON|SHIP|ckages. sly|
+229|83580|8597|1|20|31271.60|0.02|0.03|R|F|1994-01-11|1994-01-31|1994-01-26|DELIVER IN PERSON|REG AIR|le. instructions use across the quickly fin|
+229|128904|8905|2|29|56054.10|0.07|0.00|A|F|1994-03-15|1994-03-02|1994-03-26|COLLECT COD|SHIP|s, final request|
+229|78526|8527|3|28|42126.56|0.02|0.02|R|F|1994-02-10|1994-02-02|1994-03-10|DELIVER IN PERSON|FOB| final, regular requests. platel|
+229|176948|1983|4|3|6074.82|0.02|0.08|R|F|1994-03-22|1994-03-24|1994-04-04|DELIVER IN PERSON|REG AIR|posits. furiously regular theodol|
+229|155180|211|5|33|40760.94|0.03|0.06|R|F|1994-03-25|1994-02-11|1994-04-13|NONE|FOB| deposits; bold, ruthless theodolites|
+229|105393|7904|6|29|40553.31|0.04|0.00|R|F|1994-01-14|1994-02-16|1994-01-22|NONE|FOB|uriously pending |
+230|185863|900|1|46|89647.56|0.09|0.00|R|F|1994-02-03|1994-01-15|1994-02-23|TAKE BACK RETURN|SHIP|old packages ha|
+230|194908|7428|2|6|12017.40|0.03|0.08|A|F|1994-01-26|1994-01-25|1994-02-13|NONE|REG AIR| sleep furiously about the p|
+230|7367|4868|3|1|1274.36|0.07|0.06|R|F|1994-01-22|1994-01-03|1994-02-05|TAKE BACK RETURN|RAIL|blithely unusual dolphins. bold, ex|
+230|9164|1665|4|44|47219.04|0.08|0.06|R|F|1994-02-09|1994-01-18|1994-03-11|NONE|MAIL|deposits integrate slyly sile|
+230|18923|6427|5|8|14735.36|0.09|0.06|R|F|1993-11-03|1994-01-20|1993-11-11|TAKE BACK RETURN|TRUCK|g the instructions. fluffil|
+230|33927|1437|6|8|14887.36|0.00|0.05|R|F|1993-11-21|1994-01-05|1993-12-19|TAKE BACK RETURN|FOB|nal ideas. silent, reg|
+231|158356|8357|1|16|22629.60|0.04|0.08|R|F|1994-11-20|1994-10-29|1994-12-17|TAKE BACK RETURN|AIR|e furiously ironic pinto beans.|
+231|83359|884|2|46|61748.10|0.04|0.05|R|F|1994-12-13|1994-12-02|1994-12-14|DELIVER IN PERSON|SHIP|affix blithely. bold requests among the f|
+231|198124|644|3|50|61106.00|0.09|0.01|A|F|1994-12-11|1994-12-14|1994-12-13|NONE|RAIL|onic packages haggle fluffily a|
+231|56760|6761|4|31|53219.56|0.08|0.02|A|F|1994-11-05|1994-12-27|1994-11-30|TAKE BACK RETURN|SHIP|iously special decoys wake q|
+256|88233|742|1|22|26867.06|0.09|0.02|R|F|1994-01-12|1993-12-28|1994-01-26|COLLECT COD|FOB|ke quickly ironic, ironic deposits. reg|
+256|118399|3422|2|40|56695.60|0.10|0.01|A|F|1993-11-30|1993-12-13|1993-12-02|NONE|FOB|nal theodolites. deposits cajole s|
+256|129111|4136|3|45|51304.95|0.02|0.08|R|F|1994-01-14|1994-01-17|1994-02-10|COLLECT COD|SHIP| grouches. ideas wake quickly ar|
+257|146229|6230|1|7|8926.54|0.05|0.02|N|O|1998-06-18|1998-05-15|1998-06-27|COLLECT COD|FOB|ackages sleep bold realms. f|
+258|106194|3725|1|8|9601.52|0.00|0.07|R|F|1994-01-20|1994-03-21|1994-02-09|NONE|REG AIR|ully about the fluffily silent dependencies|
+258|196119|3677|2|40|48604.40|0.10|0.01|A|F|1994-03-13|1994-02-23|1994-04-05|DELIVER IN PERSON|FOB|silent frets nod daringly busy, bold|
+258|161762|1763|3|45|82069.20|0.07|0.07|R|F|1994-03-04|1994-02-13|1994-03-30|DELIVER IN PERSON|TRUCK|regular excuses-- fluffily ruthl|
+258|132912|5426|4|31|60292.21|0.02|0.05|A|F|1994-04-20|1994-03-20|1994-04-28|COLLECT COD|REG AIR| slyly blithely special mul|
+258|35959|8463|5|25|47373.75|0.08|0.02|A|F|1994-04-13|1994-02-26|1994-04-29|TAKE BACK RETURN|TRUCK|leep pending packages.|
+258|146467|8982|6|36|54484.56|0.09|0.04|A|F|1994-01-11|1994-03-04|1994-01-18|DELIVER IN PERSON|AIR|nic asymptotes. slyly silent r|
+259|98779|8780|1|14|24888.78|0.00|0.08|A|F|1993-12-17|1993-12-09|1993-12-31|COLLECT COD|SHIP|ons against the express acco|
+259|161982|4499|2|14|28615.72|0.03|0.05|R|F|1993-11-10|1993-11-20|1993-11-17|DELIVER IN PERSON|FOB|ully even, regul|
+259|23514|3515|3|42|60375.42|0.09|0.00|R|F|1993-10-20|1993-11-18|1993-11-12|NONE|TRUCK|the slyly ironic pinto beans. fi|
+259|195335|2893|4|3|4290.99|0.08|0.06|R|F|1993-10-04|1993-11-07|1993-10-14|TAKE BACK RETURN|SHIP|ng slyly at the accounts.|
+259|192201|7240|5|6|7759.20|0.00|0.05|R|F|1993-12-05|1993-12-22|1993-12-21|COLLECT COD|TRUCK| requests sleep|
+260|155887|5888|1|50|97144.00|0.07|0.08|N|O|1997-03-24|1997-02-09|1997-04-20|TAKE BACK RETURN|REG AIR|c deposits |
+260|182736|2737|2|26|47286.98|0.02|0.07|N|O|1996-12-12|1997-02-06|1996-12-15|NONE|TRUCK|ld theodolites boost fl|
+260|41222|8735|3|27|31406.94|0.05|0.08|N|O|1997-03-23|1997-02-15|1997-04-22|TAKE BACK RETURN|RAIL|ions according to the|
+260|5337|338|4|29|36027.57|0.10|0.06|N|O|1997-03-15|1997-01-14|1997-04-13|NONE|MAIL|fluffily even asymptotes. express wa|
+260|95286|305|5|44|56376.32|0.01|0.05|N|O|1997-03-26|1997-02-03|1997-04-19|DELIVER IN PERSON|MAIL|above the blithely ironic instr|
+261|1349|6350|1|34|42511.56|0.05|0.08|R|F|1993-08-18|1993-09-24|1993-08-20|COLLECT COD|REG AIR|c packages. asymptotes da|
+261|65662|5663|2|20|32553.20|0.00|0.06|R|F|1993-10-21|1993-08-02|1993-11-04|DELIVER IN PERSON|RAIL|ites hinder |
+261|173959|8994|3|28|56922.60|0.08|0.03|R|F|1993-07-24|1993-08-20|1993-08-05|COLLECT COD|AIR|ironic packages nag slyly. carefully fin|
+261|118455|967|4|49|72199.05|0.04|0.05|R|F|1993-09-12|1993-08-31|1993-10-07|COLLECT COD|SHIP|ions. bold accounts |
+261|60469|7988|5|49|70043.54|0.01|0.08|A|F|1993-09-29|1993-09-08|1993-10-01|COLLECT COD|SHIP| pinto beans haggle slyly furiously pending|
+261|96989|9499|6|20|39719.60|0.06|0.06|A|F|1993-10-15|1993-09-05|1993-11-07|NONE|AIR|ing to the special, ironic deposi|
+262|191186|1187|1|39|49810.02|0.01|0.05|N|O|1996-01-15|1996-02-18|1996-01-28|COLLECT COD|RAIL|usual, regular requests|
+262|60074|7593|2|33|34124.31|0.09|0.03|N|O|1996-03-10|1996-01-31|1996-03-27|TAKE BACK RETURN|AIR|atelets sleep furiously. requests cajole. b|
+262|58695|6211|3|35|57879.15|0.05|0.08|N|O|1996-03-12|1996-02-14|1996-04-11|COLLECT COD|MAIL|lites cajole along the pending packag|
+263|23960|6463|1|22|41447.12|0.06|0.08|R|F|1994-08-24|1994-06-20|1994-09-09|NONE|FOB|efully express fo|
+263|84557|9574|2|9|13873.95|0.08|0.00|A|F|1994-07-21|1994-07-16|1994-08-08|TAKE BACK RETURN|TRUCK|lms wake bl|
+263|142891|434|3|50|96694.50|0.06|0.04|R|F|1994-08-18|1994-07-31|1994-08-22|NONE|TRUCK|re the packages. special|
+288|50641|8157|1|31|49340.84|0.00|0.03|N|O|1997-03-17|1997-04-28|1997-04-06|TAKE BACK RETURN|AIR|instructions wa|
+288|116386|8898|2|49|68716.62|0.08|0.05|N|O|1997-04-19|1997-05-19|1997-05-18|TAKE BACK RETURN|TRUCK|ic excuses sleep always spe|
+288|98833|8834|3|36|65945.88|0.02|0.02|N|O|1997-02-22|1997-05-07|1997-03-07|TAKE BACK RETURN|TRUCK|yly pending excu|
+288|78406|8407|4|19|26303.60|0.07|0.07|N|O|1997-03-14|1997-04-04|1997-03-26|NONE|MAIL|deposits. blithely quick courts ar|
+288|161894|6927|5|31|60632.59|0.10|0.04|N|O|1997-05-29|1997-04-24|1997-06-20|TAKE BACK RETURN|RAIL|ns. fluffily|
+289|173280|832|1|25|33832.00|0.07|0.05|N|O|1997-03-18|1997-05-05|1997-04-15|DELIVER IN PERSON|FOB|out the quickly bold theodol|
+289|111800|9334|2|6|10870.80|0.06|0.05|N|O|1997-02-18|1997-05-08|1997-03-19|DELIVER IN PERSON|SHIP|d packages use fluffily furiously|
+289|16996|1999|3|44|84171.56|0.10|0.08|N|O|1997-06-05|1997-04-20|1997-07-02|COLLECT COD|MAIL|ly ironic foxes. asymptotes |
+289|39439|1943|4|48|66164.64|0.01|0.08|N|O|1997-03-14|1997-03-30|1997-03-24|DELIVER IN PERSON|RAIL|sits cajole. bold pinto beans x-ray fl|
+289|46285|8790|5|13|16006.64|0.10|0.03|N|O|1997-06-08|1997-04-06|1997-06-18|TAKE BACK RETURN|REG AIR|ts. quickly bold deposits alongside|
+290|5351|352|1|35|43972.25|0.01|0.02|R|F|1994-04-01|1994-02-05|1994-04-27|NONE|MAIL|ove the final foxes detect slyly fluffily|
+290|128923|1436|2|2|3903.84|0.05|0.04|A|F|1994-01-30|1994-02-13|1994-02-21|TAKE BACK RETURN|TRUCK|. permanently furious reques|
+290|1888|4389|3|5|8949.40|0.03|0.05|A|F|1994-01-19|1994-02-24|1994-01-27|NONE|MAIL|ans integrate. requests sleep. fur|
+290|123741|6254|4|23|40589.02|0.05|0.08|R|F|1994-03-14|1994-02-21|1994-04-09|NONE|AIR|refully unusual packages. |
+291|122565|102|1|21|33338.76|0.05|0.07|A|F|1994-05-26|1994-05-10|1994-06-23|COLLECT COD|TRUCK|y quickly regular theodolites. final t|
+291|137316|7317|2|19|25712.89|0.08|0.02|R|F|1994-06-14|1994-04-25|1994-06-19|NONE|REG AIR|e. ruthlessly final accounts after the|
+291|60874|5887|3|30|55046.10|0.10|0.02|R|F|1994-03-22|1994-04-30|1994-03-24|DELIVER IN PERSON|FOB| fluffily regular deposits. quickl|
+292|153561|3562|1|8|12916.48|0.10|0.03|R|F|1992-02-18|1992-03-30|1992-03-18|DELIVER IN PERSON|RAIL|sily bold deposits alongside of the ex|
+292|99249|9250|2|24|29957.76|0.08|0.04|R|F|1992-03-24|1992-03-06|1992-04-20|COLLECT COD|TRUCK| bold, pending theodolites u|
+293|8960|6461|1|14|26165.44|0.02|0.05|R|F|1992-10-19|1992-12-23|1992-11-10|DELIVER IN PERSON|SHIP|es. packages above the|
+293|186406|6407|2|11|16416.40|0.10|0.04|R|F|1992-12-24|1992-12-01|1993-01-12|COLLECT COD|MAIL| affix carefully quickly special idea|
+293|117267|4801|3|13|16695.38|0.04|0.02|A|F|1992-12-17|1992-12-26|1992-12-22|COLLECT COD|RAIL| wake after the quickly even deposits. bli|
+294|59620|7136|1|31|48968.22|0.00|0.01|R|F|1993-08-06|1993-08-19|1993-08-13|TAKE BACK RETURN|AIR|le fluffily along the quick|
+295|197507|27|1|29|46530.50|0.02|0.07|A|F|1994-11-09|1994-12-08|1994-12-07|COLLECT COD|MAIL|inst the carefully ironic pinto beans. blit|
+295|91344|8872|2|26|34718.84|0.04|0.03|R|F|1994-12-13|1994-11-30|1995-01-06|DELIVER IN PERSON|AIR|ts above the slyly regular requests x-ray q|
+295|15283|7785|3|8|9586.24|0.10|0.07|R|F|1995-01-13|1994-11-17|1995-01-25|NONE|TRUCK| final instructions h|
+295|60621|3128|4|26|41122.12|0.10|0.04|A|F|1995-01-12|1994-11-22|1995-01-22|DELIVER IN PERSON|MAIL| carefully iron|
+320|4415|1916|1|30|39582.30|0.05|0.01|N|O|1997-12-04|1998-01-21|1997-12-13|NONE|RAIL| ironic, final accounts wake quick de|
+320|192158|4678|2|13|16251.95|0.03|0.00|N|O|1997-12-16|1997-12-26|1997-12-17|TAKE BACK RETURN|AIR|he furiously regular pinto beans. car|
+321|318|7819|1|21|25584.51|0.01|0.08|A|F|1993-07-18|1993-04-24|1993-08-13|TAKE BACK RETURN|REG AIR|hockey players sleep slyly sl|
+321|140433|5462|2|41|60410.63|0.08|0.07|R|F|1993-06-21|1993-06-07|1993-07-09|NONE|REG AIR|special packages shall have to doze blit|
+322|152499|7530|1|12|18617.88|0.08|0.07|A|F|1992-06-29|1992-05-30|1992-07-11|NONE|AIR|ular theodolites promise qu|
+322|43662|3663|2|48|77071.68|0.02|0.07|A|F|1992-06-11|1992-06-16|1992-06-26|COLLECT COD|RAIL|dolites detect qu|
+322|12673|177|3|20|31713.40|0.04|0.01|R|F|1992-04-26|1992-05-04|1992-05-22|DELIVER IN PERSON|MAIL|ckly toward |
+322|183246|5765|4|10|13292.40|0.06|0.03|R|F|1992-04-12|1992-05-13|1992-04-14|DELIVER IN PERSON|AIR| deposits grow slyly according to th|
+322|11605|9109|5|35|53081.00|0.07|0.06|A|F|1992-07-17|1992-05-03|1992-08-14|TAKE BACK RETURN|RAIL|egular accounts cajole carefully. even d|
+322|33310|8317|6|3|3729.93|0.08|0.05|A|F|1992-07-03|1992-05-10|1992-07-28|NONE|AIR|ending, ironic deposits along the blith|
+322|37435|4945|7|5|6862.15|0.01|0.02|A|F|1992-04-15|1992-05-12|1992-04-26|COLLECT COD|REG AIR| special grouches sleep quickly instructio|
+323|163628|1177|1|50|84581.00|0.05|0.04|A|F|1994-04-20|1994-04-25|1994-05-12|DELIVER IN PERSON|REG AIR|cial requests |
+323|95136|7646|2|18|20360.34|0.06|0.07|R|F|1994-04-13|1994-06-02|1994-05-10|DELIVER IN PERSON|TRUCK|posits cajole furiously pinto beans. |
+323|142725|2726|3|9|15909.48|0.07|0.04|A|F|1994-06-26|1994-06-10|1994-07-13|COLLECT COD|TRUCK|nic accounts. regular, regular pack|
+324|199475|4514|1|26|40936.22|0.07|0.01|R|F|1992-04-19|1992-05-28|1992-05-12|DELIVER IN PERSON|RAIL|ross the slyly regular s|
+325|158791|6337|1|34|62892.86|0.09|0.04|A|F|1993-10-28|1993-12-13|1993-11-17|TAKE BACK RETURN|MAIL|ly bold deposits. always iron|
+325|185139|7658|2|5|6120.65|0.07|0.08|A|F|1994-01-02|1994-01-05|1994-01-04|TAKE BACK RETURN|MAIL| theodolites. |
+325|18788|1290|3|35|59737.30|0.07|0.07|A|F|1993-12-06|1994-01-03|1993-12-26|DELIVER IN PERSON|REG AIR|packages wa|
+326|179094|4129|1|41|48096.69|0.06|0.03|N|O|1995-08-30|1995-07-09|1995-09-12|DELIVER IN PERSON|TRUCK|ily quickly bold ideas.|
+326|19480|1982|2|38|53180.24|0.02|0.08|N|O|1995-09-12|1995-08-23|1995-09-14|COLLECT COD|RAIL|es sleep slyly. carefully regular inst|
+326|183739|8776|3|25|45568.25|0.03|0.04|N|O|1995-08-03|1995-07-27|1995-08-16|NONE|AIR|ily furiously unusual accounts. |
+326|84836|9853|4|5|9104.15|0.03|0.08|N|O|1995-07-29|1995-07-13|1995-08-12|NONE|REG AIR|deas sleep according to the sometimes spe|
+326|34543|9550|5|31|45803.74|0.04|0.08|N|O|1995-09-27|1995-07-06|1995-10-22|NONE|TRUCK|cies sleep quick|
+326|156712|4258|6|41|72517.11|0.02|0.00|N|O|1995-07-05|1995-07-23|1995-07-20|TAKE BACK RETURN|REG AIR|to beans wake before the furiously re|
+326|42134|4639|7|47|50578.11|0.04|0.04|N|O|1995-09-16|1995-07-04|1995-10-04|NONE|REG AIR| special accounts sleep |
+327|143503|1046|1|16|24744.00|0.03|0.01|N|O|1995-07-05|1995-06-07|1995-07-09|TAKE BACK RETURN|TRUCK|cial ideas sleep af|
+327|41715|4220|2|9|14910.39|0.09|0.05|A|F|1995-05-24|1995-07-11|1995-06-05|NONE|AIR| asymptotes are fu|
+352|63762|3763|1|17|29337.92|0.07|0.05|R|F|1994-06-02|1994-05-31|1994-06-29|NONE|FOB|pending deposits sleep furiously |
+353|119305|4328|1|41|54296.30|0.00|0.06|A|F|1994-03-25|1994-03-31|1994-03-30|DELIVER IN PERSON|AIR|refully final theodoli|
+353|147542|7543|2|29|46096.66|0.09|0.00|A|F|1994-01-11|1994-03-19|1994-02-09|COLLECT COD|FOB|ctions impr|
+353|134318|1858|3|12|16227.72|0.06|0.01|R|F|1994-01-02|1994-03-26|1994-01-19|DELIVER IN PERSON|RAIL|g deposits cajole |
+353|77071|2086|4|46|48211.22|0.00|0.04|A|F|1994-04-14|1994-01-31|1994-05-05|DELIVER IN PERSON|FOB| ironic dolphins |
+353|116803|1826|5|9|16378.20|0.02|0.02|A|F|1994-03-15|1994-03-20|1994-03-18|TAKE BACK RETURN|RAIL|ual accounts! carefu|
+353|102699|2700|6|39|66365.91|0.02|0.05|A|F|1994-01-15|1994-03-30|1994-02-01|NONE|MAIL|losely quickly even accounts. c|
+354|49480|1985|1|14|20012.72|0.08|0.04|N|O|1996-04-12|1996-06-03|1996-05-08|NONE|SHIP|quickly regular grouches will eat. careful|
+354|193864|1422|2|24|46988.64|0.01|0.01|N|O|1996-05-08|1996-05-17|1996-06-07|DELIVER IN PERSON|AIR|y silent requests. regular, even accounts|
+354|58125|8126|3|50|54156.00|0.08|0.05|N|O|1996-03-21|1996-05-20|1996-04-04|COLLECT COD|TRUCK|to beans s|
+354|106672|4203|4|7|11750.69|0.06|0.01|N|O|1996-05-07|1996-04-18|1996-05-24|NONE|MAIL|ously idly ironic accounts-- quickl|
+354|30527|528|5|18|26235.36|0.04|0.08|N|O|1996-03-31|1996-05-13|1996-04-27|DELIVER IN PERSON|RAIL| about the carefully unusual |
+354|61082|3589|6|36|37550.88|0.03|0.02|N|O|1996-03-19|1996-05-29|1996-03-30|NONE|AIR|onic requests thrash bold g|
+354|4660|9661|7|14|21905.24|0.01|0.07|N|O|1996-07-06|1996-06-08|1996-07-10|TAKE BACK RETURN|MAIL|t thinly above the ironic, |
+355|113959|8982|1|31|61161.45|0.09|0.07|A|F|1994-07-13|1994-08-18|1994-07-18|DELIVER IN PERSON|FOB|y unusual, ironic|
+355|96030|3558|2|41|42067.23|0.05|0.00|A|F|1994-08-15|1994-07-19|1994-09-06|DELIVER IN PERSON|TRUCK| deposits. carefully r|
+356|45214|5215|1|4|4636.84|0.10|0.01|A|F|1994-07-28|1994-08-01|1994-08-04|DELIVER IN PERSON|REG AIR| the dependencies nod unusual, final ac|
+356|107463|2484|2|48|70582.08|0.02|0.03|R|F|1994-08-12|1994-07-31|1994-08-26|NONE|FOB|unusual packages. furiously |
+356|118002|514|3|35|35700.00|0.08|0.07|R|F|1994-10-14|1994-07-31|1994-10-23|COLLECT COD|TRUCK|s. unusual, final|
+356|55342|353|4|41|53190.94|0.07|0.05|A|F|1994-09-28|1994-09-20|1994-10-07|COLLECT COD|SHIP| according to the express foxes will|
+356|124271|1808|5|37|47924.99|0.05|0.03|A|F|1994-07-15|1994-08-24|1994-08-09|DELIVER IN PERSON|FOB|ndencies are since the packag|
+357|113143|3144|1|26|30059.64|0.06|0.03|N|O|1996-12-28|1996-11-26|1997-01-13|NONE|FOB| carefully pending accounts use a|
+357|185814|8333|2|36|68393.16|0.07|0.06|N|O|1996-12-28|1996-11-13|1997-01-24|DELIVER IN PERSON|AIR|d the carefully even requests. |
+357|164807|9840|3|32|59897.60|0.05|0.07|N|O|1997-01-28|1996-12-29|1997-02-14|NONE|MAIL|y above the carefully final accounts|
+358|190028|2548|1|41|45838.82|0.06|0.01|A|F|1993-11-18|1993-11-14|1993-11-28|NONE|TRUCK|ely frets. furious deposits sleep |
+358|189955|7510|2|32|65438.40|0.05|0.08|A|F|1993-10-18|1993-12-12|1993-10-31|NONE|TRUCK|y final foxes sleep blithely sl|
+358|168710|3743|3|40|71148.40|0.09|0.01|A|F|1993-12-05|1993-11-04|1994-01-01|COLLECT COD|MAIL|ng the ironic theo|
+358|96557|1576|4|15|23303.25|0.08|0.08|A|F|1993-10-04|1993-12-17|1993-10-23|TAKE BACK RETURN|MAIL|out the blithely ironic deposits slee|
+358|28629|6136|5|18|28037.16|0.01|0.02|R|F|1993-10-07|1993-11-01|1993-10-26|COLLECT COD|SHIP|olphins haggle ironic accounts. f|
+358|161283|1284|6|32|43016.96|0.03|0.05|R|F|1993-12-21|1993-11-06|1994-01-17|DELIVER IN PERSON|RAIL|lyly express deposits |
+358|82916|7933|7|45|85450.95|0.05|0.02|A|F|1993-12-08|1993-10-29|1993-12-30|NONE|REG AIR|to beans. regular, unusual deposits sl|
+359|165980|5981|1|30|61379.40|0.00|0.08|A|F|1995-01-06|1995-02-20|1995-01-20|TAKE BACK RETURN|AIR|uses detect spec|
+359|11158|6161|2|18|19244.70|0.00|0.03|A|F|1995-01-27|1995-03-18|1995-01-31|DELIVER IN PERSON|RAIL|unusual warthogs. ironically sp|
+359|131463|3977|3|17|25405.82|0.07|0.06|A|F|1995-01-31|1995-03-18|1995-02-10|COLLECT COD|SHIP|sts according to the blithely|
+359|89985|2494|4|38|75049.24|0.10|0.08|R|F|1995-03-30|1995-01-20|1995-04-25|DELIVER IN PERSON|RAIL|g furiously. regular, sile|
+359|167239|2272|5|11|14368.53|0.01|0.03|A|F|1995-02-15|1995-01-27|1995-02-18|NONE|FOB|rets wake blithely. slyly final dep|
+359|182663|218|6|23|40150.18|0.04|0.07|R|F|1995-01-31|1995-03-11|1995-02-16|DELIVER IN PERSON|REG AIR|ic courts snooze quickly furiously final fo|
+384|178442|3477|1|38|57776.72|0.07|0.01|R|F|1992-06-02|1992-04-18|1992-06-10|DELIVER IN PERSON|TRUCK|totes cajole blithely against the even|
+384|63342|5849|2|49|63961.66|0.09|0.07|A|F|1992-04-01|1992-04-25|1992-04-18|COLLECT COD|AIR|refully carefully ironic instructions. bl|
+384|181502|6539|3|11|17418.50|0.02|0.08|A|F|1992-04-02|1992-04-21|1992-04-15|COLLECT COD|MAIL|ash carefully|
+384|92053|7072|4|11|11495.55|0.00|0.06|R|F|1992-06-24|1992-05-29|1992-07-22|COLLECT COD|TRUCK|nic excuses are furiously above the blith|
+384|131403|8943|5|14|20081.60|0.08|0.06|R|F|1992-06-14|1992-05-29|1992-07-05|DELIVER IN PERSON|TRUCK|ckages are slyly after the slyly specia|
+385|166446|8963|1|7|10587.08|0.05|0.06|N|O|1996-05-23|1996-05-09|1996-06-06|DELIVER IN PERSON|REG AIR| special asymptote|
+385|53025|8036|2|46|44988.92|0.08|0.07|N|O|1996-03-29|1996-05-17|1996-04-18|NONE|REG AIR|lthily ironic f|
+386|152405|9951|1|39|56838.60|0.10|0.07|A|F|1995-05-10|1995-02-28|1995-05-25|NONE|SHIP|hely. carefully regular accounts hag|
+386|68123|5642|2|16|17457.92|0.06|0.01|A|F|1995-04-12|1995-04-18|1995-05-11|DELIVER IN PERSON|MAIL|lithely fluffi|
+386|130081|82|3|37|41109.96|0.09|0.04|A|F|1995-05-23|1995-03-01|1995-05-25|TAKE BACK RETURN|MAIL|ending pearls breach fluffily. slyly pen|
+387|136667|1694|1|1|1703.66|0.08|0.03|N|O|1997-05-06|1997-04-23|1997-05-10|NONE|SHIP| pinto beans wake furiously carefu|
+387|152800|2801|2|42|77817.60|0.07|0.05|N|O|1997-05-25|1997-02-25|1997-05-29|DELIVER IN PERSON|RAIL|lithely final theodolites.|
+387|96392|1411|3|40|55535.60|0.09|0.02|N|O|1997-03-08|1997-04-18|1997-03-31|COLLECT COD|TRUCK| quickly ironic platelets are slyly. fluff|
+387|55927|5928|4|19|35775.48|0.08|0.00|N|O|1997-03-14|1997-04-21|1997-04-04|NONE|REG AIR|gular dependencies|
+387|148313|828|5|32|43561.92|0.08|0.06|N|O|1997-05-02|1997-04-11|1997-05-11|DELIVER IN PERSON|TRUCK|gle. silent, fur|
+388|32590|100|1|42|63948.78|0.05|0.06|R|F|1993-02-21|1993-02-26|1993-03-15|COLLECT COD|FOB|accounts sleep furiously|
+388|127808|7809|2|46|84446.80|0.07|0.01|A|F|1993-03-22|1993-01-26|1993-03-24|COLLECT COD|FOB|to beans nag about the careful reque|
+388|64486|9499|3|40|58019.20|0.06|0.01|A|F|1992-12-24|1993-01-28|1993-01-19|TAKE BACK RETURN|REG AIR|quests against the carefully unusual epi|
+389|189295|1814|1|2|2768.58|0.09|0.00|R|F|1994-04-13|1994-04-10|1994-04-25|TAKE BACK RETURN|RAIL|fts. courts eat blithely even dependenc|
+390|106523|9034|1|10|15295.20|0.02|0.05|N|O|1998-05-26|1998-07-06|1998-06-23|TAKE BACK RETURN|SHIP| requests. final accounts x-ray beside the|
+390|123353|890|2|17|23397.95|0.09|0.06|N|O|1998-06-07|1998-06-14|1998-07-07|COLLECT COD|SHIP|ending, pending pinto beans wake slyl|
+390|183266|8303|3|46|62065.96|0.07|0.04|N|O|1998-06-06|1998-05-20|1998-06-14|DELIVER IN PERSON|SHIP|cial excuses. bold, pending packages|
+390|141937|1938|4|42|83115.06|0.01|0.05|N|O|1998-06-06|1998-06-22|1998-07-05|COLLECT COD|SHIP|counts nag across the sly, sil|
+390|127657|170|5|13|21900.45|0.02|0.06|N|O|1998-07-08|1998-05-10|1998-07-18|DELIVER IN PERSON|SHIP|sleep carefully idle packages. blithely |
+390|124632|9657|6|11|18222.93|0.09|0.06|N|O|1998-05-05|1998-05-15|1998-06-01|DELIVER IN PERSON|SHIP|according to the foxes are furiously |
+390|84937|2462|7|24|46126.32|0.05|0.02|N|O|1998-04-18|1998-05-19|1998-04-28|TAKE BACK RETURN|AIR|y. enticingly final depos|
+391|121586|6611|1|14|22506.12|0.09|0.02|R|F|1995-02-11|1995-02-03|1995-02-13|TAKE BACK RETURN|TRUCK| escapades sleep furiously about |
+416|93563|6073|1|25|38914.00|0.00|0.05|A|F|1993-10-11|1993-11-26|1993-10-21|DELIVER IN PERSON|TRUCK|y final theodolites about|
+416|110869|8403|2|22|41356.92|0.10|0.00|R|F|1993-12-27|1993-12-17|1994-01-09|COLLECT COD|RAIL|rint blithely above the pending sentim|
+416|174101|6619|3|25|29377.50|0.07|0.01|R|F|1993-10-16|1993-12-03|1993-10-29|NONE|AIR|ses boost after the bold requests.|
+417|39560|9561|1|39|58482.84|0.01|0.02|A|F|1994-05-31|1994-05-02|1994-06-06|NONE|SHIP|y regular requests wake along |
+417|69212|4225|2|18|21261.78|0.00|0.01|R|F|1994-03-29|1994-04-10|1994-04-26|TAKE BACK RETURN|FOB|- final requests sle|
+417|44192|6697|3|41|46583.79|0.10|0.01|R|F|1994-04-11|1994-03-08|1994-05-06|COLLECT COD|RAIL|tes. regular requests across the |
+417|131087|1088|4|2|2236.16|0.01|0.03|R|F|1994-02-13|1994-04-19|1994-03-15|DELIVER IN PERSON|SHIP|uriously bol|
+418|18552|1054|1|31|45587.05|0.00|0.03|N|F|1995-06-05|1995-06-18|1995-06-26|COLLECT COD|FOB|final theodolites. fluffil|
+418|1062|3563|2|1|963.06|0.04|0.07|N|O|1995-06-23|1995-06-16|1995-07-23|DELIVER IN PERSON|AIR|regular, silent pinto|
+418|34829|7333|3|3|5291.46|0.04|0.06|N|O|1995-06-29|1995-07-12|1995-07-01|COLLECT COD|AIR|ly furiously regular w|
+419|152691|7722|1|33|57541.77|0.05|0.02|N|O|1996-11-06|1996-12-25|1996-11-20|TAKE BACK RETURN|TRUCK|y above the bli|
+419|64192|9205|2|32|36998.08|0.01|0.06|N|O|1996-12-04|1996-12-04|1996-12-24|COLLECT COD|SHIP|blithely regular requests. special pinto|
+419|70495|3003|3|15|21982.35|0.07|0.04|N|O|1996-12-17|1996-11-28|1996-12-19|TAKE BACK RETURN|REG AIR| sleep final, regular theodolites. fluffi|
+419|8756|6257|4|15|24971.25|0.01|0.02|N|O|1997-01-09|1996-12-22|1997-01-25|COLLECT COD|FOB|of the careful, thin theodolites. quickly s|
+419|148401|3430|5|17|24639.80|0.01|0.00|N|O|1997-01-13|1996-12-20|1997-02-01|COLLECT COD|REG AIR|lar dependencies: carefully regu|
+420|100885|5906|1|5|9429.40|0.04|0.03|N|O|1995-11-04|1996-01-02|1995-11-30|NONE|REG AIR|cajole blit|
+420|161079|8628|2|22|25081.54|0.05|0.04|N|O|1996-01-25|1995-12-16|1996-02-03|TAKE BACK RETURN|AIR|ly against the blithely re|
+420|47557|2566|3|45|67704.75|0.09|0.08|N|O|1996-01-14|1996-01-01|1996-01-26|COLLECT COD|FOB| final accounts. furiously express forges|
+420|74795|4796|4|12|21237.48|0.08|0.08|N|O|1996-02-05|1996-01-03|1996-02-12|TAKE BACK RETURN|REG AIR|c instructions are |
+420|72918|7933|5|37|69963.67|0.02|0.00|N|O|1995-11-16|1995-12-13|1995-11-19|DELIVER IN PERSON|SHIP|rbits. bold requests along the quickl|
+420|123736|1273|6|40|70389.20|0.01|0.05|N|O|1995-11-26|1995-12-26|1995-12-20|TAKE BACK RETURN|FOB| after the special|
+420|15978|5979|7|39|73864.83|0.00|0.08|N|O|1995-12-09|1995-12-16|1995-12-31|DELIVER IN PERSON|REG AIR|s. ironic waters about the car|
+421|133070|3071|1|1|1103.07|0.02|0.07|R|F|1992-05-29|1992-04-27|1992-06-09|NONE|TRUCK|oldly busy deposit|
+422|151816|4332|1|25|46695.25|0.10|0.07|N|O|1997-07-01|1997-08-17|1997-07-09|DELIVER IN PERSON|SHIP|carefully bold theodolit|
+422|170666|3184|2|10|17366.60|0.02|0.03|N|O|1997-06-15|1997-08-04|1997-07-08|TAKE BACK RETURN|AIR|he furiously ironic theodolite|
+422|175984|3536|3|46|94759.08|0.09|0.00|N|O|1997-06-21|1997-07-14|1997-06-27|DELIVER IN PERSON|RAIL| ideas. qu|
+422|161622|9171|4|25|42090.50|0.10|0.04|N|O|1997-08-24|1997-07-09|1997-09-22|NONE|FOB|ep along the furiousl|
+423|131890|6917|1|27|51891.03|0.06|0.03|N|O|1996-08-20|1996-08-01|1996-08-23|TAKE BACK RETURN|SHIP|ccounts. blithely regular pack|
+448|125197|5198|1|4|4888.76|0.00|0.04|N|O|1995-11-25|1995-10-20|1995-11-26|TAKE BACK RETURN|MAIL|nts thrash quickly among the b|
+448|172359|9911|2|46|65842.10|0.05|0.00|N|O|1995-08-31|1995-09-30|1995-09-09|COLLECT COD|SHIP| to the fluffily ironic packages.|
+448|26809|1814|3|35|60753.00|0.10|0.08|N|O|1995-09-27|1995-11-19|1995-10-20|COLLECT COD|REG AIR|ses nag quickly quickly ir|
+448|169045|9046|4|8|8912.32|0.10|0.00|N|O|1995-11-02|1995-10-16|1995-11-15|COLLECT COD|TRUCK|ounts wake blithely. furiously pending|
+448|137283|7284|5|23|30366.44|0.02|0.05|N|O|1995-09-26|1995-11-02|1995-10-17|NONE|SHIP|ious, final gifts|
+449|151908|6939|1|12|23518.80|0.02|0.08|N|O|1995-11-06|1995-08-25|1995-11-18|TAKE BACK RETURN|SHIP|ly. blithely ironic |
+449|108408|5939|2|4|5665.60|0.10|0.06|N|O|1995-10-27|1995-09-14|1995-11-21|DELIVER IN PERSON|FOB|are fluffily. requests are furiously|
+449|9982|9983|3|3|5675.94|0.07|0.08|N|O|1995-07-28|1995-09-11|1995-08-01|NONE|RAIL| bold deposits. express theodolites haggle|
+449|157659|2690|4|22|37766.30|0.07|0.00|N|O|1995-08-17|1995-09-04|1995-09-10|COLLECT COD|FOB|furiously final theodolites eat careful|
+450|161582|9131|1|42|69030.36|0.03|0.00|N|F|1995-06-07|1995-05-29|1995-06-23|TAKE BACK RETURN|SHIP|y asymptotes. regular depen|
+450|106298|6299|2|5|6521.45|0.03|0.02|A|F|1995-04-02|1995-05-06|1995-04-13|TAKE BACK RETURN|TRUCK|the pinto bea|
+450|142528|7557|3|32|50256.64|0.06|0.03|N|O|1995-07-02|1995-04-25|1995-07-30|TAKE BACK RETURN|SHIP| accounts nod fluffily even, pending|
+450|56267|3783|4|40|48930.40|0.05|0.03|R|F|1995-03-20|1995-05-25|1995-04-14|NONE|RAIL|ve. asymptote|
+450|78048|8049|5|2|2052.08|0.09|0.00|A|F|1995-03-11|1995-05-21|1995-03-16|COLLECT COD|AIR|y even pinto beans; qui|
+450|152726|5242|6|33|58697.76|0.08|0.05|R|F|1995-05-18|1995-05-22|1995-05-23|TAKE BACK RETURN|REG AIR|ily carefully final depo|
+451|129532|4557|1|36|56215.08|0.02|0.06|N|O|1998-06-18|1998-08-14|1998-06-20|TAKE BACK RETURN|AIR|rges can haggle carefully ironic, dogged |
+451|32028|7035|2|42|40320.84|0.05|0.01|N|O|1998-08-01|1998-08-05|1998-08-30|DELIVER IN PERSON|TRUCK|express excuses. blithely ironic pin|
+451|86136|6137|3|1|1122.13|0.07|0.05|N|O|1998-07-13|1998-07-03|1998-08-04|DELIVER IN PERSON|AIR| carefully ironic packages solve furiously |
+451|76558|4080|4|28|42967.40|0.04|0.05|N|O|1998-06-16|1998-07-09|1998-06-17|DELIVER IN PERSON|SHIP| theodolites. even cou|
+452|114639|4640|1|2|3307.26|0.04|0.03|N|O|1997-12-26|1998-01-03|1998-01-12|COLLECT COD|FOB|y express instru|
+453|197917|2956|1|45|90670.95|0.01|0.00|N|O|1997-06-30|1997-08-20|1997-07-19|COLLECT COD|REG AIR|ifts wake carefully.|
+453|175131|2683|2|38|45832.94|0.08|0.04|N|O|1997-06-30|1997-07-08|1997-07-16|DELIVER IN PERSON|REG AIR| furiously f|
+453|13144|8147|3|38|40171.32|0.10|0.01|N|O|1997-08-10|1997-07-24|1997-09-07|NONE|SHIP|sts cajole. furiously un|
+453|95748|5749|4|45|78468.30|0.10|0.01|N|O|1997-09-18|1997-06-29|1997-10-14|TAKE BACK RETURN|AIR|ironic foxes. slyly pending depos|
+453|25722|8225|5|32|52727.04|0.04|0.01|N|O|1997-07-15|1997-06-27|1997-07-18|NONE|REG AIR|s. fluffily bold packages cajole. unu|
+453|94318|6828|6|28|36744.68|0.07|0.07|N|O|1997-08-16|1997-08-12|1997-08-27|NONE|MAIL|final dependencies. slyly special pl|
+454|117595|5129|1|24|38702.16|0.06|0.01|N|O|1996-04-26|1996-03-23|1996-05-20|NONE|TRUCK|le. deposits after the ideas nag unusual pa|
+455|156485|4031|1|42|64742.16|0.10|0.02|N|O|1997-01-26|1997-01-10|1997-02-22|DELIVER IN PERSON|REG AIR|around the quickly blit|
+455|27230|7231|2|44|50918.12|0.05|0.08|N|O|1997-01-17|1997-02-22|1997-02-12|TAKE BACK RETURN|TRUCK| accounts sleep slyly ironic asymptote|
+455|48360|3369|3|45|58876.20|0.04|0.06|N|O|1996-12-20|1997-01-31|1997-01-07|TAKE BACK RETURN|SHIP|thrash ironically regular packages. qui|
+455|170012|7564|4|11|11902.11|0.01|0.02|N|O|1997-03-15|1997-02-14|1997-03-26|DELIVER IN PERSON|MAIL|g deposits against the slyly idle foxes u|
+480|52148|2149|1|22|24203.08|0.04|0.02|A|F|1993-06-16|1993-07-28|1993-07-09|NONE|MAIL|into beans cajole furiously. accounts s|
+481|18649|6153|1|17|26649.88|0.07|0.05|A|F|1992-10-21|1992-12-09|1992-11-19|DELIVER IN PERSON|MAIL|. quickly final accounts among the |
+481|20646|647|2|19|29766.16|0.08|0.01|R|F|1993-01-09|1992-11-27|1993-01-14|TAKE BACK RETURN|AIR|p blithely after t|
+481|185785|5786|3|42|78572.76|0.08|0.08|A|F|1992-11-27|1992-11-11|1992-12-08|COLLECT COD|RAIL|mptotes are furiously among the iron|
+481|81009|6026|4|11|10890.00|0.05|0.06|A|F|1993-01-12|1992-11-17|1993-02-05|NONE|FOB|eful attai|
+481|111956|6979|5|31|61006.45|0.05|0.01|A|F|1993-01-15|1992-12-31|1993-01-21|DELIVER IN PERSON|AIR|usly final packages believe. quick|
+482|137343|7344|1|32|44170.88|0.00|0.02|N|O|1996-05-22|1996-05-14|1996-05-29|NONE|SHIP|usual deposits affix against |
+482|121382|8919|2|1|1403.38|0.05|0.08|N|O|1996-05-29|1996-05-20|1996-05-31|COLLECT COD|AIR|es. quickly ironic escapades sleep furious|
+482|61141|6154|3|31|34166.34|0.04|0.03|N|O|1996-06-01|1996-05-06|1996-06-17|NONE|MAIL| blithe pin|
+482|195826|5827|4|8|15374.56|0.02|0.05|N|O|1996-04-19|1996-05-05|1996-04-21|NONE|TRUCK|tructions near the final, regular ideas de|
+482|38215|3222|5|46|53047.66|0.01|0.06|N|O|1996-07-19|1996-06-05|1996-08-10|NONE|MAIL|furiously thin realms. final, fina|
+482|78696|8697|6|19|31819.11|0.04|0.00|N|O|1996-03-27|1996-04-25|1996-04-15|NONE|FOB|ts hinder carefully silent requests|
+483|32694|5198|1|8|13013.52|0.00|0.08|N|O|1995-08-22|1995-08-23|1995-09-18|COLLECT COD|RAIL|osits. carefully fin|
+483|79758|9759|2|23|39968.25|0.04|0.06|N|O|1995-07-20|1995-08-11|1995-08-04|DELIVER IN PERSON|MAIL|requests was quickly against th|
+483|87745|254|3|9|15594.66|0.04|0.03|N|O|1995-09-10|1995-09-02|1995-09-13|NONE|AIR| carefully express ins|
+484|30133|5140|1|49|52093.37|0.10|0.02|N|O|1997-03-06|1997-02-28|1997-03-23|COLLECT COD|TRUCK|ven accounts|
+484|31950|9460|2|45|84687.75|0.06|0.07|N|O|1997-04-09|1997-03-20|1997-04-19|DELIVER IN PERSON|TRUCK|usly final excuses boost slyly blithe|
+484|183351|5870|3|50|71717.50|0.06|0.05|N|O|1997-01-24|1997-03-27|1997-02-22|DELIVER IN PERSON|MAIL|uctions wake. final, silent requests haggle|
+484|164805|4806|4|22|41135.60|0.07|0.03|N|O|1997-04-29|1997-03-26|1997-05-17|TAKE BACK RETURN|SHIP|es are pending instructions. furiously unu|
+484|76308|1323|5|48|61646.40|0.00|0.05|N|O|1997-03-05|1997-02-08|1997-03-22|TAKE BACK RETURN|MAIL|l, bold packages? even mult|
+484|96871|9381|6|10|18678.70|0.01|0.08|N|O|1997-04-06|1997-02-14|1997-04-16|COLLECT COD|FOB|x fluffily carefully regular|
+485|149523|9524|1|50|78626.00|0.01|0.00|N|O|1997-03-28|1997-05-26|1997-04-18|TAKE BACK RETURN|MAIL|iously quick excuses. carefully final f|
+485|27973|2978|2|40|76038.80|0.08|0.01|N|O|1997-04-29|1997-05-08|1997-04-30|TAKE BACK RETURN|TRUCK|al escapades|
+485|136884|4424|3|22|42259.36|0.00|0.05|N|O|1997-04-06|1997-04-27|1997-05-01|DELIVER IN PERSON|TRUCK|refully final notornis haggle according |
+486|75437|5438|1|36|50847.48|0.00|0.01|N|O|1996-06-25|1996-05-06|1996-07-07|COLLECT COD|AIR|deposits around the quickly regular packa|
+486|67040|7041|2|40|40281.60|0.03|0.08|N|O|1996-05-21|1996-06-06|1996-06-07|COLLECT COD|SHIP|ts nag quickly among the slyl|
+486|135912|8426|3|26|50645.66|0.04|0.03|N|O|1996-03-16|1996-05-25|1996-03-31|NONE|RAIL|forges along the |
+486|71865|6880|4|38|69800.68|0.08|0.05|N|O|1996-05-07|1996-04-26|1996-05-26|TAKE BACK RETURN|TRUCK| blithely final pinto |
+486|28099|5606|5|3|3081.27|0.07|0.05|N|O|1996-07-07|1996-04-20|1996-07-23|DELIVER IN PERSON|RAIL|ccounts ha|
+486|46543|9048|6|46|68518.84|0.00|0.03|N|O|1996-04-18|1996-05-02|1996-04-20|COLLECT COD|AIR|theodolites eat carefully furious|
+487|91896|1897|1|47|88730.83|0.06|0.06|R|F|1992-09-30|1992-10-08|1992-10-24|NONE|TRUCK|tions. blithely reg|
+487|82099|2100|2|2|2162.18|0.02|0.06|R|F|1992-10-19|1992-11-04|1992-11-11|COLLECT COD|TRUCK|oss the unusual pinto beans. reg|
+512|188804|1323|1|19|35963.20|0.08|0.05|N|O|1995-07-12|1995-07-11|1995-08-04|COLLECT COD|MAIL| sleep. requests alongside of the fluff|
+512|22847|7852|2|37|65484.08|0.01|0.04|N|O|1995-06-20|1995-07-05|1995-07-16|NONE|RAIL|nic depths cajole? blithely b|
+512|179419|9420|3|40|59936.40|0.05|0.02|N|O|1995-07-06|1995-07-08|1995-07-08|COLLECT COD|TRUCK|quests are da|
+512|82470|7487|4|10|14524.70|0.09|0.02|N|O|1995-09-16|1995-07-29|1995-10-07|NONE|AIR|xes. pinto beans cajole carefully; |
+512|64154|4155|5|6|6708.90|0.03|0.05|R|F|1995-06-10|1995-06-21|1995-06-16|DELIVER IN PERSON|FOB|en ideas haggle |
+512|32014|4518|6|12|11352.12|0.04|0.00|R|F|1995-05-21|1995-08-03|1995-06-09|COLLECT COD|FOB|old furiously express deposits. specia|
+512|50769|3275|7|2|3439.52|0.09|0.08|N|O|1995-06-19|1995-08-13|1995-06-24|NONE|TRUCK|e slyly silent accounts serve with|
+513|61732|9251|1|20|33874.60|0.09|0.07|N|O|1995-07-12|1995-05-31|1995-07-31|NONE|AIR|efully ironic ideas doze slyl|
+513|121628|9165|2|44|72583.28|0.01|0.01|N|O|1995-07-14|1995-07-14|1995-08-12|NONE|MAIL|kages sleep boldly ironic theodolites. acco|
+514|78713|1221|1|21|35525.91|0.06|0.02|N|O|1996-06-09|1996-05-15|1996-07-07|DELIVER IN PERSON|RAIL|s sleep quickly blithely|
+514|117452|9964|2|34|49961.30|0.08|0.02|N|O|1996-04-14|1996-06-03|1996-04-23|COLLECT COD|REG AIR|ily even patterns. bold, silent instruc|
+514|12812|5314|3|6|10348.86|0.06|0.01|N|O|1996-05-30|1996-06-04|1996-06-28|COLLECT COD|SHIP|as haggle blithely; quickly s|
+514|115362|5363|4|43|59226.48|0.00|0.08|N|O|1996-06-07|1996-05-14|1996-07-01|TAKE BACK RETURN|FOB|thely regular |
+515|104014|6525|1|10|10180.10|0.03|0.02|A|F|1993-10-04|1993-11-03|1993-10-08|NONE|FOB|ar deposits th|
+515|147605|2634|2|38|62798.80|0.10|0.07|A|F|1993-09-19|1993-11-12|1993-10-03|DELIVER IN PERSON|SHIP|ays. furiously express requests haggle furi|
+515|182145|9700|3|11|13498.54|0.00|0.02|R|F|1993-09-04|1993-10-02|1993-09-05|DELIVER IN PERSON|FOB|ly pending accounts haggle blithel|
+515|108606|8607|4|34|54896.40|0.09|0.03|R|F|1993-10-03|1993-10-26|1993-10-15|DELIVER IN PERSON|REG AIR|ic dependencie|
+515|130881|3395|5|32|61180.16|0.01|0.07|R|F|1993-10-10|1993-10-08|1993-11-02|TAKE BACK RETURN|FOB|r sauternes boost. final theodolites wake a|
+515|108692|3713|6|25|42517.25|0.04|0.08|R|F|1993-11-14|1993-11-07|1993-12-03|DELIVER IN PERSON|MAIL|e packages engag|
+516|24974|9979|1|11|20888.67|0.01|0.06|N|O|1998-05-02|1998-05-23|1998-05-12|DELIVER IN PERSON|FOB|ongside of the blithely final reque|
+517|44551|4552|1|28|41875.40|0.03|0.02|N|O|1997-04-30|1997-05-18|1997-05-17|COLLECT COD|MAIL| requests. special, fi|
+517|155391|7907|2|15|21695.85|0.02|0.00|N|O|1997-04-09|1997-06-26|1997-05-01|NONE|TRUCK| slyly. express requests ar|
+517|40932|3437|3|9|16856.37|0.04|0.00|N|O|1997-05-03|1997-06-16|1997-05-24|COLLECT COD|SHIP| slyly stealthily express instructions. |
+517|132197|2198|4|11|13521.09|0.06|0.02|N|O|1997-06-20|1997-06-01|1997-06-27|NONE|REG AIR|ly throughout the fu|
+517|23349|8354|5|23|29263.82|0.00|0.01|N|O|1997-04-19|1997-05-07|1997-05-12|COLLECT COD|RAIL| kindle. furiously bold requests mus|
+518|164711|4712|1|30|53271.30|0.07|0.05|N|O|1998-02-18|1998-03-27|1998-03-16|COLLECT COD|TRUCK|slyly by the packages. carefull|
+518|83164|689|2|23|26384.68|0.05|0.07|N|O|1998-02-20|1998-05-05|1998-03-11|COLLECT COD|TRUCK| special requests. fluffily ironic re|
+518|133178|8205|3|12|14534.04|0.01|0.06|N|O|1998-03-08|1998-03-31|1998-04-06|NONE|AIR| packages thrash slyly|
+518|121990|1991|4|46|92551.54|0.07|0.02|N|O|1998-04-07|1998-04-17|1998-04-29|NONE|MAIL|. blithely even ideas cajole furiously. b|
+518|70019|20|5|16|15824.16|0.01|0.01|N|O|1998-03-15|1998-03-24|1998-04-08|NONE|MAIL|use quickly expre|
+518|196358|1397|6|39|56719.65|0.09|0.08|N|O|1998-02-26|1998-03-17|1998-03-21|DELIVER IN PERSON|FOB| the bold, special deposits are carefully |
+518|185956|8475|7|48|98013.60|0.03|0.07|N|O|1998-03-06|1998-04-22|1998-03-14|NONE|FOB| slyly final platelets; quickly even deposi|
+519|158970|4001|1|1|2028.97|0.07|0.07|N|O|1997-12-01|1998-01-26|1997-12-23|COLLECT COD|REG AIR|bold requests believe furiou|
+519|2946|2947|2|38|70259.72|0.05|0.08|N|O|1998-02-19|1997-12-15|1998-03-19|DELIVER IN PERSON|FOB|gular excuses detect quickly furiously |
+519|105900|921|3|19|36212.10|0.00|0.02|N|O|1998-01-09|1998-01-03|1998-02-06|COLLECT COD|AIR|asymptotes. p|
+519|46267|3780|4|27|32758.02|0.08|0.06|N|O|1997-11-20|1997-12-06|1997-12-16|DELIVER IN PERSON|REG AIR|le. even, final dependencies|
+519|9041|4042|5|13|12350.52|0.06|0.08|N|O|1998-02-06|1997-12-02|1998-03-03|TAKE BACK RETURN|TRUCK|c accounts wake along the ironic so|
+519|150926|5957|6|3|5930.76|0.04|0.00|N|O|1998-02-01|1998-01-25|1998-02-27|TAKE BACK RETURN|FOB|erve blithely blithely ironic asymp|
+544|138474|8475|1|47|71086.09|0.08|0.06|R|F|1993-03-14|1993-03-27|1993-03-27|COLLECT COD|SHIP|ecial pains. deposits grow foxes. |
+545|169547|9548|1|4|6466.16|0.02|0.00|N|O|1996-02-23|1995-12-16|1996-03-21|DELIVER IN PERSON|FOB|, ironic grouches cajole over|
+545|170188|5223|2|18|22647.24|0.00|0.00|N|O|1996-02-21|1996-01-17|1996-02-26|NONE|RAIL|al, final packages affix. even a|
+546|84585|2110|1|16|25113.28|0.08|0.02|N|O|1997-02-04|1996-12-30|1997-02-25|DELIVER IN PERSON|TRUCK|de of the orbits. sometimes regula|
+547|70789|5804|1|44|77430.32|0.08|0.08|N|O|1996-10-18|1996-08-17|1996-10-27|TAKE BACK RETURN|FOB|thely express dependencies. qu|
+547|136347|1374|2|48|66400.32|0.01|0.04|N|O|1996-10-21|1996-08-04|1996-11-20|COLLECT COD|SHIP|thely specia|
+547|181345|6382|3|3|4279.02|0.05|0.02|N|O|1996-09-04|1996-08-01|1996-09-21|COLLECT COD|SHIP|pinto beans. ironi|
+548|196550|6551|1|2|3293.10|0.06|0.05|A|F|1994-11-26|1994-11-06|1994-12-06|COLLECT COD|MAIL|ests haggle quickly eve|
+548|4641|4642|2|6|9273.84|0.00|0.08|A|F|1995-01-18|1994-12-08|1995-02-10|NONE|TRUCK|sits wake furiously regular|
+548|182|7683|3|21|22725.78|0.03|0.08|A|F|1995-01-13|1994-12-18|1995-01-25|NONE|AIR|ideas. special accounts above the furiou|
+548|56720|4236|4|21|35211.12|0.08|0.03|A|F|1994-10-27|1994-12-04|1994-11-21|DELIVER IN PERSON|AIR| engage quickly. regular theo|
+548|92995|523|5|19|37771.81|0.00|0.02|A|F|1994-09-24|1994-11-24|1994-10-01|DELIVER IN PERSON|MAIL|courts boost care|
+548|152753|7784|6|32|57784.00|0.06|0.04|A|F|1994-12-16|1994-11-20|1994-12-29|NONE|REG AIR|c instruction|
+549|195061|100|1|18|20809.08|0.07|0.04|R|F|1992-10-19|1992-08-12|1992-11-13|COLLECT COD|REG AIR|furiously according to the ironic, regular |
+549|188735|8736|2|38|69301.74|0.07|0.05|A|F|1992-08-17|1992-08-28|1992-09-05|COLLECT COD|RAIL|the regular, furious excuses. carefu|
+549|65213|5214|3|36|42415.56|0.08|0.04|R|F|1992-09-11|1992-10-11|1992-09-12|DELIVER IN PERSON|AIR|ts against the ironic, even theodolites eng|
+549|20101|7608|4|18|18379.80|0.09|0.01|A|F|1992-07-31|1992-09-11|1992-08-08|NONE|RAIL|ely regular accounts above the |
+549|23480|987|5|38|53332.24|0.06|0.02|R|F|1992-08-23|1992-08-12|1992-08-25|COLLECT COD|REG AIR|eposits. carefully regular depos|
+550|190307|2827|1|31|43316.30|0.04|0.02|N|O|1995-10-24|1995-09-27|1995-11-04|COLLECT COD|AIR|thely silent packages. unusual|
+551|23786|6289|1|8|13678.24|0.08|0.02|N|O|1995-07-29|1995-07-18|1995-08-02|NONE|REG AIR| wake quickly slyly pending platel|
+551|158813|3844|2|20|37436.20|0.00|0.07|N|O|1995-09-18|1995-08-25|1995-10-11|COLLECT COD|TRUCK|r ideas. final, even ideas hinder alongside|
+551|161089|6122|3|16|18401.28|0.07|0.06|N|O|1995-07-29|1995-08-19|1995-08-10|COLLECT COD|MAIL|y along the carefully ex|
+576|86490|1507|1|2|2952.98|0.07|0.01|N|O|1997-05-15|1997-06-30|1997-05-28|NONE|RAIL|ccounts along the ac|
+576|33096|8103|2|6|6174.54|0.06|0.05|N|O|1997-05-15|1997-07-26|1997-06-03|DELIVER IN PERSON|TRUCK|al deposits. slyly even sauternes a|
+576|36565|9069|3|6|9009.36|0.08|0.07|N|O|1997-08-28|1997-06-16|1997-09-25|DELIVER IN PERSON|FOB|ts. ironic multipliers |
+576|137608|2635|4|5|8228.00|0.03|0.07|N|O|1997-06-11|1997-06-17|1997-07-05|NONE|REG AIR|l foxes boost slyly. accounts af|
+577|25886|891|1|25|45297.00|0.06|0.01|A|F|1995-04-09|1995-02-20|1995-05-09|TAKE BACK RETURN|AIR|ve slyly of the frets. careful|
+577|63233|8246|2|14|16747.22|0.08|0.03|R|F|1995-03-19|1995-02-25|1995-04-09|DELIVER IN PERSON|RAIL|l accounts wake deposits. ironic packa|
+578|155542|5543|1|40|63901.60|0.02|0.08|N|O|1997-02-10|1997-03-18|1997-02-11|NONE|SHIP|usly even platel|
+578|187025|2062|2|23|25576.46|0.05|0.08|N|O|1997-03-06|1997-03-03|1997-03-20|TAKE BACK RETURN|FOB|nstructions. ironic deposits|
+579|150618|5649|1|9|15017.49|0.00|0.05|N|O|1998-06-20|1998-04-28|1998-07-19|DELIVER IN PERSON|RAIL|e ironic, express deposits are furiously|
+579|32145|7152|2|39|42008.46|0.02|0.01|N|O|1998-06-21|1998-06-03|1998-06-26|COLLECT COD|REG AIR|ncies. furiously final r|
+579|59048|4059|3|6|6042.24|0.03|0.00|N|O|1998-04-24|1998-05-03|1998-05-08|TAKE BACK RETURN|TRUCK|ickly final requests-- bold accou|
+579|6189|8690|4|41|44902.38|0.04|0.05|N|O|1998-05-28|1998-05-01|1998-06-04|COLLECT COD|REG AIR|bold, express requests sublate slyly. blith|
+579|12612|5114|5|28|42689.08|0.00|0.03|N|O|1998-07-10|1998-05-24|1998-07-19|NONE|RAIL|ic ideas until th|
+579|166717|9234|6|5|8918.55|0.05|0.08|N|O|1998-05-02|1998-04-25|1998-05-05|COLLECT COD|REG AIR|refully silent ideas cajole furious|
+580|84916|2441|1|33|62730.03|0.03|0.05|N|O|1997-10-11|1997-09-19|1997-10-16|TAKE BACK RETURN|FOB|y express theodolites cajole carefully |
+580|173320|3321|2|31|43192.92|0.04|0.08|N|O|1997-10-04|1997-09-08|1997-10-15|COLLECT COD|FOB|ose alongside of the sl|
+580|184444|6963|3|19|29040.36|0.04|0.04|N|O|1997-07-23|1997-09-21|1997-08-15|NONE|FOB|mong the special packag|
+581|63384|8397|1|41|55242.58|0.09|0.07|N|O|1997-05-26|1997-04-06|1997-06-10|TAKE BACK RETURN|MAIL|nts. quickly|
+581|92527|5037|2|14|21273.28|0.06|0.08|N|O|1997-05-17|1997-04-14|1997-06-08|NONE|MAIL|. deposits s|
+581|100106|5127|3|49|54198.90|0.10|0.02|N|O|1997-02-27|1997-04-24|1997-03-10|TAKE BACK RETURN|MAIL|. slyly regular pinto beans acr|
+581|74925|9940|4|30|56997.60|0.10|0.08|N|O|1997-06-19|1997-05-21|1997-06-22|TAKE BACK RETURN|TRUCK| regular ideas grow furio|
+582|56409|3925|1|7|9557.80|0.07|0.00|N|O|1997-11-16|1997-11-29|1997-12-10|TAKE BACK RETURN|FOB|ithely unusual t|
+582|50262|263|2|49|59400.74|0.05|0.03|N|O|1997-12-17|1998-01-12|1997-12-31|COLLECT COD|REG AIR|nts according to the furiously regular pin|
+582|140309|5338|3|42|56670.60|0.07|0.00|N|O|1997-11-15|1997-12-21|1997-12-03|COLLECT COD|SHIP|iously beside the silent de|
+582|167750|7751|4|36|65439.00|0.06|0.01|N|O|1997-12-09|1997-11-27|1997-12-26|TAKE BACK RETURN|SHIP|lar requests. quickly |
+583|144364|4365|1|1|1408.36|0.07|0.07|N|O|1997-06-17|1997-04-29|1997-06-28|NONE|TRUCK| regular, regular ideas. even, bra|
+583|119625|2137|2|47|77297.14|0.10|0.06|N|O|1997-07-14|1997-05-12|1997-08-11|DELIVER IN PERSON|AIR|nts are fluffily. furiously even re|
+583|129431|1944|3|34|49654.62|0.01|0.02|N|O|1997-05-11|1997-04-24|1997-06-03|DELIVER IN PERSON|MAIL|express req|
+583|141250|8793|4|33|42611.25|0.10|0.01|N|O|1997-05-28|1997-04-25|1997-06-24|NONE|AIR|kages cajole slyly across the|
+583|188537|6092|5|13|21131.89|0.04|0.06|N|O|1997-06-23|1997-05-29|1997-07-08|COLLECT COD|TRUCK|y sly theodolites. ironi|
+608|153579|1125|1|19|31018.83|0.08|0.06|N|O|1996-04-19|1996-05-02|1996-05-03|DELIVER IN PERSON|RAIL|ideas. the|
+608|197310|2349|2|40|56292.40|0.03|0.01|N|O|1996-05-21|1996-04-11|1996-06-02|NONE|AIR| alongside of the regular tithes. sly|
+609|65533|8040|1|21|31469.13|0.01|0.05|R|F|1994-08-24|1994-08-23|1994-08-27|DELIVER IN PERSON|FOB|de of the special warthogs. excu|
+610|110792|5815|1|49|88336.71|0.10|0.07|N|O|1995-08-29|1995-10-26|1995-09-12|TAKE BACK RETURN|SHIP|ular instruc|
+610|67896|5415|2|11|20502.79|0.07|0.08|N|O|1995-10-31|1995-10-25|1995-11-18|NONE|MAIL|blithely final |
+610|117617|7618|3|26|42499.86|0.09|0.04|N|O|1995-11-22|1995-09-09|1995-12-04|TAKE BACK RETURN|AIR|cross the furiously even theodolites sl|
+610|185206|7725|4|17|21950.40|0.03|0.03|N|O|1995-11-01|1995-10-30|1995-11-04|COLLECT COD|FOB|p quickly instead of the slyly pending foxe|
+610|145743|5744|5|39|69760.86|0.08|0.05|N|O|1995-10-30|1995-10-21|1995-11-11|TAKE BACK RETURN|REG AIR|counts. ironic warhorses are |
+610|94365|6875|6|5|6796.80|0.00|0.07|N|O|1995-08-11|1995-10-22|1995-08-26|TAKE BACK RETURN|FOB|n pinto beans. iro|
+610|189280|4317|7|27|36970.56|0.06|0.03|N|O|1995-09-02|1995-09-19|1995-09-15|NONE|REG AIR| ironic pinto beans haggle. blithe|
+611|16855|4359|1|39|69102.15|0.05|0.06|R|F|1993-05-06|1993-04-09|1993-05-22|TAKE BACK RETURN|SHIP|nto beans |
+611|80676|677|2|1|1656.67|0.08|0.07|R|F|1993-05-17|1993-02-26|1993-06-15|DELIVER IN PERSON|MAIL|ts. pending platelets aff|
+611|119545|2057|3|39|61017.06|0.09|0.02|A|F|1993-03-10|1993-03-10|1993-03-17|TAKE BACK RETURN|TRUCK|the evenly bold requests. furious|
+612|184959|9996|1|5|10219.75|0.07|0.00|R|F|1992-11-08|1992-11-20|1992-12-03|TAKE BACK RETURN|RAIL|structions. q|
+612|194864|7384|2|28|54848.08|0.07|0.06|R|F|1993-01-02|1992-12-11|1993-01-30|DELIVER IN PERSON|TRUCK|regular instructions affix bl|
+612|66130|1143|3|49|53710.37|0.00|0.08|A|F|1993-01-08|1992-11-25|1993-01-17|TAKE BACK RETURN|REG AIR|theodolite|
+612|38942|1446|4|28|52666.32|0.05|0.00|A|F|1992-11-12|1992-12-05|1992-12-02|TAKE BACK RETURN|REG AIR|lyly regular asym|
+612|87737|246|5|1|1724.73|0.08|0.04|R|F|1992-12-18|1992-12-13|1992-12-20|TAKE BACK RETURN|FOB| requests.|
+612|188203|5758|6|33|42609.60|0.10|0.03|R|F|1992-11-30|1992-12-01|1992-12-12|COLLECT COD|MAIL|bove the blithely even ideas. careful|
+613|90027|7555|1|17|17289.34|0.06|0.06|N|O|1995-09-23|1995-08-04|1995-10-15|NONE|SHIP|ar dependencie|
+613|78348|5870|2|6|7958.04|0.05|0.05|N|O|1995-08-05|1995-08-09|1995-08-08|TAKE BACK RETURN|MAIL|y ironic deposits eat |
+613|185016|7535|3|3|3303.03|0.03|0.01|N|O|1995-09-27|1995-09-11|1995-10-05|NONE|TRUCK|ccounts cajole. |
+613|158304|8305|4|7|9536.10|0.02|0.04|N|O|1995-09-07|1995-08-02|1995-09-16|DELIVER IN PERSON|MAIL|ously blithely final pinto beans. regula|
+614|194109|9148|1|21|25265.10|0.00|0.03|R|F|1993-03-29|1993-01-06|1993-04-16|TAKE BACK RETURN|TRUCK|arefully. slyly express packag|
+614|186897|9416|2|48|95226.72|0.07|0.07|A|F|1993-03-09|1993-01-19|1993-03-19|DELIVER IN PERSON|SHIP|riously special excuses haggle along the|
+614|166963|4512|3|43|87288.28|0.05|0.00|A|F|1993-03-07|1993-02-22|1993-03-18|DELIVER IN PERSON|SHIP| express accounts wake. slyly ironic ins|
+614|146951|4494|4|14|27971.30|0.04|0.06|A|F|1992-12-03|1993-02-14|1992-12-27|DELIVER IN PERSON|SHIP|ular packages haggle about the pack|
+614|195308|7828|5|30|42099.00|0.08|0.07|R|F|1993-01-16|1993-02-08|1993-02-12|TAKE BACK RETURN|FOB|tructions are f|
+614|136241|1268|6|48|61307.52|0.04|0.08|A|F|1992-12-14|1993-01-22|1993-01-11|NONE|TRUCK| regular platelets cajole quickly eve|
+615|104545|4546|1|36|55783.44|0.10|0.01|A|F|1992-06-01|1992-07-14|1992-06-27|NONE|FOB| packages. carefully final pinto bea|
+640|92997|525|1|49|97509.51|0.09|0.02|R|F|1993-03-27|1993-04-17|1993-04-15|NONE|RAIL|s haggle slyly|
+640|416|2917|2|40|52656.40|0.09|0.05|A|F|1993-05-11|1993-04-11|1993-05-15|COLLECT COD|TRUCK|oach according to the bol|
+640|179475|7027|3|22|34198.34|0.05|0.07|A|F|1993-05-07|1993-04-14|1993-05-21|TAKE BACK RETURN|TRUCK|osits across the slyly regular theodo|
+640|31474|1475|4|45|63246.15|0.07|0.07|R|F|1993-04-15|1993-04-23|1993-04-21|DELIVER IN PERSON|REG AIR|ong the qui|
+641|125192|2729|1|18|21909.42|0.01|0.08|R|F|1993-10-17|1993-10-11|1993-10-29|DELIVER IN PERSON|AIR|p blithely bold packages. quick|
+641|99477|1987|2|1|1476.47|0.09|0.02|R|F|1993-12-03|1993-10-28|1993-12-26|TAKE BACK RETURN|RAIL| nag across the regular foxes.|
+641|94311|6821|3|40|52212.40|0.05|0.06|R|F|1993-11-22|1993-10-20|1993-12-11|DELIVER IN PERSON|REG AIR|lets. furiously regular requests cajo|
+641|70043|5058|4|25|25326.00|0.03|0.02|A|F|1993-12-04|1993-11-18|1993-12-18|TAKE BACK RETURN|FOB|d, regular d|
+641|3794|8795|5|41|69609.39|0.07|0.04|R|F|1993-11-29|1993-10-27|1993-12-04|TAKE BACK RETURN|FOB| asymptotes are quickly. bol|
+642|53624|3625|1|26|41018.12|0.10|0.03|A|F|1994-04-16|1994-02-01|1994-04-27|COLLECT COD|REG AIR|quests according to the unu|
+643|12260|9764|1|28|32823.28|0.00|0.08|A|F|1995-04-13|1995-05-12|1995-04-14|TAKE BACK RETURN|TRUCK|ly regular requests nag sly|
+643|50168|169|2|48|53671.68|0.01|0.02|N|O|1995-07-10|1995-06-07|1995-08-01|NONE|FOB|ly ironic accounts|
+643|162447|4964|3|23|34717.12|0.05|0.03|N|O|1995-07-09|1995-05-18|1995-07-31|COLLECT COD|RAIL|sits are carefully according to the e|
+643|44743|2256|4|39|65821.86|0.08|0.04|A|F|1995-06-08|1995-06-16|1995-06-13|COLLECT COD|RAIL| the pains. carefully s|
+643|189459|4496|5|47|72777.15|0.10|0.03|R|F|1995-04-05|1995-06-14|1995-04-26|DELIVER IN PERSON|RAIL|y against |
+644|133143|5657|1|46|54102.44|0.02|0.01|A|F|1992-05-20|1992-06-14|1992-06-14|DELIVER IN PERSON|RAIL| special requests was sometimes expre|
+644|129821|7358|2|11|20359.02|0.05|0.02|A|F|1992-08-20|1992-07-21|1992-09-11|TAKE BACK RETURN|TRUCK|ealthy pinto beans use carefu|
+644|100047|5068|3|44|46069.76|0.04|0.04|R|F|1992-08-17|1992-07-26|1992-08-20|COLLECT COD|REG AIR|iously ironic pinto beans. bold packa|
+644|79744|7266|4|7|12066.18|0.01|0.02|A|F|1992-05-18|1992-07-01|1992-06-07|COLLECT COD|RAIL| regular requests are blithely. slyly|
+644|49295|9296|5|23|28618.67|0.02|0.04|R|F|1992-07-31|1992-07-28|1992-08-13|DELIVER IN PERSON|TRUCK|uctions nag quickly alongside of t|
+644|84932|2457|6|33|63258.69|0.00|0.07|R|F|1992-08-26|1992-07-27|1992-08-28|NONE|AIR|ages sleep. bold, bo|
+644|50239|2745|7|38|45190.74|0.08|0.06|R|F|1992-05-17|1992-07-10|1992-06-06|TAKE BACK RETURN|MAIL| packages. blithely slow accounts nag quic|
+645|159694|2210|1|33|57871.77|0.01|0.02|A|F|1994-12-09|1995-02-21|1995-01-03|NONE|TRUCK|heodolites b|
+645|169422|9423|2|47|70096.74|0.07|0.05|R|F|1995-02-16|1995-02-15|1995-02-25|COLLECT COD|TRUCK|hely regular instructions alon|
+645|69227|4240|3|46|55026.12|0.10|0.01|A|F|1995-01-04|1995-02-21|1995-01-21|COLLECT COD|REG AIR| regular dependencies across the speci|
+645|95402|421|4|49|68472.60|0.05|0.03|R|F|1995-01-24|1995-01-06|1995-02-17|NONE|TRUCK|y. slyly iron|
+645|4703|7204|5|43|69131.10|0.06|0.02|A|F|1995-02-12|1995-02-27|1995-03-06|TAKE BACK RETURN|REG AIR| furiously accounts. slyly|
+645|33631|8638|6|18|28163.34|0.10|0.08|A|F|1995-03-02|1995-02-08|1995-03-03|COLLECT COD|RAIL|ep. slyly even |
+645|27031|7032|7|9|8622.27|0.03|0.03|A|F|1994-12-25|1995-01-04|1995-01-15|COLLECT COD|REG AIR|special deposits. regular, final th|
+646|108975|6506|1|31|61503.07|0.00|0.05|R|F|1994-12-17|1995-02-16|1995-01-04|COLLECT COD|MAIL|ag furiousl|
+646|126723|6724|2|1|1749.72|0.07|0.01|A|F|1994-12-05|1995-01-07|1994-12-31|TAKE BACK RETURN|MAIL|t blithely regular deposits. quic|
+646|29744|4749|3|24|40169.76|0.06|0.02|A|F|1995-02-20|1994-12-30|1995-03-16|TAKE BACK RETURN|TRUCK|regular accounts haggle dog|
+646|98738|3757|4|34|59048.82|0.01|0.00|R|F|1994-12-28|1994-12-27|1994-12-31|COLLECT COD|SHIP|slow accounts. fluffily idle instructions|
+646|89173|4190|5|17|19756.89|0.04|0.01|A|F|1994-12-31|1994-12-26|1995-01-01|DELIVER IN PERSON|REG AIR|inal packages haggle carefully |
+646|114481|9504|6|40|59819.20|0.10|0.01|R|F|1995-01-01|1995-01-13|1995-01-11|COLLECT COD|TRUCK|ronic packages sleep across th|
+647|16310|8812|1|41|50278.71|0.08|0.08|N|O|1997-11-19|1997-09-24|1997-12-15|COLLECT COD|REG AIR|r instructions. quickly unusu|
+647|112177|7200|2|5|5945.85|0.10|0.00|N|O|1997-09-25|1997-09-22|1997-10-25|TAKE BACK RETURN|AIR|ly express packages haggle caref|
+647|152882|7913|3|15|29023.20|0.08|0.00|N|O|1997-09-23|1997-10-09|1997-10-21|NONE|MAIL|ve the even, bold foxes sleep |
+672|172190|2191|1|41|51749.79|0.06|0.06|R|F|1994-06-20|1994-07-03|1994-06-22|COLLECT COD|REG AIR| dependencies in|
+672|189656|9657|2|9|15710.85|0.03|0.04|R|F|1994-06-25|1994-06-06|1994-07-19|TAKE BACK RETURN|TRUCK|haggle carefully carefully reg|
+672|142390|9933|3|35|50133.65|0.02|0.01|R|F|1994-07-13|1994-06-04|1994-07-14|COLLECT COD|RAIL| dependencies haggle quickly. theo|
+673|70495|5510|1|22|32240.78|0.03|0.02|R|F|1994-03-15|1994-04-27|1994-03-29|TAKE BACK RETURN|TRUCK| the regular, even requests. carefully fin|
+674|101366|3877|1|23|31449.28|0.06|0.07|A|F|1992-10-25|1992-10-15|1992-11-03|COLLECT COD|SHIP|ve the quickly even deposits. blithe|
+674|58285|3296|2|4|4973.12|0.02|0.07|R|F|1992-10-05|1992-11-22|1992-10-22|NONE|RAIL|ly express pinto beans sleep car|
+675|156455|4001|1|1|1511.45|0.04|0.08|N|O|1997-11-27|1997-09-30|1997-12-12|DELIVER IN PERSON|REG AIR|ide of the slyly regular packages. unus|
+675|136633|4173|2|35|58437.05|0.08|0.07|N|O|1997-08-19|1997-10-16|1997-09-17|DELIVER IN PERSON|REG AIR|s. furiously expre|
+675|175802|8320|3|34|63845.20|0.10|0.04|N|O|1997-11-17|1997-10-07|1997-11-27|NONE|FOB|y final accounts unwind around the |
+675|99269|6797|4|15|19023.90|0.09|0.05|N|O|1997-10-18|1997-09-28|1997-11-13|COLLECT COD|TRUCK|posits after the furio|
+675|4669|7170|5|46|72388.36|0.09|0.05|N|O|1997-09-18|1997-10-14|1997-10-01|DELIVER IN PERSON|AIR| deposits along the express foxes |
+676|50972|8488|1|9|17306.73|0.09|0.02|N|O|1997-04-03|1997-02-02|1997-04-08|COLLECT COD|REG AIR|aintain sl|
+676|77668|5190|2|20|32913.20|0.07|0.07|N|O|1997-02-02|1997-02-01|1997-02-11|NONE|REG AIR|riously around the blithely |
+676|162330|2331|3|35|48731.55|0.05|0.01|N|O|1996-12-30|1997-01-13|1997-01-19|DELIVER IN PERSON|RAIL|into beans. blithe|
+676|72825|347|4|24|43147.68|0.01|0.06|N|O|1997-02-05|1997-01-16|1997-03-07|TAKE BACK RETURN|TRUCK|ress, regular dep|
+676|165127|2676|5|31|36955.72|0.01|0.06|N|O|1997-02-06|1997-02-28|1997-03-08|COLLECT COD|TRUCK|ial deposits cajo|
+676|75930|5931|6|33|62895.69|0.09|0.05|N|O|1997-03-02|1997-02-22|1997-03-19|TAKE BACK RETURN|TRUCK|as wake slyly furiously close pinto b|
+676|142123|7152|7|11|12816.32|0.07|0.02|N|O|1997-03-09|1997-03-06|1997-03-31|TAKE BACK RETURN|MAIL|he final acco|
+677|58986|1492|1|32|62239.36|0.04|0.08|R|F|1994-01-06|1994-01-31|1994-02-02|NONE|RAIL|slyly final|
+677|167361|7362|2|39|55706.04|0.00|0.07|R|F|1993-12-19|1994-02-11|1994-01-05|TAKE BACK RETURN|SHIP|ges. furiously regular packages use |
+677|23226|3227|3|46|52864.12|0.01|0.02|R|F|1993-12-02|1994-02-12|1993-12-06|COLLECT COD|RAIL|ng theodolites. furiously unusual theodo|
+677|147638|5181|4|1|1685.63|0.06|0.05|R|F|1993-12-01|1994-01-14|1993-12-26|DELIVER IN PERSON|MAIL|ly. regular |
+677|149613|7156|5|25|41565.25|0.00|0.05|A|F|1994-03-12|1994-02-02|1994-03-28|DELIVER IN PERSON|AIR| packages integrate blithely|
+678|145537|5538|1|20|31650.60|0.05|0.08|R|F|1993-06-21|1993-04-07|1993-07-10|TAKE BACK RETURN|MAIL|furiously express excuses. foxes eat fu|
+678|36553|4063|2|22|32770.10|0.01|0.02|A|F|1993-05-10|1993-04-29|1993-06-08|NONE|REG AIR|de of the carefully even requests. bl|
+678|142489|5004|3|16|24503.68|0.06|0.02|R|F|1993-03-20|1993-04-13|1993-04-16|DELIVER IN PERSON|REG AIR|equests cajole around the carefully regular|
+678|198067|8068|4|48|55922.88|0.08|0.08|R|F|1993-02-28|1993-04-04|1993-03-24|NONE|REG AIR|ithely. slyly express foxes|
+678|97451|7452|5|16|23175.20|0.06|0.04|R|F|1993-03-09|1993-04-18|1993-04-07|NONE|AIR| about the |
+678|42888|2889|6|11|20139.68|0.09|0.00|R|F|1993-04-28|1993-05-16|1993-05-11|COLLECT COD|TRUCK|ess deposits dazzle f|
+679|191759|1760|1|9|16656.75|0.09|0.00|N|O|1995-12-20|1996-01-27|1996-01-07|COLLECT COD|REG AIR|leep slyly. entici|
+704|189981|9982|1|40|82839.20|0.05|0.05|N|O|1997-01-30|1997-01-10|1997-02-20|COLLECT COD|AIR|ggle quickly. r|
+704|3839|3840|2|14|24399.62|0.07|0.08|N|O|1997-02-02|1996-12-26|1997-02-19|DELIVER IN PERSON|REG AIR|ve the quickly final forges. furiously p|
+705|188322|841|1|46|64874.72|0.05|0.06|N|O|1997-04-18|1997-05-06|1997-05-05|DELIVER IN PERSON|SHIP|ss deposits. ironic packa|
+705|116218|3752|2|35|43197.35|0.10|0.04|N|O|1997-03-25|1997-03-20|1997-04-23|TAKE BACK RETURN|FOB|carefully ironic accounts|
+706|196629|9149|1|23|39689.26|0.05|0.00|N|O|1995-12-06|1995-12-02|1995-12-16|COLLECT COD|SHIP|ckey players. requests above the|
+707|154736|4737|1|34|60884.82|0.01|0.02|R|F|1994-12-08|1995-01-15|1995-01-02|NONE|RAIL| dependencies|
+707|42642|5147|2|22|34862.08|0.00|0.06|A|F|1995-01-12|1994-12-28|1995-01-16|DELIVER IN PERSON|REG AIR| kindle ironically|
+708|123805|1342|1|3|5486.40|0.05|0.02|N|O|1998-10-09|1998-09-22|1998-11-07|COLLECT COD|FOB|e slyly pending foxes. |
+708|179124|9125|2|19|22859.28|0.06|0.00|N|O|1998-10-28|1998-09-23|1998-11-25|COLLECT COD|SHIP| requests. even, thin ideas|
+708|121298|8835|3|33|43536.57|0.09|0.06|N|O|1998-09-10|1998-09-20|1998-09-22|COLLECT COD|RAIL|s boost carefully ruthless theodolites. f|
+708|55176|5177|4|5|5655.85|0.07|0.07|N|O|1998-07-22|1998-08-15|1998-07-28|TAKE BACK RETURN|REG AIR|c pinto beans nag after the account|
+708|142490|33|5|36|55169.64|0.08|0.01|N|O|1998-07-16|1998-09-04|1998-08-11|NONE|SHIP|ests. even, regular hockey p|
+708|22352|9859|6|7|8920.45|0.10|0.03|N|O|1998-08-16|1998-08-15|1998-09-10|COLLECT COD|REG AIR|lly express ac|
+709|86203|1220|1|7|8324.40|0.00|0.00|N|O|1998-06-14|1998-06-08|1998-06-18|TAKE BACK RETURN|RAIL| special orbits cajole |
+709|197250|9770|2|15|20208.75|0.08|0.00|N|O|1998-07-10|1998-06-26|1998-08-09|NONE|RAIL|ily regular deposits. sauternes was accor|
+709|168496|1013|3|10|15644.90|0.01|0.02|N|O|1998-06-04|1998-06-30|1998-06-11|NONE|REG AIR|ts cajole boldly |
+709|107229|7230|4|40|49448.80|0.10|0.08|N|O|1998-08-12|1998-06-20|1998-08-20|DELIVER IN PERSON|RAIL|ggle fluffily carefully ironic|
+710|162111|9660|1|47|55136.17|0.06|0.08|A|F|1993-01-18|1993-03-24|1993-01-24|TAKE BACK RETURN|MAIL|usual ideas into th|
+710|192916|2917|2|38|76338.58|0.07|0.02|R|F|1993-04-18|1993-03-12|1993-05-15|COLLECT COD|FOB|sts boost fluffily aft|
+710|138984|6524|3|7|14160.86|0.04|0.06|R|F|1993-01-20|1993-03-28|1993-02-15|TAKE BACK RETURN|REG AIR|xpress, special ideas. bl|
+710|89308|9309|4|25|32432.50|0.00|0.05|R|F|1993-03-31|1993-02-05|1993-04-22|COLLECT COD|FOB|eas detect do|
+710|185454|491|5|12|18473.40|0.01|0.02|A|F|1993-02-18|1993-02-27|1993-03-07|DELIVER IN PERSON|MAIL|ions. slyly express theodolites al|
+710|113665|1199|6|21|35251.86|0.04|0.06|R|F|1993-03-22|1993-03-05|1993-03-27|DELIVER IN PERSON|SHIP|es. furiously p|
+710|159288|6834|7|46|61974.88|0.03|0.07|R|F|1993-04-16|1993-03-27|1993-05-05|COLLECT COD|

<TRUNCATED>

[16/50] [abbrv] carbondata git commit: [CARBONDATA-2435] Remove SDK dependency on spark jars.

Posted by ma...@apache.org.
[CARBONDATA-2435] Remove SDK dependency on spark jars.

[CARBONDATA-2435] Remove SDK dependency on spark jars.
Problem and cause: when the SDK writer is used in a standalone cluster
without Spark jars, an exception is thrown while initializing the
reverse dictionary cache.

Solution: the Carbon SDK does not support dictionary encoding, and this
Spark dependency is needed only for dictionary encoding. Move the
Spark-dependent code inside the dictionary-encoding if block, so that
the SDK flow never has to load a Spark class.
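For readers skimming the patch below, a minimal, self-contained sketch of the pattern this commit introduces (class, enum and method names here are placeholders, not CarbonData APIs): the reverse-dictionary cache is created lazily inside the dictionary-encoding branch, so a no-dictionary SDK column never reaches the Spark-backed cache code.

  import java.util.EnumSet;
  import java.util.Set;

  public class LazyReverseDictionaryCacheSketch {
    enum Encoding { DICTIONARY, DIRECT_DICTIONARY }

    // stand-in for CacheProvider.getInstance().createCache(CacheType.REVERSE_DICTIONARY),
    // the call that previously ran for every column and pulled in Spark classes
    static Object createReverseDictionaryCache() {
      return new Object();
    }

    static String chooseGenerator(Set<Encoding> encodings) {
      if (encodings.contains(Encoding.DIRECT_DICTIONARY)) {
        return "direct dictionary generator (no cache needed)";
      } else if (encodings.contains(Encoding.DICTIONARY)) {
        Object cache = createReverseDictionaryCache();  // created only on this branch
        return "reverse dictionary generator backed by " + cache;
      }
      return "no-dictionary SDK path (cache never created)";
    }

    public static void main(String[] args) {
      // SDK columns carry neither encoding, so the cache code is never touched
      System.out.println(chooseGenerator(EnumSet.noneOf(Encoding.class)));
    }
  }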

This closes #2289


Project: http://git-wip-us.apache.org/repos/asf/carbondata/repo
Commit: http://git-wip-us.apache.org/repos/asf/carbondata/commit/ff5166ef
Tree: http://git-wip-us.apache.org/repos/asf/carbondata/tree/ff5166ef
Diff: http://git-wip-us.apache.org/repos/asf/carbondata/diff/ff5166ef

Branch: refs/heads/spark-2.3
Commit: ff5166ef78c42ca0819d3d9fa439aa56d32953eb
Parents: b2060c6
Author: ajantha-bhat <aj...@gmail.com>
Authored: Wed May 9 18:07:56 2018 +0530
Committer: ravipesala <ra...@gmail.com>
Committed: Thu May 10 16:23:12 2018 +0530

----------------------------------------------------------------------
 .../processing/datatypes/PrimitiveDataType.java | 12 ++++++----
 .../impl/DictionaryFieldConverterImpl.java      | 12 ++++++----
 .../converter/impl/FieldEncoderFactory.java     | 25 +++++++-------------
 .../converter/impl/RowConverterImpl.java        | 13 ++--------
 .../InputProcessorStepWithNoConverterImpl.java  |  2 +-
 5 files changed, 28 insertions(+), 36 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/carbondata/blob/ff5166ef/processing/src/main/java/org/apache/carbondata/processing/datatypes/PrimitiveDataType.java
----------------------------------------------------------------------
diff --git a/processing/src/main/java/org/apache/carbondata/processing/datatypes/PrimitiveDataType.java b/processing/src/main/java/org/apache/carbondata/processing/datatypes/PrimitiveDataType.java
index dee8968..e34c184 100644
--- a/processing/src/main/java/org/apache/carbondata/processing/datatypes/PrimitiveDataType.java
+++ b/processing/src/main/java/org/apache/carbondata/processing/datatypes/PrimitiveDataType.java
@@ -28,6 +28,8 @@ import java.util.Map;
 import org.apache.carbondata.common.logging.LogService;
 import org.apache.carbondata.common.logging.LogServiceFactory;
 import org.apache.carbondata.core.cache.Cache;
+import org.apache.carbondata.core.cache.CacheProvider;
+import org.apache.carbondata.core.cache.CacheType;
 import org.apache.carbondata.core.cache.dictionary.Dictionary;
 import org.apache.carbondata.core.cache.dictionary.DictionaryColumnUniqueIdentifier;
 import org.apache.carbondata.core.constants.CarbonCommonConstants;
@@ -135,7 +137,6 @@ public class PrimitiveDataType implements GenericDataType<Object> {
    * @param parentname
    * @param columnId
    * @param carbonDimension
-   * @param cache
    * @param absoluteTableIdentifier
    * @param client
    * @param useOnePass
@@ -144,9 +145,9 @@ public class PrimitiveDataType implements GenericDataType<Object> {
    * @param isEmptyBadRecords
    */
   public PrimitiveDataType(CarbonColumn carbonColumn, String parentname, String columnId,
-      CarbonDimension carbonDimension, Cache<DictionaryColumnUniqueIdentifier, Dictionary> cache,
-      AbsoluteTableIdentifier absoluteTableIdentifier, DictionaryClient client, Boolean useOnePass,
-      Map<Object, Integer> localCache, String nullFormat, Boolean isEmptyBadRecords) {
+      CarbonDimension carbonDimension, AbsoluteTableIdentifier absoluteTableIdentifier,
+      DictionaryClient client, Boolean useOnePass, Map<Object, Integer> localCache,
+      String nullFormat, Boolean isEmptyBadRecords) {
     this.name = carbonColumn.getColName();
     this.parentname = parentname;
     this.columnId = columnId;
@@ -163,6 +164,9 @@ public class PrimitiveDataType implements GenericDataType<Object> {
         dictionaryGenerator = new DirectDictionary(DirectDictionaryKeyGeneratorFactory
             .getDirectDictionaryGenerator(carbonDimension.getDataType()));
       } else if (carbonDimension.hasEncoding(Encoding.DICTIONARY)) {
+        CacheProvider cacheProvider = CacheProvider.getInstance();
+        Cache<DictionaryColumnUniqueIdentifier, Dictionary> cache =
+            cacheProvider.createCache(CacheType.REVERSE_DICTIONARY);
         Dictionary dictionary = null;
         if (useOnePass) {
           if (CarbonUtil.isFileExistsForGivenColumn(identifier)) {

http://git-wip-us.apache.org/repos/asf/carbondata/blob/ff5166ef/processing/src/main/java/org/apache/carbondata/processing/loading/converter/impl/DictionaryFieldConverterImpl.java
----------------------------------------------------------------------
diff --git a/processing/src/main/java/org/apache/carbondata/processing/loading/converter/impl/DictionaryFieldConverterImpl.java b/processing/src/main/java/org/apache/carbondata/processing/loading/converter/impl/DictionaryFieldConverterImpl.java
index 1fb4086..0757f8a 100644
--- a/processing/src/main/java/org/apache/carbondata/processing/loading/converter/impl/DictionaryFieldConverterImpl.java
+++ b/processing/src/main/java/org/apache/carbondata/processing/loading/converter/impl/DictionaryFieldConverterImpl.java
@@ -24,6 +24,8 @@ import java.util.Map;
 import org.apache.carbondata.common.logging.LogService;
 import org.apache.carbondata.common.logging.LogServiceFactory;
 import org.apache.carbondata.core.cache.Cache;
+import org.apache.carbondata.core.cache.CacheProvider;
+import org.apache.carbondata.core.cache.CacheType;
 import org.apache.carbondata.core.cache.dictionary.Dictionary;
 import org.apache.carbondata.core.cache.dictionary.DictionaryColumnUniqueIdentifier;
 import org.apache.carbondata.core.constants.CarbonCommonConstants;
@@ -64,16 +66,18 @@ public class DictionaryFieldConverterImpl extends AbstractDictionaryFieldConvert
   private boolean isEmptyBadRecord;
 
   public DictionaryFieldConverterImpl(DataField dataField,
-      Cache<DictionaryColumnUniqueIdentifier, Dictionary> cache,
       AbsoluteTableIdentifier absoluteTableIdentifier, String nullFormat, int index,
-      DictionaryClient client, boolean useOnePass,
-      Map<Object, Integer> localCache, boolean isEmptyBadRecord,
-      DictionaryColumnUniqueIdentifier identifier) throws IOException {
+      DictionaryClient client, boolean useOnePass, Map<Object, Integer> localCache,
+      boolean isEmptyBadRecord, DictionaryColumnUniqueIdentifier identifier) throws IOException {
     this.index = index;
     this.carbonDimension = (CarbonDimension) dataField.getColumn();
     this.nullFormat = nullFormat;
     this.isEmptyBadRecord = isEmptyBadRecord;
 
+    CacheProvider cacheProvider = CacheProvider.getInstance();
+    Cache<DictionaryColumnUniqueIdentifier, Dictionary> cache =
+        cacheProvider.createCache(CacheType.REVERSE_DICTIONARY);
+
     // if use one pass, use DictionaryServerClientDictionary
     if (useOnePass) {
       if (CarbonUtil.isFileExistsForGivenColumn(identifier)) {

http://git-wip-us.apache.org/repos/asf/carbondata/blob/ff5166ef/processing/src/main/java/org/apache/carbondata/processing/loading/converter/impl/FieldEncoderFactory.java
----------------------------------------------------------------------
diff --git a/processing/src/main/java/org/apache/carbondata/processing/loading/converter/impl/FieldEncoderFactory.java b/processing/src/main/java/org/apache/carbondata/processing/loading/converter/impl/FieldEncoderFactory.java
index dd28dc6..c6cea65 100644
--- a/processing/src/main/java/org/apache/carbondata/processing/loading/converter/impl/FieldEncoderFactory.java
+++ b/processing/src/main/java/org/apache/carbondata/processing/loading/converter/impl/FieldEncoderFactory.java
@@ -20,8 +20,6 @@ import java.io.IOException;
 import java.util.List;
 import java.util.Map;
 
-import org.apache.carbondata.core.cache.Cache;
-import org.apache.carbondata.core.cache.dictionary.Dictionary;
 import org.apache.carbondata.core.cache.dictionary.DictionaryColumnUniqueIdentifier;
 import org.apache.carbondata.core.dictionary.client.DictionaryClient;
 import org.apache.carbondata.core.metadata.AbsoluteTableIdentifier;
@@ -61,18 +59,15 @@ public class FieldEncoderFactory {
    * Creates the FieldConverter for all dimensions, for measures return null.
    *
    * @param dataField             column schema
-   * @param cache                 dicionary cache.
    * @param absoluteTableIdentifier table identifier
    * @param index                 index of column in the row.
    * @param isEmptyBadRecord
    * @return
    */
   public FieldConverter createFieldEncoder(DataField dataField,
-      Cache<DictionaryColumnUniqueIdentifier, Dictionary> cache,
       AbsoluteTableIdentifier absoluteTableIdentifier, int index, String nullFormat,
-      DictionaryClient client, Boolean useOnePass,
-      Map<Object, Integer> localCache, boolean isEmptyBadRecord)
-      throws IOException {
+      DictionaryClient client, Boolean useOnePass, Map<Object, Integer> localCache,
+      boolean isEmptyBadRecord) throws IOException {
     // Converters are only needed for dimensions and measures it return null.
     if (dataField.getColumn().isDimension()) {
       if (dataField.getColumn().hasEncoding(Encoding.DIRECT_DICTIONARY) &&
@@ -88,7 +83,7 @@ public class FieldEncoderFactory {
             || dataField.getColumn().getColumnSchema().getParentColumnTableRelations().isEmpty()) {
           identifier = new DictionaryColumnUniqueIdentifier(absoluteTableIdentifier,
               dataField.getColumn().getColumnIdentifier(), dataField.getColumn().getDataType());
-          return new DictionaryFieldConverterImpl(dataField, cache, absoluteTableIdentifier,
+          return new DictionaryFieldConverterImpl(dataField, absoluteTableIdentifier,
               nullFormat, index, client, useOnePass, localCache, isEmptyBadRecord,
               identifier);
         } else {
@@ -110,12 +105,12 @@ public class FieldEncoderFactory {
                   parentTableIdentifier);
           identifier = new DictionaryColumnUniqueIdentifier(parentAbsoluteTableIdentifier,
               parentColumnIdentifier, dataField.getColumn().getDataType());
-          return new DictionaryFieldConverterImpl(dataField, cache, parentAbsoluteTableIdentifier,
+          return new DictionaryFieldConverterImpl(dataField, parentAbsoluteTableIdentifier,
               nullFormat, index, null, false, null, isEmptyBadRecord, identifier);
         }
       } else if (dataField.getColumn().isComplex()) {
         return new ComplexFieldConverterImpl(
-            createComplexDataType(dataField, cache, absoluteTableIdentifier,
+            createComplexDataType(dataField, absoluteTableIdentifier,
                 client, useOnePass, localCache, index, nullFormat, isEmptyBadRecord), index);
       } else {
         return new NonDictionaryFieldConverterImpl(dataField, nullFormat, index, isEmptyBadRecord);
@@ -129,10 +124,9 @@ public class FieldEncoderFactory {
    * Create parser for the carbon column.
    */
   public static GenericDataType createComplexDataType(DataField dataField,
-      Cache<DictionaryColumnUniqueIdentifier, Dictionary> cache,
       AbsoluteTableIdentifier absoluteTableIdentifier, DictionaryClient client, Boolean useOnePass,
       Map<Object, Integer> localCache, int index, String nullFormat, Boolean isEmptyBadRecords) {
-    return createComplexType(dataField.getColumn(), dataField.getColumn().getColName(), cache,
+    return createComplexType(dataField.getColumn(), dataField.getColumn().getColName(),
         absoluteTableIdentifier, client, useOnePass, localCache, index, nullFormat,
         isEmptyBadRecords);
   }
@@ -144,7 +138,6 @@ public class FieldEncoderFactory {
    */
 
   private static GenericDataType createComplexType(CarbonColumn carbonColumn, String parentName,
-      Cache<DictionaryColumnUniqueIdentifier, Dictionary> cache,
       AbsoluteTableIdentifier absoluteTableIdentifier, DictionaryClient client, Boolean useOnePass,
       Map<Object, Integer> localCache, int index, String nullFormat, Boolean isEmptyBadRecords) {
     DataType dataType = carbonColumn.getDataType();
@@ -156,7 +149,7 @@ public class FieldEncoderFactory {
           new ArrayDataType(carbonColumn.getColName(), parentName, carbonColumn.getColumnId());
       for (CarbonDimension dimension : listOfChildDimensions) {
         arrayDataType.addChildren(
-            createComplexType(dimension, carbonColumn.getColName(), cache, absoluteTableIdentifier,
+            createComplexType(dimension, carbonColumn.getColName(), absoluteTableIdentifier,
                 client, useOnePass, localCache, index, nullFormat, isEmptyBadRecords));
       }
       return arrayDataType;
@@ -168,7 +161,7 @@ public class FieldEncoderFactory {
           new StructDataType(carbonColumn.getColName(), parentName, carbonColumn.getColumnId());
       for (CarbonDimension dimension : dimensions) {
         structDataType.addChildren(
-            createComplexType(dimension, carbonColumn.getColName(), cache, absoluteTableIdentifier,
+            createComplexType(dimension, carbonColumn.getColName(), absoluteTableIdentifier,
                 client, useOnePass, localCache, index, nullFormat, isEmptyBadRecords));
       }
       return structDataType;
@@ -176,7 +169,7 @@ public class FieldEncoderFactory {
       throw new UnsupportedOperationException("Complex type Map is not supported yet");
     } else {
       return new PrimitiveDataType(carbonColumn, parentName, carbonColumn.getColumnId(),
-          (CarbonDimension) carbonColumn, cache, absoluteTableIdentifier, client, useOnePass,
+          (CarbonDimension) carbonColumn, absoluteTableIdentifier, client, useOnePass,
           localCache, nullFormat, isEmptyBadRecords);
     }
   }

http://git-wip-us.apache.org/repos/asf/carbondata/blob/ff5166ef/processing/src/main/java/org/apache/carbondata/processing/loading/converter/impl/RowConverterImpl.java
----------------------------------------------------------------------
diff --git a/processing/src/main/java/org/apache/carbondata/processing/loading/converter/impl/RowConverterImpl.java b/processing/src/main/java/org/apache/carbondata/processing/loading/converter/impl/RowConverterImpl.java
index 6e5c2e0..8f0557a 100644
--- a/processing/src/main/java/org/apache/carbondata/processing/loading/converter/impl/RowConverterImpl.java
+++ b/processing/src/main/java/org/apache/carbondata/processing/loading/converter/impl/RowConverterImpl.java
@@ -29,11 +29,6 @@ import java.util.concurrent.Future;
 
 import org.apache.carbondata.common.logging.LogService;
 import org.apache.carbondata.common.logging.LogServiceFactory;
-import org.apache.carbondata.core.cache.Cache;
-import org.apache.carbondata.core.cache.CacheProvider;
-import org.apache.carbondata.core.cache.CacheType;
-import org.apache.carbondata.core.cache.dictionary.Dictionary;
-import org.apache.carbondata.core.cache.dictionary.DictionaryColumnUniqueIdentifier;
 import org.apache.carbondata.core.datastore.row.CarbonRow;
 import org.apache.carbondata.core.dictionary.client.DictionaryClient;
 import org.apache.carbondata.core.dictionary.service.DictionaryOnePassService;
@@ -72,8 +67,6 @@ public class RowConverterImpl implements RowConverter {
 
   private ExecutorService executorService;
 
-  private Cache<DictionaryColumnUniqueIdentifier, Dictionary> cache;
-
   private Map<Object, Integer>[] localCaches;
 
   public RowConverterImpl(DataField[] fields, CarbonDataLoadConfiguration configuration,
@@ -85,8 +78,6 @@ public class RowConverterImpl implements RowConverter {
 
   @Override
   public void initialize() throws IOException {
-    CacheProvider cacheProvider = CacheProvider.getInstance();
-    cache = cacheProvider.createCache(CacheType.REVERSE_DICTIONARY);
     String nullFormat =
         configuration.getDataLoadProperty(DataLoadProcessorConstants.SERIALIZATION_NULL_FORMAT)
             .toString();
@@ -102,7 +93,7 @@ public class RowConverterImpl implements RowConverter {
     for (int i = 0; i < fields.length; i++) {
       localCaches[i] = new ConcurrentHashMap<>();
       FieldConverter fieldConverter = FieldEncoderFactory.getInstance()
-          .createFieldEncoder(fields[i], cache, configuration.getTableIdentifier(), i, nullFormat,
+          .createFieldEncoder(fields[i], configuration.getTableIdentifier(), i, nullFormat,
               client, configuration.getUseOnePass(), localCaches[i], isEmptyBadRecord);
       fieldConverterList.add(fieldConverter);
     }
@@ -221,7 +212,7 @@ public class RowConverterImpl implements RowConverter {
       FieldConverter fieldConverter = null;
       try {
         fieldConverter = FieldEncoderFactory.getInstance()
-            .createFieldEncoder(fields[i], cache, configuration.getTableIdentifier(), i, nullFormat,
+            .createFieldEncoder(fields[i], configuration.getTableIdentifier(), i, nullFormat,
                 client, configuration.getUseOnePass(), localCaches[i], isEmptyBadRecord);
       } catch (IOException e) {
         throw new RuntimeException(e);

http://git-wip-us.apache.org/repos/asf/carbondata/blob/ff5166ef/processing/src/main/java/org/apache/carbondata/processing/loading/steps/InputProcessorStepWithNoConverterImpl.java
----------------------------------------------------------------------
diff --git a/processing/src/main/java/org/apache/carbondata/processing/loading/steps/InputProcessorStepWithNoConverterImpl.java b/processing/src/main/java/org/apache/carbondata/processing/loading/steps/InputProcessorStepWithNoConverterImpl.java
index 8c24b7f..77f5260 100644
--- a/processing/src/main/java/org/apache/carbondata/processing/loading/steps/InputProcessorStepWithNoConverterImpl.java
+++ b/processing/src/main/java/org/apache/carbondata/processing/loading/steps/InputProcessorStepWithNoConverterImpl.java
@@ -107,7 +107,7 @@ public class InputProcessorStepWithNoConverterImpl extends AbstractDataLoadProce
         // create a ComplexDataType
         dataFieldsWithComplexDataType.put(srcDataField[i].getColumn().getOrdinal(),
             fieldConverterFactory
-                .createComplexDataType(srcDataField[i], null, configuration.getTableIdentifier(),
+                .createComplexDataType(srcDataField[i], configuration.getTableIdentifier(),
                     null, false, null, i, nullFormat, isEmptyBadRecord));
       }
     }


[24/50] [abbrv] carbondata git commit: [CARBONDATA-2471]Added support for No Dictionary Complex type for Double, Decimal, Date type in SDK

Posted by ma...@apache.org.
[CARBONDATA-2471]Added support for No Dictionary Complex type for Double, Decimal, Date type in SDK

Added support for No Dictionary Complex type for Double, Decimal, Date type in SDK
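As background for the ByteUtil and DataTypeUtil additions below, here is a hedged, self-contained sketch of how a no-dictionary DOUBLE value can round-trip through a byte array (the real methods live in org.apache.carbondata.core.util.ByteUtil; this standalone version only illustrates the idea of storing the IEEE-754 bit pattern big-endian):

  public class DoubleBytesSketch {
    // encode: take the IEEE-754 bit pattern and write it big-endian
    static byte[] toBytes(double val) {
      long bits = Double.doubleToLongBits(val);
      byte[] b = new byte[8];
      for (int i = 7; i >= 0; i--) {
        b[i] = (byte) bits;
        bits >>>= 8;
      }
      return b;
    }

    // decode: rebuild the long from the bytes and convert back to double
    static double toDouble(byte[] value, int offset, int length) {
      long bits = 0;
      for (int i = 0; i < length; i++) {
        bits = (bits << 8) | (value[offset + i] & 0xFFL);
      }
      return Double.longBitsToDouble(bits);
    }

    public static void main(String[] args) {
      byte[] encoded = toBytes(12.6d);
      System.out.println(toDouble(encoded, 0, encoded.length));  // prints 12.6
    }
  }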

This closes #2297


Project: http://git-wip-us.apache.org/repos/asf/carbondata/repo
Commit: http://git-wip-us.apache.org/repos/asf/carbondata/commit/d85fb72e
Tree: http://git-wip-us.apache.org/repos/asf/carbondata/tree/d85fb72e
Diff: http://git-wip-us.apache.org/repos/asf/carbondata/diff/d85fb72e

Branch: refs/heads/spark-2.3
Commit: d85fb72e2f24107769c7b5ce7d454d52cbaee49d
Parents: 3d8b085
Author: kumarvishal09 <ku...@gmail.com>
Authored: Thu May 10 22:52:09 2018 +0530
Committer: ravipesala <ra...@gmail.com>
Committed: Fri May 11 15:38:27 2018 +0530

----------------------------------------------------------------------
 .../scan/complextypes/PrimitiveQueryType.java   |  18 +-
 .../apache/carbondata/core/util/ByteUtil.java   |   8 +
 .../carbondata/core/util/DataTypeUtil.java      |  18 ++
 ...ransactionalCarbonTableWithComplexType.scala | 232 +++++++++++++++++++
 .../command/carbonTableSchemaCommon.scala       |   9 +-
 .../processing/datatypes/PrimitiveDataType.java |  29 ++-
 6 files changed, 297 insertions(+), 17 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/carbondata/blob/d85fb72e/core/src/main/java/org/apache/carbondata/core/scan/complextypes/PrimitiveQueryType.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/scan/complextypes/PrimitiveQueryType.java b/core/src/main/java/org/apache/carbondata/core/scan/complextypes/PrimitiveQueryType.java
index 2db590b..edae4da 100644
--- a/core/src/main/java/org/apache/carbondata/core/scan/complextypes/PrimitiveQueryType.java
+++ b/core/src/main/java/org/apache/carbondata/core/scan/complextypes/PrimitiveQueryType.java
@@ -22,13 +22,16 @@ import java.io.IOException;
 import java.nio.ByteBuffer;
 
 import org.apache.carbondata.core.cache.dictionary.Dictionary;
+import org.apache.carbondata.core.constants.CarbonCommonConstants;
 import org.apache.carbondata.core.datastore.chunk.impl.DimensionRawColumnChunk;
 import org.apache.carbondata.core.keygenerator.directdictionary.DirectDictionaryGenerator;
 import org.apache.carbondata.core.keygenerator.directdictionary.DirectDictionaryKeyGeneratorFactory;
 import org.apache.carbondata.core.keygenerator.mdkey.Bits;
 import org.apache.carbondata.core.metadata.datatype.DataType;
+import org.apache.carbondata.core.metadata.datatype.DataTypes;
 import org.apache.carbondata.core.scan.filter.GenericQueryType;
 import org.apache.carbondata.core.scan.processor.RawBlockletColumnChunks;
+import org.apache.carbondata.core.util.ByteUtil;
 import org.apache.carbondata.core.util.DataTypeUtil;
 
 public class PrimitiveQueryType extends ComplexQueryType implements GenericQueryType {
@@ -46,6 +49,8 @@ public class PrimitiveQueryType extends ComplexQueryType implements GenericQuery
 
   private boolean isDictionary;
 
+  private DirectDictionaryGenerator directDictGenForDate;
+
   public PrimitiveQueryType(String name, String parentname, int blockIndex,
       DataType dataType, int keySize,
       Dictionary dictionary, boolean isDirectDictionary) {
@@ -57,6 +62,8 @@ public class PrimitiveQueryType extends ComplexQueryType implements GenericQuery
     this.parentname = parentname;
     this.isDirectDictionary = isDirectDictionary;
     this.isDictionary = (dictionary != null && isDirectDictionary == false);
+    this.directDictGenForDate =
+        DirectDictionaryKeyGeneratorFactory.getDirectDictionaryGenerator(DataTypes.DATE);
   }
 
   @Override public void addChildren(GenericQueryType children) {
@@ -116,7 +123,16 @@ public class PrimitiveQueryType extends ComplexQueryType implements GenericQuery
       int size = dataBuffer.getInt();
       byte[] value = new byte[size];
       dataBuffer.get(value, 0, size);
-      actualData = DataTypeUtil.getDataBasedOnDataTypeForNoDictionaryColumn(value, this.dataType);
+      if (dataType == DataTypes.DATE) {
+        if (value.length == 0) {
+          actualData = null;
+        } else {
+          actualData = this.directDictGenForDate.getValueFromSurrogate(
+              ByteUtil.toInt(value, 0, CarbonCommonConstants.INT_SIZE_IN_BYTE));
+        }
+      } else {
+        actualData = DataTypeUtil.getDataBasedOnDataTypeForNoDictionaryColumn(value, this.dataType);
+      }
     } else {
       // Dictionary Column
       byte[] data = new byte[keySize];

http://git-wip-us.apache.org/repos/asf/carbondata/blob/d85fb72e/core/src/main/java/org/apache/carbondata/core/util/ByteUtil.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/util/ByteUtil.java b/core/src/main/java/org/apache/carbondata/core/util/ByteUtil.java
index 52fc3c3..661384c 100644
--- a/core/src/main/java/org/apache/carbondata/core/util/ByteUtil.java
+++ b/core/src/main/java/org/apache/carbondata/core/util/ByteUtil.java
@@ -545,6 +545,14 @@ public final class ByteUtil {
     return b;
   }
 
+  public static byte[] toBytes(double val) {
+    return toBytes(Double.doubleToLongBits(val));
+  }
+
+  public static double toDouble(byte[] value, int offset, int length) {
+    return Double.longBitsToDouble(toLong(value, offset, length));
+  }
+
   /**
    * byte[] => long
    */

http://git-wip-us.apache.org/repos/asf/carbondata/blob/d85fb72e/core/src/main/java/org/apache/carbondata/core/util/DataTypeUtil.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/util/DataTypeUtil.java b/core/src/main/java/org/apache/carbondata/core/util/DataTypeUtil.java
index 6967102..9822167 100644
--- a/core/src/main/java/org/apache/carbondata/core/util/DataTypeUtil.java
+++ b/core/src/main/java/org/apache/carbondata/core/util/DataTypeUtil.java
@@ -331,6 +331,10 @@ public final class DataTypeUtil {
       return ByteUtil.toBytes(Integer.parseInt(dimensionValue));
     } else if (actualDataType == DataTypes.LONG) {
       return ByteUtil.toBytes(Long.parseLong(dimensionValue));
+    } else if (actualDataType == DataTypes.DOUBLE) {
+      return ByteUtil.toBytes(Double.parseDouble(dimensionValue));
+    } else if (DataTypes.isDecimal(actualDataType)) {
+      return bigDecimalToByte(new BigDecimal(dimensionValue));
     } else if (actualDataType == DataTypes.TIMESTAMP) {
       Date dateToStr = null;
       DateFormat dateFormatter = null;
@@ -362,6 +366,10 @@ public final class DataTypeUtil {
       return Integer.parseInt(dimensionValue);
     } else if (actualDataType == DataTypes.LONG) {
       return Long.parseLong(dimensionValue);
+    } else if (actualDataType == DataTypes.DOUBLE) {
+      return Double.parseDouble(dimensionValue);
+    } else if (DataTypes.isDecimal(actualDataType)) {
+      return new BigDecimal(dimensionValue);
     } else if (actualDataType == DataTypes.TIMESTAMP) {
       Date dateToStr = null;
       DateFormat dateFormatter = null;
@@ -449,6 +457,16 @@ public final class DataTypeUtil {
           return null;
         }
         return ByteUtil.toLong(dataInBytes, 0, dataInBytes.length) * 1000L;
+      } else if (actualDataType == DataTypes.DOUBLE) {
+        if (isEmptyByteArray(dataInBytes)) {
+          return null;
+        }
+        return ByteUtil.toDouble(dataInBytes, 0, dataInBytes.length);
+      } else if (DataTypes.isDecimal(actualDataType)) {
+        if (isEmptyByteArray(dataInBytes)) {
+          return null;
+        }
+        return converter.convertFromBigDecimalToDecimal(byteToBigDecimal(dataInBytes));
       } else {
         return ByteUtil.toString(dataInBytes, 0, dataInBytes.length);
       }

http://git-wip-us.apache.org/repos/asf/carbondata/blob/d85fb72e/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/createTable/TestNonTransactionalCarbonTableWithComplexType.scala
----------------------------------------------------------------------
diff --git a/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/createTable/TestNonTransactionalCarbonTableWithComplexType.scala b/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/createTable/TestNonTransactionalCarbonTableWithComplexType.scala
new file mode 100644
index 0000000..ccfb231
--- /dev/null
+++ b/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/createTable/TestNonTransactionalCarbonTableWithComplexType.scala
@@ -0,0 +1,232 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.carbondata.spark.testsuite.createTable
+
+import java.io.{File}
+import java.util
+
+
+import org.apache.avro
+import org.apache.commons.io.FileUtils
+import org.apache.commons.lang.CharEncoding
+import org.apache.spark.sql.test.util.QueryTest
+import org.junit.Assert
+import org.scalatest.BeforeAndAfterAll
+import tech.allegro.schema.json2avro.converter.JsonAvroConverter
+import org.apache.carbondata.core.constants.CarbonCommonConstants
+import org.apache.carbondata.core.metadata.datatype.{DataTypes, StructField}
+import org.apache.carbondata.core.util.{CarbonProperties, CarbonUtil}
+import org.apache.carbondata.sdk.file.{CarbonWriter, Field, Schema}
+
+class TestNonTransactionalCarbonTableWithComplexType extends QueryTest with BeforeAndAfterAll {
+
+  var writerPath = new File(this.getClass.getResource("/").getPath
+                            +
+                            "../." +
+                            "./src/test/resources/SparkCarbonFileFormat/WriterOutput/")
+    .getCanonicalPath
+  //getCanonicalPath gives path with \, so code expects /. Need to handle in code ?
+  writerPath = writerPath.replace("\\", "/")
+
+
+  def cleanTestData() = {
+    FileUtils.deleteDirectory(new File(writerPath))
+  }
+
+  override def beforeAll(): Unit = {
+    CarbonProperties.getInstance()
+      .addProperty(CarbonCommonConstants.CARBON_TIMESTAMP_FORMAT,
+        CarbonCommonConstants.CARBON_TIMESTAMP_DEFAULT_FORMAT)
+    sql("DROP TABLE IF EXISTS sdkOutputTable")
+  }
+
+  override def afterAll(): Unit = {
+    sql("DROP TABLE IF EXISTS sdkOutputTable")
+  }
+
+  private def WriteFilesWithAvroWriter(rows: Int,
+      mySchema: String,
+      json: String,
+      fields: Array[Field]) = {
+    // conversion to GenericData.Record
+    val nn = new avro.Schema.Parser().parse(mySchema)
+    val converter = new JsonAvroConverter
+    val record = converter
+      .convertToGenericDataRecord(json.getBytes(CharEncoding.UTF_8), nn)
+
+    try {
+      val writer = CarbonWriter.builder.withSchema(new Schema(fields))
+        .outputPath(writerPath).isTransactionalTable(false)
+        .uniqueIdentifier(System.currentTimeMillis()).buildWriterForAvroInput
+      var i = 0
+      while (i < rows) {
+        writer.write(record)
+        i = i + 1
+      }
+      writer.close()
+    }
+    catch {
+      case e: Exception => {
+        e.printStackTrace()
+        Assert.fail(e.getMessage)
+      }
+    }
+  }
+
+  // test multi level -- 4 levels [array of array of array of struct]
+  def buildAvroTestDataMultiLevel4(rows: Int, options: util.Map[String, String]): Any = {
+    FileUtils.deleteDirectory(new File(writerPath))
+
+    val mySchema =  """ {
+                      |	"name": "address",
+                      |	"type": "record",
+                      |	"fields": [
+                      |		{
+                      |			"name": "name",
+                      |			"type": "string"
+                      |		},
+                      |		{
+                      |			"name": "age",
+                      |			"type": "int"
+                      |		},
+                      |		{
+                      |			"name": "BuildNum",
+                      |			"type": {
+                      |				"type": "array",
+                      |				"items": {
+                      |					"name": "FloorNum",
+                      |					"type": "array",
+                      |					"items": {
+                      |						"name": "doorNum",
+                      |						"type": "array",
+                      |						"items": {
+                      |							"name": "my_address",
+                      |							"type": "record",
+                      |							"fields": [
+                      |								{
+                      |									"name": "street",
+                      |									"type": "string"
+                      |								},
+                      |								{
+                      |									"name": "city",
+                      |									"type": "string"
+                      |								},
+                      |               {
+                      |									"name": "Temperature",
+                      |									"type": "double"
+                      |								},
+                      |               {
+                      |									"name": "WindSpeed",
+                      |									"type": "string"
+                      |								},
+                      |               {
+                      |									"name": "year",
+                      |									"type": "string"
+                      |								}
+                      |							]
+                      |						}
+                      |					}
+                      |				}
+                      |			}
+                      |		}
+                      |	]
+                      |} """.stripMargin
+
+    val json =
+      """ {
+        |	"name": "bob",
+        |	"age": 10,
+        |	"BuildNum": [
+        |		[
+        |			[
+        |				{"street":"abc", "city":"city1", "Temperature":12.6, "WindSpeed":"1234.56", "year":"2018-05-10"},
+        |				{"street":"def", "city":"city2", "Temperature":13.6, "WindSpeed":"1234.56", "year":"2018-05-10"},
+        |				{"street":"cfg", "city":"city3", "Temperature":14.6, "WindSpeed":"1234.56", "year":"2018-05-10"}
+        |			],
+        |			[
+        |				 {"street":"abc1", "city":"city3", "Temperature":12.6, "WindSpeed":"1234.56", "year":"2018-05-10"},
+        |				 {"street":"def1", "city":"city4", "Temperature":12.6, "WindSpeed":"1234.56", "year":"2018-05-10"},
+        |				 {"street":"cfg1", "city":"city5", "Temperature":12.6, "WindSpeed":"1234.56", "year":"2018-05-10"}
+        |			]
+        |		],
+        |		[
+        |			[
+        |				 {"street":"abc2", "city":"cityx", "Temperature":12.6, "WindSpeed":"1234.56", "year":"2018-05-10"},
+        |				 {"street":"abc3", "city":"cityy", "Temperature":12.6, "WindSpeed":"1234.56", "year":"2018-05-10"},
+        |				 {"street":"abc4", "city":"cityz", "Temperature":12.6, "WindSpeed":"1234.56", "year":"2018-05-10"}
+        |			],
+        |			[
+        |				 {"street":"a1bc", "city":"cityA", "Temperature":12.6, "WindSpeed":"1234.56", "year":"2018-05-10"},
+        |				 {"street":"a1bc", "city":"cityB", "Temperature":12.6, "WindSpeed":"1234.56", "year":"2018-05-10"},
+        |				 {"street":"a1bc", "city":"cityc", "Temperature":12.6, "WindSpeed":"1234.56", "year":"2018-05-10"}
+        |			]
+        |		]
+        |	]
+        |} """.stripMargin
+
+    val fields = new Array[Field](3)
+    fields(0) = new Field("name", DataTypes.STRING)
+    fields(1) = new Field("age", DataTypes.INT)
+
+    val subFld = new util.ArrayList[StructField]
+    subFld.add(new StructField("EachDoorNum", DataTypes.INT))
+
+    val address = new util.ArrayList[StructField]
+    address.add(new StructField("street", DataTypes.STRING))
+    address.add(new StructField("city", DataTypes.STRING))
+    address.add(new StructField("Temperature", DataTypes.DOUBLE))
+    address.add(new StructField("WindSpeed", DataTypes.createDecimalType(6,2)))
+    address.add(new StructField("year", DataTypes.DATE))
+
+    val fld = new util.ArrayList[StructField]
+    fld.add(new StructField("DoorNum",
+        DataTypes.createArrayType(DataTypes.createStructType(address)),
+        subFld))
+    // array of struct of struct
+    val doorNum = new util.ArrayList[StructField]
+    doorNum.add(new StructField("FloorNum",
+      DataTypes.createArrayType(
+        DataTypes.createArrayType(DataTypes.createStructType(address))), fld))
+    fields(2) = new Field("BuildNum", "array", doorNum)
+
+    WriteFilesWithAvroWriter(rows, mySchema, json, fields)
+  }
+
+  def buildAvroTestDataMultiLevel4Type(): Any = {
+    FileUtils.deleteDirectory(new File(writerPath))
+    buildAvroTestDataMultiLevel4(3, null)
+  }
+
+  // test multi level -- 4 levels [array of array of array of struct]
+  test("test multi level support : array of array of array of struct") {
+    buildAvroTestDataMultiLevel4Type()
+    assert(new File(writerPath).exists())
+    sql("DROP TABLE IF EXISTS sdkOutputTable")
+    sql(
+      s"""CREATE EXTERNAL TABLE sdkOutputTable STORED BY 'carbondata' LOCATION
+         |'$writerPath' """.stripMargin)
+
+    sql("select * from sdkOutputTable").show(false)
+
+    // TODO: Add a validation
+
+    sql("DROP TABLE sdkOutputTable")
+    // drop table should not delete the files
+    cleanTestData()
+  }
+}

http://git-wip-us.apache.org/repos/asf/carbondata/blob/d85fb72e/integration/spark-common/src/main/scala/org/apache/spark/sql/execution/command/carbonTableSchemaCommon.scala
----------------------------------------------------------------------
diff --git a/integration/spark-common/src/main/scala/org/apache/spark/sql/execution/command/carbonTableSchemaCommon.scala b/integration/spark-common/src/main/scala/org/apache/spark/sql/execution/command/carbonTableSchemaCommon.scala
index 8a12970..a830185 100644
--- a/integration/spark-common/src/main/scala/org/apache/spark/sql/execution/command/carbonTableSchemaCommon.scala
+++ b/integration/spark-common/src/main/scala/org/apache/spark/sql/execution/command/carbonTableSchemaCommon.scala
@@ -393,13 +393,6 @@ class TableNewProcessor(cm: TableModel) {
     var allColumns: Seq[ColumnSchema] = Seq[ColumnSchema]()
     fieldChildren.foreach(fields => {
       fields.foreach(field => {
-        if (!useDictionaryEncoding &&
-            (field.dataType.get.equalsIgnoreCase("double") ||
-             field.dataType.get.equalsIgnoreCase("date") ||
-             field.dataType.get.equalsIgnoreCase("decimal"))) {
-          throw new MalformedCarbonCommandException(s"DICTIONARY_EXCLUDE is unsupported for ${
-            field.dataType.get} data type column: ${ field.column }")
-        }
         val encoders = new java.util.ArrayList[Encoding]()
         if (useDictionaryEncoding) {
           encoders.add(Encoding.DICTIONARY)
@@ -439,7 +432,7 @@ class TableNewProcessor(cm: TableModel) {
       if (highCardinalityDims.contains(colName)) {
         encoders.remove(Encoding.DICTIONARY)
       }
-    if (dataType == DataTypes.DATE) {
+    if (dataType == DataTypes.DATE && useDictionaryEncoding) {
         encoders.add(Encoding.DIRECT_DICTIONARY)
       }
       if (dataType == DataTypes.TIMESTAMP &&

http://git-wip-us.apache.org/repos/asf/carbondata/blob/d85fb72e/processing/src/main/java/org/apache/carbondata/processing/datatypes/PrimitiveDataType.java
----------------------------------------------------------------------
diff --git a/processing/src/main/java/org/apache/carbondata/processing/datatypes/PrimitiveDataType.java b/processing/src/main/java/org/apache/carbondata/processing/datatypes/PrimitiveDataType.java
index fdfc3bb..481c811 100644
--- a/processing/src/main/java/org/apache/carbondata/processing/datatypes/PrimitiveDataType.java
+++ b/processing/src/main/java/org/apache/carbondata/processing/datatypes/PrimitiveDataType.java
@@ -46,6 +46,7 @@ import org.apache.carbondata.core.metadata.datatype.DataTypes;
 import org.apache.carbondata.core.metadata.encoder.Encoding;
 import org.apache.carbondata.core.metadata.schema.table.column.CarbonColumn;
 import org.apache.carbondata.core.metadata.schema.table.column.CarbonDimension;
+import org.apache.carbondata.core.util.ByteUtil;
 import org.apache.carbondata.core.util.CarbonUtil;
 import org.apache.carbondata.core.util.DataTypeUtil;
 import org.apache.carbondata.processing.loading.converter.BadRecordLogHolder;
@@ -108,6 +109,7 @@ public class PrimitiveDataType implements GenericDataType<Object> {
 
   private String nullformat;
 
+  private boolean isDirectDictionary;
 
   private PrimitiveDataType(int outputArrayIndex, int dataCounter) {
     this.outputArrayIndex = outputArrayIndex;
@@ -162,9 +164,11 @@ public class PrimitiveDataType implements GenericDataType<Object> {
         new DictionaryColumnUniqueIdentifier(absoluteTableIdentifier,
             carbonDimension.getColumnIdentifier(), carbonDimension.getDataType());
     try {
-      if (carbonDimension.hasEncoding(Encoding.DIRECT_DICTIONARY)) {
+      if (carbonDimension.hasEncoding(Encoding.DIRECT_DICTIONARY)
+          || carbonColumn.getDataType() == DataTypes.DATE) {
         dictionaryGenerator = new DirectDictionary(DirectDictionaryKeyGeneratorFactory
             .getDirectDictionaryGenerator(carbonDimension.getDataType()));
+        isDirectDictionary = true;
       } else if (carbonDimension.hasEncoding(Encoding.DICTIONARY)) {
         CacheProvider cacheProvider = CacheProvider.getInstance();
         Cache<DictionaryColumnUniqueIdentifier, Dictionary> cache =
@@ -307,15 +311,24 @@ public class PrimitiveDataType implements GenericDataType<Object> {
         } else if (this.carbonDimension.getDataType() == DataTypes.TIMESTAMP) {
           dateFormat = carbonDimension.getTimestampFormat();
         }
-
         try {
           if (!this.carbonDimension.getUseActualData()) {
-            byte[] value = DataTypeUtil.getBytesBasedOnDataTypeForNoDictionaryColumn(parsedValue,
-                this.carbonDimension.getDataType(), dateFormat);
-            if (this.carbonDimension.getDataType() == DataTypes.STRING
-                && value.length > CarbonCommonConstants.MAX_CHARS_PER_COLUMN_DEFAULT) {
-              throw new CarbonDataLoadingException("Dataload failed, String size cannot exceed "
-                  + CarbonCommonConstants.MAX_CHARS_PER_COLUMN_DEFAULT + " bytes");
+            byte[] value = null;
+            if (isDirectDictionary) {
+              int surrogateKey = dictionaryGenerator.getOrGenerateKey(parsedValue);
+              if (surrogateKey == CarbonCommonConstants.INVALID_SURROGATE_KEY) {
+                value = new byte[0];
+              } else {
+                value = ByteUtil.toBytes(surrogateKey);
+              }
+            } else {
+              value = DataTypeUtil.getBytesBasedOnDataTypeForNoDictionaryColumn(parsedValue,
+                  this.carbonDimension.getDataType(), dateFormat);
+              if (this.carbonDimension.getDataType() == DataTypes.STRING
+                  && value.length > CarbonCommonConstants.MAX_CHARS_PER_COLUMN_DEFAULT) {
+                throw new CarbonDataLoadingException("Dataload failed, String size cannot exceed "
+                    + CarbonCommonConstants.MAX_CHARS_PER_COLUMN_DEFAULT + " bytes");
+              }
             }
             updateValueToByteStream(dataOutputStream, value);
           } else {


[25/50] [abbrv] carbondata git commit: [CARBONDATA-2369] updated the document about AVRO to carbon schema converter

Posted by ma...@apache.org.
[CARBONDATA-2369] updated the document about AVRO to carbon schema converter

updated the document about AVRO to carbon schema converter
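The helper documented by this change takes a JSON-formatted Avro schema string and returns an SDK Schema, so callers no longer have to build the Field[] array by hand. A minimal usage sketch (the schema string below is just an example):

  import org.apache.carbondata.sdk.file.AvroCarbonWriter;
  import org.apache.carbondata.sdk.file.Schema;

  public class AvroSchemaConversionExample {
    public static void main(String[] args) {
      String avroSchema = "{\"type\":\"record\",\"name\":\"Acme\",\"fields\":["
          + "{\"name\":\"fname\",\"type\":\"string\"},"
          + "{\"name\":\"age\",\"type\":\"int\"}]}";
      // convert the Avro schema to the carbon SDK schema
      Schema carbonSchema = AvroCarbonWriter.getCarbonSchemaFromAvroSchema(avroSchema);
      // carbonSchema can then be passed to CarbonWriter.builder().withSchema(carbonSchema)
    }
  }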

This closes #2296


Project: http://git-wip-us.apache.org/repos/asf/carbondata/repo
Commit: http://git-wip-us.apache.org/repos/asf/carbondata/commit/443b717d
Tree: http://git-wip-us.apache.org/repos/asf/carbondata/tree/443b717d
Diff: http://git-wip-us.apache.org/repos/asf/carbondata/diff/443b717d

Branch: refs/heads/spark-2.3
Commit: 443b717d20e0eedf8be75dae177d573e53c050fc
Parents: d85fb72
Author: ajantha-bhat <aj...@gmail.com>
Authored: Thu May 10 21:03:28 2018 +0530
Committer: kumarvishal09 <ku...@gmail.com>
Committed: Fri May 11 15:47:17 2018 +0530

----------------------------------------------------------------------
 README.md                |  1 +
 docs/sdk-writer-guide.md | 25 ++++++++++++++++++-------
 2 files changed, 19 insertions(+), 7 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/carbondata/blob/443b717d/README.md
----------------------------------------------------------------------
diff --git a/README.md b/README.md
index 3f45917..4b4577e 100644
--- a/README.md
+++ b/README.md
@@ -43,6 +43,7 @@ CarbonData is built using Apache Maven, to [build CarbonData](https://github.com
 * [Cluster Installation and Deployment](https://github.com/apache/carbondata/blob/master/docs/installation-guide.md)
 * [Configuring Carbondata](https://github.com/apache/carbondata/blob/master/docs/configuration-parameters.md)
 * [Streaming Ingestion](https://github.com/apache/carbondata/blob/master/docs/streaming-guide.md)
+* [SDK Writer Guide](https://github.com/apache/carbondata/blob/master/docs/sdk-writer-guide.md)
 * [CarbonData Pre-aggregate DataMap](https://github.com/apache/carbondata/blob/master/docs/datamap/preaggregate-datamap-guide.md)
 * [CarbonData Timeseries DataMap](https://github.com/apache/carbondata/blob/master/docs/datamap/timeseries-datamap-guide.md)
 * [FAQ](https://github.com/apache/carbondata/blob/master/docs/faq.md)

http://git-wip-us.apache.org/repos/asf/carbondata/blob/443b717d/docs/sdk-writer-guide.md
----------------------------------------------------------------------
diff --git a/docs/sdk-writer-guide.md b/docs/sdk-writer-guide.md
index 18b583a..9878b71 100644
--- a/docs/sdk-writer-guide.md
+++ b/docs/sdk-writer-guide.md
@@ -52,6 +52,7 @@ import java.io.IOException;
 
 import org.apache.carbondata.common.exceptions.sql.InvalidLoadOptionException;
 import org.apache.carbondata.core.metadata.datatype.DataTypes;
+import org.apache.carbondata.sdk.file.AvroCarbonWriter;
 import org.apache.carbondata.sdk.file.CarbonWriter;
 import org.apache.carbondata.sdk.file.Field;
 
@@ -75,25 +76,24 @@ public class TestSdkAvro {
             "   \"type\" : \"record\"," +
             "   \"name\" : \"Acme\"," +
             "   \"fields\" : ["
-            + "{ \"name\" : \"name\", \"type\" : \"string\" },"
+            + "{ \"name\" : \"fname\", \"type\" : \"string\" },"
             + "{ \"name\" : \"age\", \"type\" : \"int\" }]" +
             "}";
 
-    String json = "{\"name\":\"bob\", \"age\":10}";
+    String json = "{\"fname\":\"bob\", \"age\":10}";
 
     // conversion to GenericData.Record
     JsonAvroConverter converter = new JsonAvroConverter();
     GenericData.Record record = converter.convertToGenericDataRecord(
         json.getBytes(CharEncoding.UTF_8), new org.apache.avro.Schema.Parser().parse(avroSchema));
 
-    // for sdk schema
-    Field[] fields = new Field[2];
-    fields[0] = new Field("name", DataTypes.STRING);
-    fields[1] = new Field("age", DataTypes.STRING);
+    // prepare carbon schema from avro schema 
+    org.apache.carbondata.sdk.file.Schema carbonSchema =
+            AvroCarbonWriter.getCarbonSchemaFromAvroSchema(avroSchema);
 
     try {
       CarbonWriter writer = CarbonWriter.builder()
-          .withSchema(new org.apache.carbondata.sdk.file.Schema(fields))
+          .withSchema(carbonSchema)
           .outputPath(path)
           .buildWriterForAvroInput();
 
@@ -345,4 +345,15 @@ public Schema(Field[] fields);
 * @return Schema
 */
 public static Schema parseJson(String json);
+```
+
+### Class org.apache.carbondata.sdk.file.AvroCarbonWriter
+```
+/**
+* converts avro schema to carbon schema, required by carbonWriter
+*
+* @param avroSchemaString json formatted avro schema as string
+* @return carbon sdk schema
+*/
+public static org.apache.carbondata.sdk.file.Schema getCarbonSchemaFromAvroSchema(String avroSchemaString);
 ```
\ No newline at end of file


[35/50] [abbrv] carbondata git commit: [CARBONDATA-2475] Support Modular Core for Materialized View DataMap for query matching and rewriting

Posted by ma...@apache.org.
http://git-wip-us.apache.org/repos/asf/carbondata/blob/2881c6bb/integration/spark2/src/main/spark2.2/org/apache/spark/sql/hive/CarbonAnalyzer.scala
----------------------------------------------------------------------
diff --git a/integration/spark2/src/main/spark2.2/org/apache/spark/sql/hive/CarbonAnalyzer.scala b/integration/spark2/src/main/spark2.2/org/apache/spark/sql/hive/CarbonAnalyzer.scala
index 88beb68..dfb89fd 100644
--- a/integration/spark2/src/main/spark2.2/org/apache/spark/sql/hive/CarbonAnalyzer.scala
+++ b/integration/spark2/src/main/spark2.2/org/apache/spark/sql/hive/CarbonAnalyzer.scala
@@ -20,15 +20,32 @@ import org.apache.spark.sql.SparkSession
 import org.apache.spark.sql.catalyst.analysis.Analyzer
 import org.apache.spark.sql.catalyst.catalog.SessionCatalog
 import org.apache.spark.sql.catalyst.plans.logical.LogicalPlan
+import org.apache.spark.sql.catalyst.rules.Rule
 import org.apache.spark.sql.internal.SQLConf
+import org.apache.spark.util.CarbonReflectionUtils
 
 class CarbonAnalyzer(catalog: SessionCatalog,
     conf: SQLConf,
     sparkSession: SparkSession,
     analyzer: Analyzer) extends Analyzer(catalog, conf) {
+
+  val mvPlan = try {
+    CarbonReflectionUtils.createObject(
+      "org.apache.carbondata.mv.datamap.MVAnalyzerRule",
+      sparkSession)._1.asInstanceOf[Rule[LogicalPlan]]
+  } catch {
+    case e: Exception =>
+      null
+  }
+
   override def execute(plan: LogicalPlan): LogicalPlan = {
     var logicalPlan = analyzer.execute(plan)
     logicalPlan = CarbonPreAggregateDataLoadingRules(sparkSession).apply(logicalPlan)
-    CarbonPreAggregateQueryRules(sparkSession).apply(logicalPlan)
+    logicalPlan = CarbonPreAggregateQueryRules(sparkSession).apply(logicalPlan)
+    if (mvPlan != null) {
+      mvPlan.apply(logicalPlan)
+    } else {
+      logicalPlan
+    }
   }
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/carbondata/blob/2881c6bb/pom.xml
----------------------------------------------------------------------
diff --git a/pom.xml b/pom.xml
index 7273c76..e9551c0 100644
--- a/pom.xml
+++ b/pom.xml
@@ -640,6 +640,7 @@
       <id>mv</id>
       <modules>
         <module>datamap/mv/plan</module>
+        <module>datamap/mv/core</module>
       </modules>
     </profile>
   </profiles>


[37/50] [abbrv] carbondata git commit: [CARBONDATA-2475] Support Modular Core for Materialized View DataMap for query matching and rewriting

Posted by ma...@apache.org.
http://git-wip-us.apache.org/repos/asf/carbondata/blob/2881c6bb/integration/spark-common-test/src/test/resources/tpch/region.csv
----------------------------------------------------------------------
diff --git a/integration/spark-common-test/src/test/resources/tpch/region.csv b/integration/spark-common-test/src/test/resources/tpch/region.csv
new file mode 100644
index 0000000..c5ebb63
--- /dev/null
+++ b/integration/spark-common-test/src/test/resources/tpch/region.csv
@@ -0,0 +1,5 @@
+0|AFRICA|lar deposits. blithely final packages cajole. regular waters are final requests. regular accounts are according to |
+1|AMERICA|hs use ironic, even requests. s|
+2|ASIA|ges. thinly even pinto beans ca|
+3|EUROPE|ly final courts cajole furiously final excuse|
+4|MIDDLE EAST|uickly special accounts cajole carefully blithely close requests. carefully final asymptotes haggle furiousl|


[20/50] [abbrv] carbondata git commit: [HOTFIX] Fixed checkstyle issue

Posted by ma...@apache.org.
[HOTFIX] Fixed checkstyle issue

Fixed checkstyle issue

This closes #2298


Project: http://git-wip-us.apache.org/repos/asf/carbondata/repo
Commit: http://git-wip-us.apache.org/repos/asf/carbondata/commit/8e7fceb7
Tree: http://git-wip-us.apache.org/repos/asf/carbondata/tree/8e7fceb7
Diff: http://git-wip-us.apache.org/repos/asf/carbondata/diff/8e7fceb7

Branch: refs/heads/spark-2.3
Commit: 8e7fceb719e8c471415bf96baca132428424f961
Parents: 1dfbcfc
Author: kumarvishal09 <ku...@gmail.com>
Authored: Thu May 10 23:34:36 2018 +0530
Committer: kunal642 <ku...@gmail.com>
Committed: Thu May 10 23:39:36 2018 +0530

----------------------------------------------------------------------
 .../core/readcommitter/LatestFilesReadCommittedScope.java          | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/carbondata/blob/8e7fceb7/core/src/main/java/org/apache/carbondata/core/readcommitter/LatestFilesReadCommittedScope.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/readcommitter/LatestFilesReadCommittedScope.java b/core/src/main/java/org/apache/carbondata/core/readcommitter/LatestFilesReadCommittedScope.java
index 8abf537..de7f8a9 100644
--- a/core/src/main/java/org/apache/carbondata/core/readcommitter/LatestFilesReadCommittedScope.java
+++ b/core/src/main/java/org/apache/carbondata/core/readcommitter/LatestFilesReadCommittedScope.java
@@ -125,7 +125,7 @@ public class LatestFilesReadCommittedScope implements ReadCommittedScope {
     if (file == null) {
       // For nonTransactional table, files can be removed at any point of time.
       // So cannot assume files will be present
-      throw new IOException("No files are present in the table location :"+ carbonFilePath);
+      throw new IOException("No files are present in the table location :" + carbonFilePath);
     }
     Map<String, List<String>> indexFileStore = new HashMap<>();
     if (file.isDirectory()) {