Posted to commits@calcite.apache.org by jc...@apache.org on 2018/02/17 03:28:32 UTC

[1/4] calcite git commit: [CALCITE-2170] Use Druid Expressions capabilities to improve the amount of work that can be pushed to Druid

Repository: calcite
Updated Branches:
  refs/heads/master 707f4de9c -> 98f3704ea
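
[CALCITE-2170] teaches the Druid adapter to use Druid's expression language:
casts, numeric comparisons and post-aggregation projections that previously
fell back to Calcite's Bindable operators are now serialized into the Druid
query itself (note the new 'virtualColumns' entries, the plain-string
'granularity':'all', and the explicit 'outputName'/'outputType' in every
dimension spec in the expected strings below). As a minimal sketch of the
new expectation style (assuming only the sql() and druidChecker() fixtures
of DruidAdapterIT that are visible in this diff), such a test looks like:

  @Test public void testDimensionSpecSketch() {
    // Distinct on a string dimension should push to Druid as a groupBy whose
    // dimension spec names its output column and type explicitly.
    final String sql = "select distinct \"state_province\" from \"foodmart\"";
    final String druidQuery = "{'queryType':'groupBy','dataSource':'foodmart',"
        + "'granularity':'all',"
        + "'dimensions':[{'type':'default','dimension':'state_province',"
        + "'outputName':'state_province','outputType':'STRING'}]";
    sql(sql)
        .returnsUnordered("state_province=CA", "state_province=OR",
            "state_province=WA")
        .queryContains(druidChecker(druidQuery));
  }

The checker treats single quotes in the expected fragments as double quotes
before matching, which keeps the JSON readable as Java string literals.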


http://git-wip-us.apache.org/repos/asf/calcite/blob/98f3704e/druid/src/test/java/org/apache/calcite/test/DruidAdapterIT.java
----------------------------------------------------------------------
diff --git a/druid/src/test/java/org/apache/calcite/test/DruidAdapterIT.java b/druid/src/test/java/org/apache/calcite/test/DruidAdapterIT.java
index 9d3191c..7c7ba4a 100644
--- a/druid/src/test/java/org/apache/calcite/test/DruidAdapterIT.java
+++ b/druid/src/test/java/org/apache/calcite/test/DruidAdapterIT.java
@@ -35,7 +35,6 @@ import com.google.common.collect.ImmutableList;
 import com.google.common.collect.ImmutableMap;
 import com.google.common.collect.Multimap;
 
-import org.junit.Ignore;
 import org.junit.Test;
 
 import java.net.URL;
@@ -101,6 +100,8 @@ public class DruidAdapterIT {
   private static final String VARCHAR_TYPE =
       "VARCHAR CHARACTER SET \"ISO-8859-1\" COLLATE \"ISO-8859-1$en_US$primary\"";
 
+  private static final String FOODMART_TABLE = "\"foodmart\"";
+
   /** Whether to run this test. */
   protected boolean enabled() {
     return ENABLED;
@@ -197,8 +198,9 @@ public class DruidAdapterIT {
         + "intervals=[[1900-01-01T00:00:00.000Z/3000-01-01T00:00:00.000Z]], "
         + "filter=[=($17, 'Jeremy Corbyn')], groups=[{7}], aggs=[[]])\n";
     final String druidQuery = "{'queryType':'groupBy',"
-        + "'dataSource':'wikiticker','granularity':{'type':'all'},"
-        + "'dimensions':[{'type':'default','dimension':'countryName'}],'limitSpec':{'type':'default'},"
+        + "'dataSource':'wikiticker','granularity':'all',"
+        + "'dimensions':[{'type':'default','dimension':'countryName','outputName':'countryName',"
+        + "'outputType':'STRING'}],'limitSpec':{'type':'default'},"
         + "'filter':{'type':'selector','dimension':'page','value':'Jeremy Corbyn'},"
         + "'aggregations':[],"
         + "'intervals':['1900-01-01T00:00:00.000Z/3000-01-01T00:00:00.000Z']}";
@@ -239,17 +241,18 @@ public class DruidAdapterIT {
         + "from \"wikiticker\"\n"
         + "limit 1\n";
     final String explain =
-        "DruidQuery(table=[[wiki, wikiticker]], intervals=[[1900-01-01T00:00:00.000Z/3000-01-01T00:00:00.000Z]], projects=[[$0]], fetch=[1])\n";
+        "PLAN=EnumerableInterpreter\n"
+            + "  DruidQuery(table=[[wiki, wikiticker]], intervals=[[1900-01-01T00:00:00.000Z/"
+            + "3000-01-01T00:00:00.000Z]], projects=[[CAST($0):TIMESTAMP(0) NOT NULL]], fetch=[1])";
     final String druidQuery = "{'queryType':'scan',"
         + "'dataSource':'wikiticker',"
         + "'intervals':['1900-01-01T00:00:00.000Z/3000-01-01T00:00:00.000Z'],"
-        + "'columns':['__time'],'granularity':{'type':'all'},"
+        + "'columns':['__time'],'granularity':'all',"
         + "'resultFormat':'compactedList','limit':1}";
 
     sql(sql, WIKI_AUTO2)
         .returnsUnordered("__time=2015-09-12 00:46:58")
-        .explainContains(explain)
-        .queryContains(druidChecker(druidQuery));
+        .explainContains(explain);
   }
 
   @Test public void testSelectTimestampColumnNoTables3() {
@@ -260,7 +263,8 @@ public class DruidAdapterIT {
         + "from \"wikiticker\"\n"
         + "group by floor(\"__time\" to DAY)";
     final String explain =
-        "DruidQuery(table=[[wiki, wikiticker]], intervals=[[1900-01-01T00:00:00.000Z/3000-01-01T00:00:00.000Z]], projects=[[FLOOR($0, FLAG(DAY)), $1]], groups=[{0}], aggs=[[SUM($1)]])\n";
+        "PLAN=EnumerableInterpreter\n"
+            + "  DruidQuery(table=[[wiki, wikiticker]], intervals=[[1900-01-01T00:00:00.000Z/3000-01-01T00:00:00.000Z]], projects=[[FLOOR($0, FLAG(DAY)), $1]], groups=[{0}], aggs=[[SUM($1)]], post_projects=[[CAST($0):TIMESTAMP(0) NOT NULL, $1]])";
     final String druidQuery = "{'queryType':'timeseries',"
         + "'dataSource':'wikiticker','descending':false,'granularity':{'type':'period','period':'P1D','timeZone':'UTC'},"
         + "'aggregations':[{'type':'longSum','name':'EXPR$1','fieldName':'added'}],"
@@ -281,10 +285,7 @@ public class DruidAdapterIT {
         + "group by \"page\", floor(\"__time\" to DAY)\n"
         + "order by \"s\" desc";
     final String explain = "PLAN=EnumerableInterpreter\n"
-        + "  BindableProject(s=[$2], page=[$0], day=[CAST($1):TIMESTAMP(0) NOT NULL])\n"
-        + "    DruidQuery(table=[[wiki, wikiticker]], "
-        + "intervals=[[1900-01-01T00:00:00.000Z/3000-01-01T00:00:00.000Z]], projects=[[$17, FLOOR"
-        + "($0, FLAG(DAY)), $1]], groups=[{0, 1}], aggs=[[SUM($2)]], sort0=[2], dir0=[DESC])";
+        + "  DruidQuery(table=[[wiki, wikiticker]], intervals=[[1900-01-01T00:00:00.000Z/3000-01-01T00:00:00.000Z]], projects=[[$17, FLOOR($0, FLAG(DAY)), $1]], groups=[{0, 1}], aggs=[[SUM($2)]], post_projects=[[$2, $0, CAST($1):TIMESTAMP(0) NOT NULL]], sort0=[0], dir0=[DESC])";
     sql(sql, WIKI_AUTO2)
         .limit(1)
         .returnsUnordered("s=199818; page=User:QuackGuru/Electronic cigarettes 1; "
@@ -319,8 +320,9 @@ public class DruidAdapterIT {
         + "from \"" + tableName + "\"\n"
         + "where \"page\" = 'Jeremy Corbyn'";
     final String druidQuery = "{'queryType':'groupBy',"
-        + "'dataSource':'wikiticker','granularity':{'type':'all'},"
-        + "'dimensions':[{'type':'default','dimension':'countryName'}],'limitSpec':{'type':'default'},"
+        + "'dataSource':'wikiticker','granularity':'all',"
+        + "'dimensions':[{'type':'default','dimension':'countryName','outputName':'countryName',"
+        + "'outputType':'STRING'}],'limitSpec':{'type':'default'},"
         + "'filter':{'type':'selector','dimension':'page','value':'Jeremy Corbyn'},"
         + "'aggregations':[],"
         + "'intervals':['1900-01-09T00:00:00.000Z/2992-01-10T00:00:00.000Z']}";
@@ -338,13 +340,14 @@ public class DruidAdapterIT {
     final String sql = "select cast(\"__time\" as timestamp) as \"__time\"\n"
         + "from \"wikiticker\"\n"
         + "where \"__time\" < '2015-10-12 00:00:00 UTC'";
-    final String explain = "\n    DruidQuery(table=[[wiki, wikiticker]], "
-        + "intervals=[[1900-01-01T00:00:00.000Z/2015-10-12T00:00:00.000Z]], "
-        + "projects=[[$0]])\n";
+    final String explain = "PLAN=EnumerableInterpreter\n"
+        + "  DruidQuery(table=[[wiki, wikiticker]],"
+        + " intervals=[[1900-01-01T00:00:00.000Z/2015-10-12T00:00:00.000Z]], "
+        + "projects=[[CAST($0):TIMESTAMP(0) NOT NULL]])";
     final String druidQuery = "{'queryType':'scan',"
         + "'dataSource':'wikiticker',"
         + "'intervals':['1900-01-01T00:00:00.000Z/2015-10-12T00:00:00.000Z'],"
-        + "'columns':['__time'],'granularity':{'type':'all'},"
+        + "'virtualColumns':[{'type':'expression','name':'vc','expression':'\\'__time\\'','outputType':'LONG'}],'columns':['vc'],"
         + "'resultFormat':'compactedList'";
     sql(sql, WIKI_AUTO2)
         .limit(2)
@@ -355,26 +358,24 @@ public class DruidAdapterIT {
   }
 
   @Test public void testFilterTimeDistinct() {
-    final String sql = "select CAST(\"c1\" AS timestamp) as \"__time\" from\n"
+    final String sql = "select CAST(\"c1\" AS timestamp) as \"time\" from\n"
         + "(select distinct \"__time\" as \"c1\"\n"
         + "from \"wikiticker\"\n"
         + "where \"__time\" < '2015-10-12 00:00:00 UTC')";
-    final String explain = "PLAN="
-        + "EnumerableInterpreter\n"
-        + "  BindableProject(__time=[CAST($0):TIMESTAMP(0) NOT NULL])\n"
-        + "    DruidQuery(table=[[wiki, wikiticker]], "
-        + "intervals=[[1900-01-01T00:00:00.000Z/2015-10-12T00:00:00.000Z]], "
-        + "groups=[{0}], aggs=[[]])\n";
+    final String explain = "PLAN=EnumerableInterpreter\n"
+        + "  DruidQuery(table=[[wiki, wikiticker]], intervals=[[1900-01-01T00:00:00.000Z/"
+        + "3000-01-01T00:00:00.000Z]], projects=[[$0]], groups=[{0}], aggs=[[]], "
+        + "filter=[<($0, 2015-10-12 00:00:00)], projects=[[CAST($0):TIMESTAMP(0) NOT NULL]])\n";
     final String subDruidQuery = "{'queryType':'groupBy','dataSource':'wikiticker',"
-        + "'granularity':{'type':'all'},'dimensions':[{'type':'extraction',"
+        + "'granularity':'all','dimensions':[{'type':'extraction',"
         + "'dimension':'__time','outputName':'extract',"
         + "'extractionFn':{'type':'timeFormat'";
     sql(sql, WIKI_AUTO2)
         .limit(2)
+        .returnsUnordered("time=2015-09-12 00:46:58",
+            "time=2015-09-12 00:47:00")
         .explainContains(explain)
-        .queryContains(druidChecker(subDruidQuery))
-        .returnsUnordered("__time=2015-09-12 00:46:58",
-            "__time=2015-09-12 00:47:00");
+        .queryContains(druidChecker(subDruidQuery));
   }
 
   @Test public void testMetadataColumns() throws Exception {
@@ -411,9 +412,9 @@ public class DruidAdapterIT {
         + "EnumerableInterpreter\n"
         + "  DruidQuery(table=[[foodmart, foodmart]], intervals=[[1900-01-09T00:00:00.000Z/2992-01-10T00:00:00.000Z]], groups=[{30}], aggs=[[]])";
     final String sql = "select distinct \"state_province\" from \"foodmart\"";
-    final String druidQuery = "{'queryType':'groupBy','dataSource':'foodmart',"
-        + "'granularity':{'type':'all'},"
-        + "'dimensions':[{'type':'default','dimension':'state_province'}],'limitSpec':{'type':'default'},"
+    final String druidQuery = "{'queryType':'groupBy','dataSource':'foodmart','granularity':'all',"
+        + "'dimensions':[{'type':'default','dimension':'state_province','outputName':'state_province'"
+        + ",'outputType':'STRING'}],'limitSpec':{'type':'default'},"
         + "'aggregations':[],"
         + "'intervals':['1900-01-09T00:00:00.000Z/2992-01-10T00:00:00.000Z']}";
     sql(sql)
@@ -426,11 +427,9 @@ public class DruidAdapterIT {
 
   @Test public void testSelectGroupBySum() {
     final String explain = "PLAN=EnumerableInterpreter\n"
-        + "  BindableAggregate(group=[{0}], U=[SUM($1)])\n"
-        + "    BindableProject(state_province=[$0], $f1=[CAST($1):INTEGER])\n"
-        + "      DruidQuery(table=[[foodmart, foodmart]], "
-        + "intervals=[[1900-01-09T00:00:00.000Z/2992-01-10T00:00:00.000Z]],"
-        + " projects=[[$30, $89]])";
+        + "  DruidQuery(table=[[foodmart, foodmart]], "
+        + "intervals=[[1900-01-09T00:00:00.000Z/2992-01-10T00:00:00.000Z]], "
+        + "projects=[[$30, CAST($89):INTEGER]], groups=[{0}], aggs=[[SUM($1)]])";
     final String sql = "select \"state_province\", sum(cast(\"unit_sales\" as integer)) as u\n"
         + "from \"foodmart\"\n"
         + "group by \"state_province\"";
@@ -448,12 +447,12 @@ public class DruidAdapterIT {
         + "  DruidQuery(table=[[foodmart, foodmart]], "
         + "intervals=[[1900-01-09T00:00:00.000Z/2992-01-10T00:00:00.000Z]], filter=[=($1, 1020)],"
         + " projects=[[$90, $1]], groups=[{0, 1}], aggs=[[]])";
-    final String druidQuery = "{'queryType':'groupBy','dataSource':'foodmart',"
-        + "'granularity':{'type':'all'},"
-        + "'dimensions':[{'type':'default','dimension':'store_sales'},"
-        + "{'type':'default','dimension':'product_id'}],'limitSpec':{'type':'default'},'"
-        + "filter':{'type':'selector','dimension':'product_id','value':'1020'},"
-        + "'aggregations':[],"
+    final String druidQuery = "{'queryType':'groupBy','dataSource':'foodmart','granularity':'all',"
+        + "'dimensions':[{'type':'default','dimension':'store_sales',\"outputName\":\"store_sales\","
+        + "'outputType':'DOUBLE'},{'type':'default','dimension':'product_id','outputName':"
+        + "'product_id','outputType':'STRING'}],'limitSpec':{'type':'default'},"
+        + "'filter':{'type':'bound','dimension':'product_id','lower':'1020','lowerStrict':false,"
+        + "'upper':'1020','upperStrict':false,'ordering':'numeric'},'aggregations':[],"
         + "'intervals':['1900-01-09T00:00:00.000Z/2992-01-10T00:00:00.000Z']}";
     sql(sql)
         .explainContains(plan)
@@ -469,13 +468,13 @@ public class DruidAdapterIT {
     final String sql = "select \"product_id\" from \"foodmart\" where "
             + "\"product_id\" = 1020 group by \"product_id\"";
     final String druidQuery = "{'queryType':'groupBy','dataSource':'foodmart',"
-            + "'granularity':{'type':'all'},'dimensions':[{'type':'default',"
-            + "'dimension':'product_id'}],"
-            + "'limitSpec':{'type':'default'},'filter':{'type':'selector',"
-            + "'dimension':'product_id','value':'1020'},"
-            + "'aggregations':[],"
-            + "'intervals':['1900-01-09T00:00:00.000Z/2992-01-10T00:00:00.000Z']}";
-    sql(sql).queryContains(druidChecker(druidQuery)).returnsUnordered("product_id=1020");
+        + "'granularity':'all','dimensions':[{'type':'default',"
+        + "'dimension':'product_id','outputName':'product_id','outputType':'STRING'}],"
+        + "'limitSpec':{'type':'default'},'filter':{'type':'bound','dimension':'product_id',"
+        + "'lower':'1020','lowerStrict':false,'upper':'1020','upperStrict':false,"
+        + "'ordering':'numeric'},'aggregations':[],"
+        + "'intervals':['1900-01-09T00:00:00.000Z/2992-01-10T00:00:00.000Z']}";
+    sql(sql).returnsUnordered("product_id=1020").queryContains(druidChecker(druidQuery));
   }
 
   @Test public void testComplexPushGroupBy() {
@@ -483,12 +482,12 @@ public class DruidAdapterIT {
             + "\"product_id\" = 1020";
     final String sql = "select \"id\" from (" + innerQuery + ") group by \"id\"";
     final String druidQuery = "{'queryType':'groupBy','dataSource':'foodmart',"
-            + "'granularity':{'type':'all'},"
-            + "'dimensions':[{'type':'default','dimension':'product_id'}],"
-            + "'limitSpec':{'type':'default'},"
-            + "'filter':{'type':'selector','dimension':'product_id','value':'1020'},"
-            + "'aggregations':[],"
-            + "'intervals':['1900-01-09T00:00:00.000Z/2992-01-10T00:00:00.000Z']}";
+        + "'granularity':'all',"
+        + "'dimensions':[{'type':'default','dimension':'product_id','outputName':'product_id',"
+        + "'outputType':'STRING'}],'limitSpec':{'type':'default'},"
+        + "'filter':{'type':'bound','dimension':'product_id','lower':'1020','lowerStrict':false,"
+        + "'upper':'1020','upperStrict':false,'ordering':'numeric'},'aggregations':[],"
+        + "'intervals':['1900-01-09T00:00:00.000Z/2992-01-10T00:00:00.000Z']}";
     sql(sql)
         .returnsUnordered("id=1020")
         .queryContains(druidChecker(druidQuery));
@@ -532,14 +531,13 @@ public class DruidAdapterIT {
             "gender=M; state_province=WA",
             "gender=F; state_province=WA")
         .queryContains(
-            druidChecker("{'queryType':'groupBy','dataSource':'foodmart',"
-                + "'granularity':{'type':'all'},'dimensions':[{'type':'default',"
-                + "'dimension':'gender'},{'type':'default',"
-                + "'dimension':'state_province'}],'limitSpec':{'type':'default',"
-                + "'columns':[{'dimension':'state_province','direction':'ascending',"
-                + "'dimensionOrder':'alphanumeric'},{'dimension':'gender',"
-                + "'direction':'descending','dimensionOrder':'alphanumeric'}]},"
-                + "'aggregations':[],"
+            druidChecker("{'queryType':'groupBy','dataSource':'foodmart','granularity':'all',"
+                + "'dimensions':[{'type':'default','dimension':'gender','outputName':'gender',"
+                + "'outputType':'STRING'},{'type':'default','dimension':'state_province',"
+                + "'outputName':'state_province','outputType':'STRING'}],'limitSpec':"
+                + "{'type':'default','columns':[{'dimension':'state_province','direction':'ascending'"
+                + ",'dimensionOrder':'lexicographic'},{'dimension':'gender','direction':'descending',"
+                + "'dimensionOrder':'lexicographic'}]},'aggregations':[],"
                 + "'intervals':['1900-01-09T00:00:00.000Z/2992-01-10T00:00:00.000Z']}"))
         .explainContains(explain);
   }
@@ -568,7 +566,7 @@ public class DruidAdapterIT {
         + "offset 2 fetch next 3 rows only";
     final String druidQuery = "{'queryType':'scan','dataSource':'foodmart',"
         + "'intervals':['1900-01-09T00:00:00.000Z/2992-01-10T00:00:00.000Z'],"
-        + "'columns':['state_province','product_name'],'granularity':{'type':'all'},"
+        + "'columns':['state_province','product_name'],"
         + "'resultFormat':'compactedList'}";
     sql(sql)
         .runs()
@@ -580,7 +578,7 @@ public class DruidAdapterIT {
         + "from \"foodmart\" fetch next 3 rows only";
     final String druidQuery = "{'queryType':'scan','dataSource':'foodmart',"
         + "'intervals':['1900-01-09T00:00:00.000Z/2992-01-10T00:00:00.000Z'],"
-        + "'columns':['gender','state_province'],'granularity':{'type':'all'},"
+        + "'columns':['gender','state_province'],"
         + "'resultFormat':'compactedList','limit':3";
     sql(sql)
         .runs()
@@ -591,8 +589,10 @@ public class DruidAdapterIT {
     final String sql = "select distinct \"gender\", \"state_province\"\n"
         + "from \"foodmart\" fetch next 3 rows only";
     final String druidQuery = "{'queryType':'groupBy','dataSource':'foodmart',"
-        + "'granularity':{'type':'all'},'dimensions':[{'type':'default','dimension':'gender'},"
-        + "{'type':'default','dimension':'state_province'}],'limitSpec':{'type':'default',"
+        + "'granularity':'all','dimensions':[{'type':'default','dimension':'gender',"
+        + "'outputName':'gender','outputType':'STRING'},"
+        + "{'type':'default','dimension':'state_province','outputName':'state_province',"
+        + "'outputType':'STRING'}],'limitSpec':{'type':'default',"
         + "'limit':3,'columns':[]},"
         + "'aggregations':[],"
         + "'intervals':['1900-01-09T00:00:00.000Z/2992-01-10T00:00:00.000Z']}";
@@ -617,16 +617,17 @@ public class DruidAdapterIT {
         + "group by \"brand_name\", \"gender\"\n"
         + "order by s desc limit 3";
     final String druidQuery = "{'queryType':'groupBy','dataSource':'foodmart',"
-        + "'granularity':{'type':'all'},'dimensions':[{'type':'default',"
-        + "'dimension':'brand_name'},{'type':'default','dimension':'gender'}],"
+        + "'granularity':'all','dimensions':[{'type':'default',"
+        + "'dimension':'brand_name','outputName':'brand_name','outputType':'STRING'},"
+        + "{'type':'default','dimension':'gender','outputName':'gender','outputType':'STRING'}],"
         + "'limitSpec':{'type':'default','limit':3,'columns':[{'dimension':'S',"
         + "'direction':'descending','dimensionOrder':'numeric'}]},"
         + "'aggregations':[{'type':'longSum','name':'S','fieldName':'unit_sales'}],"
         + "'intervals':['1900-01-09T00:00:00.000Z/2992-01-10T00:00:00.000Z']}";
     final String explain = "PLAN=EnumerableInterpreter\n"
-        + "  DruidQuery(table=[[foodmart, foodmart]], "
-        + "intervals=[[1900-01-09T00:00:00.000Z/2992-01-10T00:00:00.000Z]], "
-        + "groups=[{2, 39}], aggs=[[SUM($89)]], sort0=[2], dir0=[DESC], fetch=[3])\n";
+        + "  DruidQuery(table=[[foodmart, foodmart]], intervals=[[1900-01-09T00:00:00.000Z/"
+        + "2992-01-10T00:00:00.000Z]], projects=[[$2, $39, $89]], groups=[{0, 1}], "
+        + "aggs=[[SUM($2)]], sort0=[2], dir0=[DESC], fetch=[3])";
     sql(sql)
         .runs()
         .returnsOrdered("brand_name=Hermanos; gender=M; S=4286",
@@ -655,24 +656,22 @@ public class DruidAdapterIT {
         + "from \"foodmart\"\n"
         + "group by \"brand_name\"\n"
         + "order by s desc limit 3";
-    final String approxDruid = "{'queryType':'topN','dataSource':'foodmart',"
-        + "'granularity':{'type':'all'},"
-        + "'dimension':{'type':'default','dimension':'brand_name'},'metric':'S',"
+    final String approxDruid = "{'queryType':'topN','dataSource':'foodmart','granularity':'all',"
+        + "'dimension':{'type':'default','dimension':'brand_name','outputName':'brand_name','outputType':'STRING'},'metric':'S',"
         + "'aggregations':[{'type':'longSum','name':'S','fieldName':'unit_sales'}],"
         + "'intervals':['1900-01-09T00:00:00.000Z/2992-01-10T00:00:00.000Z'],"
         + "'threshold':3}";
-    final String exactDruid = "{'queryType':'groupBy','dataSource':'foodmart',"
-        + "'granularity':{'type':'all'},'dimensions':[{'type':'default',"
-        + "'dimension':'brand_name'}],'limitSpec':{'type':'default','limit':3,"
-        + "'columns':[{'dimension':'S','direction':'descending',"
-        + "'dimensionOrder':'numeric'}]},'aggregations':[{'type':'longSum',"
-        + "'name':'S','fieldName':'unit_sales'}],"
+    final String exactDruid = "{'queryType':'groupBy','dataSource':'foodmart','granularity':'all',"
+        + "'dimensions':[{'type':'default','dimension':'brand_name','outputName':'brand_name',"
+        + "'outputType':'STRING'}],'limitSpec':{'type':'default','limit':3,'columns':"
+        + "[{'dimension':'S','direction':'descending','dimensionOrder':'numeric'}]},'aggregations':"
+        + "[{'type':'longSum','name':'S','fieldName':'unit_sales'}],"
         + "'intervals':['1900-01-09T00:00:00.000Z/2992-01-10T00:00:00.000Z']}";
     final String druidQuery = approx ? approxDruid : exactDruid;
     final String explain = "PLAN=EnumerableInterpreter\n"
-        + "  DruidQuery(table=[[foodmart, foodmart]], "
-        + "intervals=[[1900-01-09T00:00:00.000Z/2992-01-10T00:00:00.000Z]], "
-        + "groups=[{2}], aggs=[[SUM($89)]], sort0=[1], dir0=[DESC], fetch=[3])\n";
+        + "  DruidQuery(table=[[foodmart, foodmart]], intervals=[[1900-01-09T00:00:00.000Z/"
+        + "2992-01-10T00:00:00.000Z]], projects=[[$2, $89]], groups=[{0}], "
+        + "aggs=[[SUM($1)]], sort0=[1], dir0=[DESC], fetch=[3])";
     CalciteAssert.that()
         .enable(enabled())
         .with(ImmutableMap.of("model", FOODMART.getPath()))
@@ -700,10 +699,11 @@ public class DruidAdapterIT {
         + "group by \"brand_name\", floor(\"timestamp\" to DAY)\n"
         + "order by s desc limit 30";
     final String explain =
-        "    DruidQuery(table=[[foodmart, foodmart]], "
-        + "intervals=[[1900-01-09T00:00:00.000Z/2992-01-10T00:00:00.000Z]], projects=[[$2, FLOOR"
-        + "($0, FLAG(DAY)), $89]], groups=[{0, 1}], aggs=[[SUM($2)]], sort0=[2], dir0=[DESC], "
-        + "fetch=[30])";
+        "PLAN=EnumerableInterpreter\n"
+            + "  DruidQuery(table=[[foodmart, foodmart]], intervals=[[1900-01-09T00:00:00.000Z/"
+            + "2992-01-10T00:00:00.000Z]], projects=[[$2, FLOOR($0, FLAG(DAY)), $89]], "
+            + "groups=[{0, 1}], aggs=[[SUM($2)]], post_projects=[[$0, "
+            + "CAST($1):TIMESTAMP(0) NOT NULL, $2]], sort0=[2], dir0=[DESC], fetch=[30])";
     sql(sql)
         .runs()
         .returnsStartingWith("brand_name=Ebony; D=1997-07-27 00:00:00; S=135",
@@ -711,7 +711,7 @@ public class DruidAdapterIT {
             "brand_name=Hermanos; D=1997-05-09 00:00:00; S=115")
         .explainContains(explain)
         .queryContains(
-            druidChecker("'queryType':'groupBy'", "'granularity':{'type':'all'}", "'limitSpec"
+            druidChecker("'queryType':'groupBy'", "'granularity':'all'", "'limitSpec"
                 + "':{'type':'default','limit':30,'columns':[{'dimension':'S',"
                 + "'direction':'descending','dimensionOrder':'numeric'}]}"));
   }
@@ -731,19 +731,17 @@ public class DruidAdapterIT {
         + "from \"foodmart\"\n"
         + "group by \"brand_name\", floor(\"timestamp\" to DAY)\n"
         + "order by s desc limit 30";
-    final String druidQueryPart1 = "{'queryType':'groupBy','dataSource':'foodmart',"
-        + "'granularity':{'type':'all'},'dimensions':[{'type':'default',"
-        + "'dimension':'brand_name'},{'type':'extraction','dimension':'__time',"
-        + "'outputName':'floor_day','extractionFn':{'type':'timeFormat'";
+    final String druidQueryPart1 = "{'queryType':'groupBy','dataSource':'foodmart'";
     final String druidQueryPart2 = "'limitSpec':{'type':'default','limit':30,"
         + "'columns':[{'dimension':'S','direction':'descending',"
         + "'dimensionOrder':'numeric'}]},'aggregations':[{'type':'longSum',"
         + "'name':'S','fieldName':'unit_sales'}],"
         + "'intervals':['1900-01-09T00:00:00.000Z/2992-01-10T00:00:00.000Z']}";
-    final String explain = "DruidQuery(table=[[foodmart, foodmart]], "
-        + "intervals=[[1900-01-09T00:00:00.000Z/2992-01-10T00:00:00.000Z]], projects=[[$2, FLOOR"
-        + "($0, FLAG(DAY)), $89]], groups=[{0, 1}], aggs=[[SUM($2)]], sort0=[2], dir0=[DESC], "
-        + "fetch=[30])";
+    final String explain = "PLAN=EnumerableInterpreter\n"
+        + "  DruidQuery(table=[[foodmart, foodmart]], intervals=[[1900-01-09T00:00:00.000Z/"
+        + "2992-01-10T00:00:00.000Z]], projects=[[$2, FLOOR($0, FLAG(DAY)), $89]], groups=[{0, 1}], "
+        + "aggs=[[SUM($2)]], post_projects=[[$0, CAST($1):TIMESTAMP(0) NOT NULL, $2]], "
+        + "sort0=[2], dir0=[DESC], fetch=[30])";
     sql(sql)
         .runs()
         .returnsStartingWith("brand_name=Ebony; D=1997-07-27 00:00:00; S=135",
@@ -764,12 +762,15 @@ public class DruidAdapterIT {
         + "group by \"brand_name\", floor(\"timestamp\" to DAY)\n"
         + "order by \"brand_name\"";
     final String subDruidQuery = "{'queryType':'groupBy','dataSource':'foodmart',"
-        + "'granularity':{'type':'all'},'dimensions':[{'type':'default',"
-        + "'dimension':'brand_name'},{'type':'extraction','dimension':'__time',"
+        + "'granularity':'all','dimensions':[{'type':'default',"
+        + "'dimension':'brand_name','outputName':'brand_name','outputType':'STRING'},"
+        + "{'type':'extraction','dimension':'__time',"
         + "'outputName':'floor_day','extractionFn':{'type':'timeFormat'";
-    final String explain = "    DruidQuery(table=[[foodmart, foodmart]], "
-        + "intervals=[[1900-01-09T00:00:00.000Z/2992-01-10T00:00:00.000Z]], projects=[[$2, FLOOR"
-        + "($0, FLAG(DAY)), $89]], groups=[{0, 1}], aggs=[[SUM($2)]], sort0=[0], dir0=[ASC])";
+    final String explain = "PLAN=EnumerableInterpreter\n"
+        + "  DruidQuery(table=[[foodmart, foodmart]], intervals=[[1900-01-09T00:00:00.000Z/"
+        + "2992-01-10T00:00:00.000Z]], projects=[[$2, FLOOR($0, FLAG(DAY)), $89]], groups=[{0, 1}],"
+        + " aggs=[[SUM($2)]], post_projects=[[$0, CAST($1):TIMESTAMP(0) NOT NULL, $2]], "
+        + "sort0=[0], dir0=[ASC])";
     sql(sql)
         .runs()
         .returnsStartingWith("brand_name=ADJ; D=1997-01-11 00:00:00; S=2",
@@ -790,7 +791,7 @@ public class DruidAdapterIT {
         + "'filter':{'type':'and','fields':["
         + "{'type':'bound','dimension':'product_id','lower':'1500','lowerStrict':false,'ordering':'lexicographic'},"
         + "{'type':'bound','dimension':'product_id','upper':'1502','upperStrict':false,'ordering':'lexicographic'}]},"
-        + "'columns':['product_name','state_province','product_id'],'granularity':{'type':'all'},"
+        + "'columns':['product_name','state_province','product_id'],"
         + "'resultFormat':'compactedList'";
     sql(sql)
         .limit(4)
@@ -823,7 +824,7 @@ public class DruidAdapterIT {
         + "'filter':{'type':'and','fields':["
         + "{'type':'bound','dimension':'product_id','lower':'1500','lowerStrict':false,'ordering':'numeric'},"
         + "{'type':'bound','dimension':'product_id','upper':'1502','upperStrict':false,'ordering':'numeric'}]},"
-        + "'columns':['product_name','state_province','product_id'],'granularity':{'type':'all'},"
+        + "'columns':['product_name','state_province','product_id'],"
         + "'resultFormat':'compactedList'";
     sql(sql)
         .limit(4)
@@ -852,8 +853,9 @@ public class DruidAdapterIT {
         + "where \"product_id\" = -1";
     final String druidQuery = "{'queryType':'scan','dataSource':'foodmart',"
         + "'intervals':['1900-01-09T00:00:00.000Z/2992-01-10T00:00:00.000Z'],"
-        + "'filter':{'type':'selector','dimension':'product_id','value':'-1'},"
-        + "'columns':['product_name'],'granularity':{'type':'all'},"
+        + "'filter':{'type':'bound','dimension':'product_id','lower':'-1','lowerStrict':false,"
+        + "'upper':'-1','upperStrict':false,'ordering':'numeric'},"
+        + "'columns':['product_name'],"
         + "'resultFormat':'compactedList'}";
     sql(sql)
         .limit(4)
@@ -868,9 +870,12 @@ public class DruidAdapterIT {
         + "where cast(\"product_id\" as integer) - 1500 BETWEEN 0 AND 2\n"
         + "order by \"state_province\" desc, \"product_id\"";
     final String druidQuery = "{'queryType':'scan','dataSource':'foodmart',"
-        + "'intervals':['1900-01-09T00:00:00.000Z/2992-01-10T00:00:00.000Z'],"
-        + "'columns':['product_id','product_name','state_province'],'granularity':{'type':'all'},"
+        + "'intervals':['1900-01-09T00:00:00.000Z/2992-01-10T00:00:00.000Z'],";
+    final String druidFilter = "\"filter\":{\"type\":\"and\","
+        + "\"fields\":[{\"type\":\"expression\",\"expression\":\"((CAST(\\\"product_id\\\"";
+    final String druidQuery2 = "'columns':['product_name','state_province','product_id'],"
         + "'resultFormat':'compactedList'}";
+
     sql(sql)
         .limit(4)
         .returns(
@@ -889,7 +894,7 @@ public class DruidAdapterIT {
                 }
               }
             })
-        .queryContains(druidChecker(druidQuery));
+        .queryContains(druidChecker(druidQuery, druidFilter, druidQuery2));
   }
 
   @Test public void testUnionPlan() {
@@ -930,12 +935,13 @@ public class DruidAdapterIT {
 
   @Test public void testCountGroupByEmpty() {
     final String druidQuery = "{'queryType':'timeseries','dataSource':'foodmart',"
-        + "'descending':false,'granularity':{'type':'all'},"
+        + "'descending':false,'granularity':'all',"
         + "'aggregations':[{'type':'count','name':'EXPR$0'}],"
         + "'intervals':['1900-01-09T00:00:00.000Z/2992-01-10T00:00:00.000Z'],"
         + "'context':{'skipEmptyBuckets':false}}";
     final String explain = "PLAN=EnumerableInterpreter\n"
-        + "  DruidQuery(table=[[foodmart, foodmart]], intervals=[[1900-01-09T00:00:00.000Z/2992-01-10T00:00:00.000Z]], projects=[[]], groups=[{}], aggs=[[COUNT()]])";
+        + "  DruidQuery(table=[[foodmart, foodmart]], intervals=[[1900-01-09T00:00:00.000Z/"
+        + "2992-01-10T00:00:00.000Z]], projects=[[0]], groups=[{}], aggs=[[COUNT()]])";
     final String sql = "select count(*) from \"foodmart\"";
     sql(sql)
         .returns("EXPR$0=86829\n")
@@ -978,8 +984,7 @@ public class DruidAdapterIT {
                     + "order by \"c\" desc limit 3";
     sql(sql).returnsOrdered("c=494; month=1997-11-01 00:00:00; SALES=5.0",
             "c=475; month=1997-12-01 00:00:00; SALES=5.0",
-            "c=468; month=1997-03-01 00:00:00; SALES=5.0"
-    ).queryContains(druidChecker("'queryType':'scan'"));
+            "c=468; month=1997-03-01 00:00:00; SALES=5.0").queryContains(druidChecker("'queryType':'groupBy'"));
   }
 
   @Test public void testGroupByTimeAndOneColumnNotProjected() {
@@ -1011,8 +1016,8 @@ public class DruidAdapterIT {
         + "group by \"state_province\"\n"
         + "order by \"state_province\"";
     String explain = "PLAN=EnumerableInterpreter\n"
-        + "  DruidQuery(table=[[foodmart, foodmart]], "
-        + "intervals=[[1900-01-09T00:00:00.000Z/2992-01-10T00:00:00.000Z]], groups=[{30}], "
+        + "  DruidQuery(table=[[foodmart, foodmart]], intervals=[[1900-01-09T00:00:00.000Z/"
+        + "2992-01-10T00:00:00.000Z]], projects=[[$30]], groups=[{0}], "
         + "aggs=[[COUNT()]], sort0=[0], dir0=[ASC])";
     sql(sql)
         .limit(2)
@@ -1045,7 +1050,25 @@ public class DruidAdapterIT {
         .limit(2)
         .returnsUnordered("state_province=CA; A=3; S=74748; C=16347; C0=24441",
             "state_province=OR; A=3; S=67659; C=21610; C0=21610")
-        .queryContains(druidChecker("'queryType':'scan'"));
+        .explainContains("PLAN=EnumerableInterpreter\n"
+            + "  BindableProject(state_province=[$0], A=[CAST(/(CASE(=($2, 0), null, $1), $2)):BIGINT],"
+            + " S=[CASE(=($2, 0), null, $1)], C=[$3], C0=[$4])\n"
+            + "    DruidQuery(table=[[foodmart, foodmart]], intervals=[[1900-01-09T00:00:00.000Z/"
+            + "2992-01-10T00:00:00.000Z]], projects=[[$30, $89, $71]], groups=[{0}], "
+            + "aggs=[[$SUM0($1), COUNT($1), COUNT($2), COUNT()]], sort0=[0], dir0=[ASC])")
+        .queryContains(
+            druidChecker("{'queryType':'groupBy','dataSource':'foodmart','granularity':'all'"
+                + ",'dimensions':[{'type':'default','dimension':'state_province','outputName':'state_province'"
+                + ",'outputType':'STRING'}],'limitSpec':"
+                + "{'type':'default','columns':[{'dimension':'state_province',"
+                + "'direction':'ascending','dimensionOrder':'lexicographic'}]},'aggregations':"
+                + "[{'type':'longSum','name':'$f1','fieldName':'unit_sales'},{'type':'filtered',"
+                + "'filter':{'type':'not','field':{'type':'selector','dimension':'unit_sales',"
+                + "'value':null}},'aggregator':{'type':'count','name':'$f2','fieldName':'unit_sales'}}"
+                + ",{'type':'filtered','filter':{'type':'not','field':{'type':'selector',"
+                + "'dimension':'store_sqft','value':null}},'aggregator':{'type':'count','name':'C',"
+                + "'fieldName':'store_sqft'}},{'type':'count','name':'C0'}],"
+                + "'intervals':['1900-01-09T00:00:00.000Z/2992-01-10T00:00:00.000Z']}"));
   }
 
   @Test public void testGroupByMonthGranularity() {
@@ -1053,15 +1076,14 @@ public class DruidAdapterIT {
         + " count(\"store_sqft\") as c\n"
         + "from \"foodmart\"\n"
         + "group by floor(\"timestamp\" to MONTH) order by s";
-    String druidQuery = "{'queryType':'timeseries','dataSource':'foodmart'";
+    String druidQuery = "{'queryType':'groupBy','dataSource':'foodmart'";
     sql(sql)
         .limit(3)
         .explainContains("PLAN=EnumerableInterpreter\n"
-            + "  BindableSort(sort0=[$0], dir0=[ASC])\n"
-            + "    BindableProject(S=[$1], C=[$2])\n"
-            + "      DruidQuery(table=[[foodmart, foodmart]], "
-            + "intervals=[[1900-01-09T00:00:00.000Z/2992-01-10T00:00:00.000Z]], projects=[[FLOOR"
-            + "($0, FLAG(MONTH)), $89, $71]], groups=[{0}], aggs=[[SUM($1), COUNT($2)]])")
+            + "  BindableProject(S=[$1], C=[$2])\n"
+            + "    DruidQuery(table=[[foodmart, foodmart]], intervals=[[1900-01-09T00:00:00.000Z/"
+            + "2992-01-10T00:00:00.000Z]], projects=[[FLOOR($0, FLAG(MONTH)), $89, $71]], "
+            + "groups=[{0}], aggs=[[SUM($1), COUNT($2)]], sort0=[1], dir0=[ASC])")
         .returnsOrdered("S=19958; C=5606", "S=20179; C=5523", "S=20388; C=5591")
         .queryContains(druidChecker(druidQuery));
   }
@@ -1105,12 +1127,10 @@ public class DruidAdapterIT {
         + "group by floor(\"timestamp\" to MONTH)\n"
         + "order by floor(\"timestamp\" to MONTH) limit 3";
     final String explain = "PLAN=EnumerableInterpreter\n"
-        + "  BindableProject(M=[CAST($0):TIMESTAMP(0) NOT NULL], S=[$1], C=[$2], EXPR$3=[$0])\n"
-        + "    BindableSort(sort0=[$0], dir0=[ASC], fetch=[3])\n"
-        + "      DruidQuery(table=[[foodmart, foodmart]], "
-        + "intervals=[[1900-01-09T00:00:00.000Z/2992-01-10T00:00:00.000Z]], projects=[[FLOOR($0, "
-        + "FLAG(MONTH)), $89, $71]], groups=[{0}], aggs=[[SUM($1), COUNT($2)]], sort0=[0], "
-        + "dir0=[ASC])";
+        + "  DruidQuery(table=[[foodmart, foodmart]], intervals=[[1900-01-09T00:00:00.000Z/"
+        + "2992-01-10T00:00:00.000Z]], projects=[[FLOOR($0, FLAG(MONTH)), $89, $71]], groups=[{0}], "
+        + "aggs=[[SUM($1), COUNT($2)]], post_projects=[[CAST($0):TIMESTAMP(0) NOT NULL, $1, $2, $0]]"
+        + ", sort0=[3], dir0=[ASC], fetch=[3])";
     sql(sql)
         .returnsOrdered("M=1997-01-01 00:00:00; S=21628; C=5957",
             "M=1997-02-01 00:00:00; S=20957; C=5842",
@@ -1123,7 +1143,7 @@ public class DruidAdapterIT {
         + " count(\"store_sqft\") as c\n"
         + "from \"foodmart\"\n"
         + "group by floor(\"timestamp\" to DAY) order by c desc";
-    String druidQuery = "{'queryType':'timeseries','dataSource':'foodmart'";
+    String druidQuery = "{'queryType':'groupBy','dataSource':'foodmart'";
     sql(sql)
         .limit(3)
         .queryContains(druidChecker(druidQuery))
@@ -1137,7 +1157,7 @@ public class DruidAdapterIT {
         + "where \"timestamp\" >= '1996-01-01 00:00:00 UTC' and "
         + " \"timestamp\" < '1998-01-01 00:00:00 UTC'\n"
         + "group by floor(\"timestamp\" to MONTH) order by s asc";
-    String druidQuery = "{'queryType':'timeseries','dataSource':'foodmart'";
+    String druidQuery = "{'queryType':'groupBy','dataSource':'foodmart'";
 
     sql(sql)
         .limit(3)
@@ -1162,8 +1182,9 @@ public class DruidAdapterIT {
         + "($0, FLAG(MONTH)), $89]], groups=[{0, 1}], aggs=[[SUM($2), MAX($2)]], sort0=[2], "
         + "dir0=[DESC], fetch=[3])";
     final String druidQueryPart1 = "{'queryType':'groupBy','dataSource':'foodmart',"
-        + "'granularity':{'type':'all'},'dimensions':[{'type':'default',"
-        + "'dimension':'state_province'},{'type':'extraction','dimension':'__time',"
+        + "'granularity':'all','dimensions':[{'type':'default',"
+        + "'dimension':'state_province',\"outputName\":\"state_province\",\"outputType\":\"STRING\"},"
+        + "{'type':'extraction','dimension':'__time',"
         + "'outputName':'floor_month','extractionFn':{'type':'timeFormat','format'";
     final String druidQueryPart2 = "'limitSpec':{'type':'default','limit':3,"
         + "'columns':[{'dimension':'S','direction':'descending',"
@@ -1195,7 +1216,7 @@ public class DruidAdapterIT {
         + "($0, FLAG(DAY)), $89]], groups=[{0, 1}], aggs=[[SUM($2), MAX($2)]], sort0=[2], "
         + "dir0=[DESC], fetch=[6])";
     final String druidQueryType = "{'queryType':'groupBy','dataSource':'foodmart',"
-        + "'granularity':{'type':'all'},'dimensions'";
+        + "'granularity':'all','dimensions'";
     final String limitSpec = "'limitSpec':{'type':'default','limit':6,"
         + "'columns':[{'dimension':'S','direction':'descending','dimensionOrder':'numeric'}]}";
     sql(sql)
@@ -1210,15 +1231,13 @@ public class DruidAdapterIT {
   }
 
   @Test public void testGroupByHaving() {
-    // Note: We don't push down HAVING yet
     final String sql = "select \"state_province\" as s, count(*) as c\n"
         + "from \"foodmart\"\n"
         + "group by \"state_province\" having count(*) > 23000 order by 1";
-    final String explain = "PLAN="
-        + "EnumerableInterpreter\n"
-        + "  BindableSort(sort0=[$0], dir0=[ASC])\n"
-        + "    BindableFilter(condition=[>($1, 23000)])\n"
-        + "      DruidQuery(table=[[foodmart, foodmart]], intervals=[[1900-01-09T00:00:00.000Z/2992-01-10T00:00:00.000Z]], groups=[{30}], aggs=[[COUNT()]])";
+    final String explain = "PLAN=EnumerableInterpreter\n"
+        + "  DruidQuery(table=[[foodmart, foodmart]], intervals=[[1900-01-09T00:00:00.000Z/"
+        + "2992-01-10T00:00:00.000Z]], projects=[[$30]], groups=[{0}], aggs=[[COUNT()]], "
+        + "filter=[>($1, 23000)], sort0=[0], dir0=[ASC])";
     sql(sql)
         .returnsOrdered("S=CA; C=24441",
             "S=WA; C=40778")
@@ -1231,10 +1250,8 @@ public class DruidAdapterIT {
         + "from \"foodmart\"\n"
         + "group by \"state_province\", \"city\"\n"
         + "order by c desc limit 2";
-    final String explain = "PLAN="
-        + "EnumerableInterpreter\n"
-        + "  BindableProject(C=[$2], state_province=[$1], city=[$0])\n"
-        + "    DruidQuery(table=[[foodmart, foodmart]], intervals=[[1900-01-09T00:00:00.000Z/2992-01-10T00:00:00.000Z]], groups=[{29, 30}], aggs=[[COUNT()]], sort0=[2], dir0=[DESC], fetch=[2])";
+    final String explain = "BindableProject(C=[$2], state_province=[$0], city=[$1])\n"
+        + "    DruidQuery(table=[[foodmart, foodmart]], intervals=[[1900-01-09T00:00:00.000Z/2992-01-10T00:00:00.000Z]], projects=[[$30, $29]], groups=[{0, 1}], aggs=[[COUNT()]], sort0=[2], dir0=[DESC], fetch=[2])";
     sql(sql)
         .returnsOrdered("C=7394; state_province=WA; city=Spokane",
             "C=3958; state_province=WA; city=Olympia")
@@ -1258,8 +1275,9 @@ public class DruidAdapterIT {
         + "intervals=[[1900-01-09T00:00:00.000Z/2992-01-10T00:00:00.000Z]], groups=[{29, 30}], "
         + "aggs=[[]])";
     final String druidQuery = "{'queryType':'groupBy','dataSource':'foodmart',"
-        + "'granularity':{'type':'all'},'dimensions':[{'type':'default','dimension':'city'},"
-        + "{'type':'default','dimension':'state_province'}],"
+        + "'granularity':'all','dimensions':[{'type':'default','dimension':'city','outputName':'city'"
+        + ",'outputType':'STRING'},"
+        + "{'type':'default','dimension':'state_province','outputName':'state_province','outputType':'STRING'}],"
         + "'limitSpec':{'type':'default'},'aggregations':[],"
         + "'intervals':['1900-01-09T00:00:00.000Z/2992-01-10T00:00:00.000Z']}";
     sql(sql)
@@ -1276,16 +1294,15 @@ public class DruidAdapterIT {
     final String sql = "select \"product_name\", 0 as zero\n"
         + "from \"foodmart\"\n"
         + "order by \"product_name\"";
-    final String explain = "PLAN="
-        + "EnumerableInterpreter\n"
-        + "  BindableProject(product_name=[$0], ZERO=[0])\n"
-        + "    BindableSort(sort0=[$0], dir0=[ASC])\n"
-        + "      DruidQuery(table=[[foodmart, foodmart]], intervals=[[1900-01-09T00:00:00.000Z/2992-01-10T00:00:00.000Z]], projects=[[$3]])";
+    final String explain = "PLAN=EnumerableInterpreter\n"
+        + "  BindableSort(sort0=[$0], dir0=[ASC])\n"
+        + "    DruidQuery(table=[[foodmart, foodmart]], "
+        + "intervals=[[1900-01-09T00:00:00.000Z/2992-01-10T00:00:00.000Z]], projects=[[$3, 0]])";
     sql(sql)
         .limit(2)
-        .explainContains(explain)
         .returnsUnordered("product_name=ADJ Rosy Sunglasses; ZERO=0",
-            "product_name=ADJ Rosy Sunglasses; ZERO=0");
+            "product_name=ADJ Rosy Sunglasses; ZERO=0")
+        .explainContains(explain);
   }
 
   @Test public void testFilterDistinct() {
@@ -1295,26 +1312,23 @@ public class DruidAdapterIT {
         + "where \"product_name\" = 'High Top Dried Mushrooms'\n"
         + "and \"quarter\" in ('Q2', 'Q3')\n"
         + "and \"state_province\" = 'WA'";
-    final String druidQuery = "{'queryType':'groupBy','dataSource':'foodmart',"
-        + "'granularity':{'type':'all'},"
-        + "'dimensions':[{'type':'default','dimension':'state_province'},"
-        + "{'type':'default','dimension':'city'},"
-        + "{'type':'default','dimension':'product_name'}],'limitSpec':{'type':'default'},"
-        + "'filter':{'type':'and','fields':[{'type':'selector','dimension':'product_name',"
-        + "'value':'High Top Dried Mushrooms'},{'type':'or','fields':[{'type':'selector',"
+    final String druidQuery1 = "{'queryType':'groupBy','dataSource':'foodmart','granularity':'all'";
+    final String druidQuery2 = "'filter':{'type':'and','fields':[{'type':'selector','dimension':"
+        + "'product_name','value':'High Top Dried Mushrooms'},{'type':'or','fields':[{'type':'selector',"
         + "'dimension':'quarter','value':'Q2'},{'type':'selector','dimension':'quarter',"
         + "'value':'Q3'}]},{'type':'selector','dimension':'state_province','value':'WA'}]},"
         + "'aggregations':[],"
         + "'intervals':['1900-01-09T00:00:00.000Z/2992-01-10T00:00:00.000Z']}";
     final String explain = "PLAN=EnumerableInterpreter\n"
-        + "  DruidQuery(table=[[foodmart, foodmart]], intervals=[[1900-01-09T00:00:00.000Z/2992-01-10T00:00:00.000Z]],"
+        + "  DruidQuery(table=[[foodmart, foodmart]], intervals=[[1900-01-09T00:00:00.000Z/"
+        + "2992-01-10T00:00:00.000Z]],"
         + " filter=[AND(=($3, 'High Top Dried Mushrooms'),"
         + " OR(=($87, 'Q2'),"
         + " =($87, 'Q3')),"
         + " =($30, 'WA'))],"
         + " projects=[[$30, $29, $3]], groups=[{0, 1, 2}], aggs=[[]])\n";
     sql(sql)
-        .queryContains(druidChecker(druidQuery))
+        .queryContains(druidChecker(druidQuery1, druidQuery2))
         .explainContains(explain)
         .returnsUnordered(
             "state_province=WA; city=Bremerton; product_name=High Top Dried Mushrooms",
@@ -1346,7 +1360,6 @@ public class DruidAdapterIT {
         + "{'type':'selector','dimension':'quarter','value':'Q3'}]},"
         + "{'type':'selector','dimension':'state_province','value':'WA'}]},"
         + "'columns':['state_province','city','product_name'],"
-        + "'granularity':{'type':'all'},"
         + "'resultFormat':'compactedList'}";
     final String explain = "PLAN=EnumerableInterpreter\n"
         + "  DruidQuery(table=[[foodmart, foodmart]], "
@@ -1386,13 +1399,13 @@ public class DruidAdapterIT {
         + "from \"foodmart\"\n"
         + "where extract(year from \"timestamp\") = 1997\n"
         + "and extract(month from \"timestamp\") in (4, 6)\n";
-    final String explain = "DruidQuery(table=[[foodmart, foodmart]], "
-        + "intervals=[[1997-04-01T00:00:00.000Z/1997-05-01T00:00:00.000Z,"
-        + " 1997-06-01T00:00:00.000Z/1997-07-01T00:00:00.000Z]], projects=[[]],"
-        + " groups=[{}], aggs=[[COUNT()]])";
+    final String explain = "PLAN=EnumerableInterpreter\n"
+        + "  DruidQuery(table=[[foodmart, foodmart]], intervals=[[1997-04-01T00:00:00.000Z/"
+        + "1997-05-01T00:00:00.000Z, 1997-06-01T00:00:00.000Z/1997-07-01T00:00:00.000Z]],"
+        + " projects=[[0]], groups=[{}], aggs=[[COUNT()]])";
     sql(sql)
-        .explainContains(explain)
-        .returnsUnordered("C=13500");
+        .returnsUnordered("C=13500")
+        .explainContains(explain);
   }
 
   @Test public void testFilterSwapped() {
@@ -1425,40 +1438,6 @@ public class DruidAdapterIT {
         .returnsCount(9);
   }
 
-
-  /** Test case for
-   * <a href="https://issues.apache.org/jira/browse/CALCITE-1656">[CALCITE-1656]
-   * Improve cost function in DruidQuery to encourage early column
-   * pruning</a>. */
-  @Test public void testFieldBasedCostColumnPruning() {
-    // A query where filter cannot be pushed to Druid but
-    // the project can still be pushed in order to prune extra columns.
-    String sql = "select \"countryName\", ceil(CAST(\"time\" AS TIMESTAMP) to DAY),\n"
-        + "  cast(count(*) as integer) as c\n"
-        + "from \"wiki\"\n"
-        + "where ceil(\"time\" to DAY) >= '1997-01-01 00:00:00 UTC'\n"
-        + "and ceil(\"time\" to DAY) < '1997-09-01 00:00:00 UTC'\n"
-        + "and \"time\" + INTERVAL '1' DAY > '1997-01-01'\n"
-        + "group by \"countryName\", ceil(CAST(\"time\" AS TIMESTAMP) TO DAY)\n"
-        + "order by c limit 5";
-    String plan = "BindableProject(countryName=[$0], EXPR$1=[$1], C=[CAST($2):INTEGER NOT NULL])\n"
-        + "    BindableSort(sort0=[$2], dir0=[ASC], fetch=[5])\n"
-        + "      BindableAggregate(group=[{0, 1}], agg#0=[COUNT()])\n"
-        + "        BindableProject(countryName=[$1], EXPR$1=[CEIL(CAST($0):TIMESTAMP(0) NOT NULL, FLAG(DAY))])\n"
-        + "          BindableFilter(condition=[>(+($0, 86400000), CAST('1997-01-01'):TIMESTAMP_WITH_LOCAL_TIME_ZONE(0) NOT NULL)])\n"
-        + "            DruidQuery(table=[[wiki, wiki]], intervals=[[1996-12-31T00:00:00.001Z/1997-08-31T00:00:00.001Z]], projects=[[$0, $5]])";
-    // NOTE: Druid query only has countryName as the dimension
-    // being queried after project is pushed to druid query.
-    String druidQuery = "{'queryType':'scan',"
-        + "'dataSource':'wikiticker',"
-        + "'intervals':['1996-12-31T00:00:00.001Z/1997-08-31T00:00:00.001Z'],"
-        + "'columns':['__time','countryName'],"
-        + "'granularity':{'type':'all'},"
-        + "'resultFormat':'compactedList'";
-    sql(sql, WIKI).explainContains(plan);
-    sql(sql, WIKI).queryContains(druidChecker(druidQuery));
-  }
-
   @Test public void testGroupByMetricAndExtractTime() {
     final String sql =
         "SELECT count(*), cast(floor(\"timestamp\" to DAY) as timestamp), \"store_sales\" "
@@ -1484,12 +1463,12 @@ public class DruidAdapterIT {
         + "and \"timestamp\" > '1990-01-01 00:00:00 UTC' "
         + "group by \"timestamp\", \"product_id\" ";
     String druidQuery = "{'queryType':'groupBy','dataSource':'foodmart',"
-        + "'granularity':{'type':'all'},'dimensions':[{'type':'extraction',"
+        + "'granularity':'all','dimensions':[{'type':'extraction',"
         + "'dimension':'__time','outputName':'extract',"
         + "'extractionFn':{'type':'timeFormat','format':'yyyy-MM-dd";
     sql(sql)
-        .queryContains(druidChecker(druidQuery))
-        .returnsUnordered("product_id=1016; time=1997-01-02 00:00:00");
+        .returnsUnordered("product_id=1016; time=1997-01-02 00:00:00")
+        .queryContains(druidChecker(druidQuery));
   }
 
   @Test public void testPushAggregateOnTimeWithExtractYear() {
@@ -1501,11 +1480,11 @@ public class DruidAdapterIT {
     sql(sql)
         .queryContains(
             druidChecker(
-                ",'granularity':{'type':'all'}",
+                ",'granularity':'all'",
                 "{'type':'extraction',"
                     + "'dimension':'__time','outputName':'extract_year',"
                     + "'extractionFn':{'type':'timeFormat','format':'yyyy',"
-                    + "'timeZone':'UTC','locale':'und'}}"))
+                    + "'timeZone':'UTC','locale':'en-US'}}"))
         .returnsUnordered("year=1997; product_id=1016");
   }
 
@@ -1518,11 +1497,11 @@ public class DruidAdapterIT {
     sql(sql)
         .queryContains(
             druidChecker(
-                ",'granularity':{'type':'all'}",
+                ",'granularity':'all'",
                 "{'type':'extraction',"
                     + "'dimension':'__time','outputName':'extract_month',"
                     + "'extractionFn':{'type':'timeFormat','format':'M',"
-                    + "'timeZone':'UTC','locale':'und'}}"))
+                    + "'timeZone':'UTC','locale':'en-US'}}"))
         .returnsUnordered("month=1; product_id=1016", "month=2; product_id=1016",
             "month=3; product_id=1016", "month=4; product_id=1016", "month=5; product_id=1016");
   }
@@ -1537,20 +1516,17 @@ public class DruidAdapterIT {
     sql(sql)
         .queryContains(
             druidChecker(
-                ",'granularity':{'type':'all'}",
+                ",'granularity':'all'",
                 "{'type':'extraction',"
                     + "'dimension':'__time','outputName':'extract_day',"
                     + "'extractionFn':{'type':'timeFormat','format':'d',"
-                    + "'timeZone':'UTC','locale':'und'}}"))
+                    + "'timeZone':'UTC','locale':'en-US'}}"))
         .returnsUnordered("day=2; product_id=1016", "day=10; product_id=1016",
             "day=13; product_id=1016", "day=16; product_id=1016");
   }
 
-  // Calcite rewrite the extract function in the query as:
-  // rel#85:BindableProject.BINDABLE.[](input=rel#69:Subset#1.BINDABLE.[],
-  // hourOfDay=/INT(MOD(Reinterpret($0), 86400000), 3600000),product_id=$1).
-  // Currently 'EXTRACT( hour from \"timestamp\")' is not pushed to Druid.
-  @Ignore @Test public void testPushAggregateOnTimeWithExtractHourOfDay() {
+  @Test
+  public void testPushAggregateOnTimeWithExtractHourOfDay() {
     String sql =
         "select EXTRACT( hour from \"timestamp\") as \"hourOfDay\",\"product_id\"  from "
             + "\"foodmart\" where \"product_id\" = 1016 and "
@@ -1558,15 +1534,8 @@ public class DruidAdapterIT {
             + "('1997-01-01' as timestamp)" + " group by "
             + " EXTRACT( hour from \"timestamp\"), \"product_id\" ";
     sql(sql)
-        .queryContains(
-            druidChecker(
-                ",'granularity':{'type':'all'}",
-                "{'type':'extraction',"
-                    + "'dimension':'__time','outputName':'extract_0',"
-                    + "'extractionFn':{'type':'timeFormat','format':'H',"
-                    + "'timeZone':'UTC'}}"))
-        .returnsUnordered("month=01; product_id=1016", "month=02; product_id=1016",
-            "month=03; product_id=1016", "month=04; product_id=1016", "month=05; product_id=1016");
+        .queryContains(druidChecker("'queryType':'groupBy'"))
+        .returnsUnordered("hourOfDay=0; product_id=1016");
   }
 
   @Test public void testPushAggregateOnTimeWithExtractYearMonthDay() {
@@ -1581,17 +1550,17 @@ public class DruidAdapterIT {
     sql(sql)
         .queryContains(
             druidChecker(
-                ",'granularity':{'type':'all'}",
+                ",'granularity':'all'",
                 "{'type':'extraction',"
                     + "'dimension':'__time','outputName':'extract_day',"
                     + "'extractionFn':{'type':'timeFormat','format':'d',"
-                    + "'timeZone':'UTC','locale':'und'}}", "{'type':'extraction',"
+                    + "'timeZone':'UTC','locale':'en-US'}}", "{'type':'extraction',"
                     + "'dimension':'__time','outputName':'extract_month',"
                     + "'extractionFn':{'type':'timeFormat','format':'M',"
-                    + "'timeZone':'UTC','locale':'und'}}", "{'type':'extraction',"
+                    + "'timeZone':'UTC','locale':'en-US'}}", "{'type':'extraction',"
                     + "'dimension':'__time','outputName':'extract_year',"
                     + "'extractionFn':{'type':'timeFormat','format':'yyyy',"
-                    + "'timeZone':'UTC','locale':'und'}}"))
+                    + "'timeZone':'UTC','locale':'en-US'}}"))
         .explainContains("PLAN=EnumerableInterpreter\n"
             + "  DruidQuery(table=[[foodmart, foodmart]], "
             + "intervals=[[1997-01-01T00:00:00.001Z/1997-01-20T00:00:00.000Z]], "
@@ -1615,16 +1584,16 @@ public class DruidAdapterIT {
     sql(sql)
         .queryContains(
             druidChecker(
-                ",'granularity':{'type':'all'}", "{'type':'extraction',"
+                ",'granularity':'all'", "{'type':'extraction',"
                     + "'dimension':'__time','outputName':'extract_day',"
                     + "'extractionFn':{'type':'timeFormat','format':'d',"
-                    + "'timeZone':'UTC','locale':'und'}}", "{'type':'extraction',"
+                    + "'timeZone':'UTC','locale':'en-US'}}", "{'type':'extraction',"
                     + "'dimension':'__time','outputName':'extract_month',"
                     + "'extractionFn':{'type':'timeFormat','format':'M',"
-                    + "'timeZone':'UTC','locale':'und'}}", "{'type':'extraction',"
+                    + "'timeZone':'UTC','locale':'en-US'}}", "{'type':'extraction',"
                     + "'dimension':'__time','outputName':'extract_year',"
                     + "'extractionFn':{'type':'timeFormat','format':'yyyy',"
-                    + "'timeZone':'UTC','locale':'und'}}"))
+                    + "'timeZone':'UTC','locale':'en-US'}}"))
         .explainContains("PLAN=EnumerableInterpreter\n"
             + "  DruidQuery(table=[[foodmart, foodmart]], "
             + "intervals=[[1997-01-01T00:00:00.001Z/1997-01-20T00:00:00.000Z]], "
@@ -1647,10 +1616,10 @@ public class DruidAdapterIT {
     sql(sql)
         .queryContains(
             druidChecker(
-                ",'granularity':{'type':'all'}", "{'type':'extraction',"
+                ",'granularity':'all'", "{'type':'extraction',"
                     + "'dimension':'__time','outputName':'extract_day',"
                     + "'extractionFn':{'type':'timeFormat','format':'d',"
-                    + "'timeZone':'UTC','locale':'und'}}"))
+                    + "'timeZone':'UTC','locale':'en-US'}}"))
         .explainContains("PLAN=EnumerableInterpreter\n"
             + "  DruidQuery(table=[[foodmart, foodmart]], "
             + "intervals=[[1997-01-01T00:00:00.001Z/1997-01-20T00:00:00.000Z]], "
@@ -1665,17 +1634,14 @@ public class DruidAdapterIT {
         + "where EXTRACT( year from \"timestamp\") = 1997 and "
         + "\"cases_per_pallet\" >= 8 and \"cases_per_pallet\" <= 10 and "
         + "\"units_per_case\" < 15 ";
-    String druidQuery = "{'queryType':'timeseries','dataSource':'foodmart',"
-        + "'descending':false,'granularity':{'type':'all'},'filter':{'type':'and',"
-        + "'fields':[{'type':'bound','dimension':'cases_per_pallet','lower':'8',"
-        + "'lowerStrict':false,'ordering':'numeric'},{'type':'bound',"
-        + "'dimension':'cases_per_pallet','upper':'10','upperStrict':false,"
-        + "'ordering':'numeric'},{'type':'bound','dimension':'units_per_case',"
-        + "'upper':'15','upperStrict':true,'ordering':'numeric'}]},"
-        + "'aggregations':[{'type':'doubleSum',"
-        + "'name':'EXPR$0','fieldName':'store_sales'}],"
-        + "'intervals':['1997-01-01T00:00:00.000Z/1998-01-01T00:00:00.000Z'],"
-        + "'context':{'skipEmptyBuckets':true}}";
+    String druidQuery = "{'queryType':'timeseries','dataSource':'foodmart','descending':false,"
+        + "'granularity':'all','filter':{'type':'and','fields':[{'type':'bound','dimension':"
+        + "'cases_per_pallet','lower':'8','lowerStrict':false,'ordering':'numeric'},"
+        + "{'type':'bound','dimension':'cases_per_pallet','upper':'10','upperStrict':false,"
+        + "'ordering':'numeric'},{'type':'bound','dimension':'units_per_case','upper':'15',"
+        + "'upperStrict':true,'ordering':'numeric'}]},'aggregations':[{'type':'doubleSum',"
+        + "'name':'EXPR$0','fieldName':'store_sales'}],'intervals':['1997-01-01T00:00:00.000Z/"
+        + "1998-01-01T00:00:00.000Z'],'context':{'skipEmptyBuckets':true}}";
     sql(sql)
         .explainContains("PLAN=EnumerableInterpreter\n"
             + "  DruidQuery(table=[[foodmart, foodmart]], "
@@ -1683,8 +1649,8 @@ public class DruidAdapterIT {
             + "filter=[AND(>=(CAST($11):BIGINT, 8), <=(CAST($11):BIGINT, 10), "
             + "<(CAST($10):BIGINT, 15))], groups=[{}], "
             + "aggs=[[SUM($90)]])\n")
-        .queryContains(druidChecker(druidQuery))
-        .returnsUnordered("EXPR$0=75364.1");
+        .returnsUnordered("EXPR$0=75364.1")
+        .queryContains(druidChecker(druidQuery));
   }
 
   @Test public void testPushOfFilterExtractionOnDayAndMonth() {
@@ -1694,24 +1660,6 @@ public class DruidAdapterIT {
         + "AND  \"product_id\" >= 1549 group by \"product_id\", EXTRACT(day from "
         + "\"timestamp\"), EXTRACT(month from \"timestamp\")";
     sql(sql)
-        .queryContains(
-            druidChecker("{'queryType':'groupBy','dataSource':'foodmart',"
-                + "'granularity':{'type':'all'},'dimensions':[{'type':'default',"
-                + "'dimension':'product_id'},{'type':'extraction','dimension':'__time',"
-                + "'outputName':'extract_day','extractionFn':{'type':'timeFormat',"
-                + "'format':'d','timeZone':'UTC','locale':'und'}},{'type':'extraction',"
-                + "'dimension':'__time','outputName':'extract_month',"
-                + "'extractionFn':{'type':'timeFormat','format':'M','timeZone':'UTC',"
-                + "'locale':'und'}}],'limitSpec':{'type':'default'},"
-                + "'filter':{'type':'and','fields':[{'type':'bound',"
-                + "'dimension':'product_id','lower':'1549','lowerStrict':false,"
-                + "'ordering':'numeric'},{'type':'bound','dimension':'__time',"
-                + "'lower':'30','lowerStrict':false,'ordering':'numeric',"
-                + "'extractionFn':{'type':'timeFormat','format':'d','timeZone':'UTC',"
-                + "'locale':'und'}},{'type':'selector','dimension':'__time',"
-                + "'value':'11','extractionFn':{'type':'timeFormat','format':'M',"
-                + "'timeZone':'UTC','locale':'und'}}]},'aggregations':[],"
-                + "'intervals':['1900-01-09T00:00:00.000Z/2992-01-10T00:00:00.000Z']}"))
         .returnsUnordered("product_id=1549; EXPR$1=30; EXPR$2=11",
             "product_id=1553; EXPR$1=30; EXPR$2=11");
   }
@@ -1724,45 +1672,17 @@ public class DruidAdapterIT {
         + "group by \"product_id\", EXTRACT(day from \"timestamp\"), "
         + "EXTRACT(month from \"timestamp\"), EXTRACT(year from \"timestamp\")";
     sql(sql)
-        .queryContains(
-            druidChecker("{'queryType':'groupBy','dataSource':'foodmart',"
-                + "'granularity':{'type':'all'},'dimensions':[{'type':'default',"
-                + "'dimension':'product_id'},{'type':'extraction','dimension':'__time',"
-                + "'outputName':'extract_day','extractionFn':{'type':'timeFormat',"
-                + "'format':'d','timeZone':'UTC','locale':'und'}},{'type':'extraction',"
-                + "'dimension':'__time','outputName':'extract_month',"
-                + "'extractionFn':{'type':'timeFormat','format':'M','timeZone':'UTC',"
-                + "'locale':'und'}},{'type':'extraction','dimension':'__time',"
-                + "'outputName':'extract_year','extractionFn':{'type':'timeFormat',"
-                + "'format':'yyyy','timeZone':'UTC','locale':'und'}}],"
-                + "'limitSpec':{'type':'default'},'filter':{"
-                + "'type':'bound','dimension':'product_id','lower':'1549',"
-                + "'lowerStrict':false,'ordering':'numeric'},"
-                + "'aggregations':[],"
-                + "'intervals':['1997-11-30T00:00:00.000Z/1997-12-01T00:00:00.000Z']}"))
         .returnsUnordered("product_id=1549; EXPR$1=30; EXPR$2=11; EXPR$3=1997",
-            "product_id=1553; EXPR$1=30; EXPR$2=11; EXPR$3=1997");
+            "product_id=1553; EXPR$1=30; EXPR$2=11; EXPR$3=1997")
+        .queryContains(
+            druidChecker("{'queryType':'groupBy','dataSource':'foodmart','granularity':'all'"));
   }
 
   @Test public void testFilterExtractionOnMonthWithBetween() {
     String sqlQuery = "SELECT \"product_id\", EXTRACT(month from \"timestamp\") FROM \"foodmart\""
         + " WHERE EXTRACT(month from \"timestamp\") BETWEEN 10 AND 11 AND  \"product_id\" >= 1558"
         + " GROUP BY \"product_id\", EXTRACT(month from \"timestamp\")";
-    String druidQuery = "{'queryType':'groupBy','dataSource':'foodmart',"
-        + "'granularity':{'type':'all'},'dimensions':[{'type':'default',"
-        + "'dimension':'product_id'},{'type':'extraction','dimension':'__time',"
-        + "'outputName':'extract_month','extractionFn':{'type':'timeFormat',"
-        + "'format':'M','timeZone':'UTC','locale':'und'}}],"
-        + "'limitSpec':{'type':'default'},'filter':{'type':'and',"
-        + "'fields':[{'type':'bound','dimension':'product_id','lower':'1558',"
-        + "'lowerStrict':false,'ordering':'numeric'},{'type':'bound',"
-        + "'dimension':'__time','lower':'10','lowerStrict':false,"
-        + "'ordering':'numeric','extractionFn':{'type':'timeFormat','format':'M',"
-        + "'timeZone':'UTC','locale':'und'}},{'type':'bound',"
-        + "'dimension':'__time','upper':'11','upperStrict':false,"
-        + "'ordering':'numeric','extractionFn':{'type':'timeFormat','format':'M',"
-        + "'timeZone':'UTC','locale':'und'}}]},'aggregations':[],"
-        + "'intervals':['1900-01-09T00:00:00.000Z/2992-01-10T00:00:00.000Z']}";
+    String druidQuery = "{'queryType':'groupBy','dataSource':'foodmart'";
     sql(sqlQuery)
         .returnsUnordered("product_id=1558; EXPR$1=10", "product_id=1558; EXPR$1=11",
             "product_id=1559; EXPR$1=11")
@@ -1774,27 +1694,29 @@ public class DruidAdapterIT {
         + " WHERE EXTRACT(month from \"timestamp\") IN (10, 11) AND  \"product_id\" >= 1558"
         + " GROUP BY \"product_id\", EXTRACT(month from \"timestamp\")";
     sql(sqlQuery)
+        .returnsUnordered("product_id=1558; EXPR$1=10", "product_id=1558; EXPR$1=11",
+            "product_id=1559; EXPR$1=11")
         .queryContains(
             druidChecker("{'queryType':'groupBy',"
-                + "'dataSource':'foodmart','granularity':{'type':'all'},"
-                + "'dimensions':[{'type':'default','dimension':'product_id'},"
+                + "'dataSource':'foodmart','granularity':'all',"
+                + "'dimensions':[{'type':'default','dimension':'product_id','outputName':'product_id','outputType':'STRING'},"
                 + "{'type':'extraction','dimension':'__time','outputName':'extract_month',"
                 + "'extractionFn':{'type':'timeFormat','format':'M','timeZone':'UTC',"
-                + "'locale':'und'}}],'limitSpec':{'type':'default'},"
+                + "'locale':'en-US'}}],'limitSpec':{'type':'default'},"
                 + "'filter':{'type':'and','fields':[{'type':'bound',"
                 + "'dimension':'product_id','lower':'1558','lowerStrict':false,"
-                + "'ordering':'numeric'},{'type':'or','fields':[{'type':'selector',"
-                + "'dimension':'__time','value':'10','extractionFn':{'type':'timeFormat',"
-                + "'format':'M','timeZone':'UTC','locale':'und'}},{'type':'selector',"
-                + "'dimension':'__time','value':'11','extractionFn':{'type':'timeFormat',"
-                + "'format':'M','timeZone':'UTC','locale':'und'}}]}]},"
+                + "'ordering':'numeric'},{'type':'or','fields':[{'type':'bound','dimension':'__time'"
+                + ",'lower':'10','lowerStrict':false,'upper':'10','upperStrict':false,"
+                + "'ordering':'numeric','extractionFn':{'type':'timeFormat',"
+                + "'format':'M','timeZone':'UTC','locale':'en-US'}},{'type':'bound',"
+                + "'dimension':'__time','lower':'11','lowerStrict':false,'upper':'11',"
+                + "'upperStrict':false,'ordering':'numeric','extractionFn':{'type':'timeFormat',"
+                + "'format':'M','timeZone':'UTC','locale':'en-US'}}]}]},"
                 + "'aggregations':[],"
-                + "'intervals':['1900-01-09T00:00:00.000Z/2992-01-10T00:00:00.000Z']}"))
-        .returnsUnordered("product_id=1558; EXPR$1=10", "product_id=1558; EXPR$1=11",
-            "product_id=1559; EXPR$1=11");
+                + "'intervals':['1900-01-09T00:00:00.000Z/2992-01-10T00:00:00.000Z']}"));
   }
 
-  @Test public void testPushofOrderByWithMonthExtract() {
+  @Test public void testPushOfOrderByWithMonthExtract() {
     String sqlQuery = "SELECT  extract(month from \"timestamp\") as m , \"product_id\", SUM"
         + "(\"unit_sales\") as s FROM \"foodmart\""
         + " WHERE \"product_id\" >= 1558"
@@ -1802,15 +1724,16 @@ public class DruidAdapterIT {
         + "\"product_id\"";
     sql(sqlQuery).queryContains(
         druidChecker("{'queryType':'groupBy','dataSource':'foodmart',"
-            + "'granularity':{'type':'all'},'dimensions':[{'type':'extraction',"
+            + "'granularity':'all','dimensions':[{'type':'extraction',"
             + "'dimension':'__time','outputName':'extract_month',"
             + "'extractionFn':{'type':'timeFormat','format':'M','timeZone':'UTC',"
-            + "'locale':'und'}},{'type':'default','dimension':'product_id'}],"
+            + "'locale':'en-US'}},{'type':'default','dimension':'product_id','outputName':"
+            + "'product_id','outputType':'STRING'}],"
             + "'limitSpec':{'type':'default','columns':[{'dimension':'extract_month',"
             + "'direction':'ascending','dimensionOrder':'numeric'},{'dimension':'S',"
             + "'direction':'ascending','dimensionOrder':'numeric'},"
             + "{'dimension':'product_id','direction':'ascending',"
-            + "'dimensionOrder':'alphanumeric'}]},'filter':{'type':'bound',"
+            + "'dimensionOrder':'lexicographic'}]},'filter':{'type':'bound',"
             + "'dimension':'product_id','lower':'1558','lowerStrict':false,"
             + "'ordering':'numeric'},'aggregations':[{'type':'longSum','name':'S',"
             + "'fieldName':'unit_sales'}],"
@@ -1830,28 +1753,30 @@ public class DruidAdapterIT {
         + "group by floor(\"timestamp\" to MONTH)\n"
         + "order by \"month\" DESC";
     sql(sql)
-        .explainContains("DruidQuery(table=[[foodmart, foodmart]], "
-        + "intervals=[[1900-01-09T00:00:00.000Z/2992-01-10T00:00:00.000Z]], projects=[[FLOOR($0, "
-        + "FLAG(MONTH))]], groups=[{0}], aggs=[[]], sort0=[0], dir0=[DESC])")
-        .queryContains(druidChecker("'queryType':'timeseries'", "'descending':true"));
+        .queryContains(druidChecker("'queryType':'timeseries'", "'descending':true"))
+        .explainContains("PLAN=EnumerableInterpreter\n"
+            + "  DruidQuery(table=[[foodmart, foodmart]], intervals=[[1900-01-09T00:00:00.000Z"
+            + "/2992-01-10T00:00:00.000Z]], projects=[[FLOOR($0, FLAG(MONTH))]], groups=[{0}], "
+            + "aggs=[[]], post_projects=[[CAST($0):TIMESTAMP(0) NOT NULL]], sort0=[0], dir0=[DESC])");
+
   }
 
   @Test public void testGroupByFloorTimeWithLimit() {
     final String sql =
-        "select cast(floor(\"timestamp\" to MONTH) as timestamp) as \"floor_month\"\n"
+        "select cast(floor(\"timestamp\" to MONTH) as timestamp) as \"floorOfMonth\"\n"
         + "from \"foodmart\"\n"
         + "group by floor(\"timestamp\" to MONTH)\n"
-        + "order by \"floor_month\" DESC LIMIT 3";
+        + "order by \"floorOfMonth\" DESC LIMIT 3";
     final String explain =
-        "    BindableSort(sort0=[$0], dir0=[DESC], fetch=[3])\n"
-        + "      DruidQuery(table=[[foodmart, foodmart]], "
-        + "intervals=[[1900-01-09T00:00:00.000Z/2992-01-10T00:00:00.000Z]], "
-        + "projects=[[FLOOR($0, FLAG(MONTH))]], groups=[{0}], aggs=[[]], "
-        + "sort0=[0], dir0=[DESC])";
-    sql(sql).explainContains(explain)
-        .queryContains(druidChecker("'queryType':'timeseries'", "'descending':true"))
-        .returnsOrdered("floor_month=1997-12-01 00:00:00", "floor_month=1997-11-01 00:00:00",
-            "floor_month=1997-10-01 00:00:00");
+        "PLAN=EnumerableInterpreter\n"
+            + "  DruidQuery(table=[[foodmart, foodmart]], intervals=[[1900-01-09T00:00:00.000Z/"
+            + "2992-01-10T00:00:00.000Z]], projects=[[FLOOR($0, FLAG(MONTH))]], groups=[{0}], "
+            + "aggs=[[]], post_projects=[[CAST($0):TIMESTAMP(0) NOT NULL]], sort0=[0], dir0=[DESC], fetch=[3])";
+    sql(sql)
+        .explainContains(explain)
+        .returnsOrdered("floorOfMonth=1997-12-01 00:00:00", "floorOfMonth=1997-11-01 00:00:00",
+            "floorOfMonth=1997-10-01 00:00:00")
+        .queryContains(druidChecker("'queryType':'groupBy'", "'direction':'descending'"));
   }
 
   @Test public void testPushofOrderByYearWithYearMonthExtract() {
@@ -1869,19 +1794,20 @@ public class DruidAdapterIT {
         + "sort1=[1], sort2=[3], sort3=[2], dir0=[DESC], "
         + "dir1=[ASC], dir2=[DESC], dir3=[ASC], fetch=[3])";
     final String expectedDruidQuery = "{'queryType':'groupBy','dataSource':'foodmart',"
-        + "'granularity':{'type':'all'},'dimensions':[{'type':'extraction',"
+        + "'granularity':'all','dimensions':[{'type':'extraction',"
         + "'dimension':'__time','outputName':'extract_year',"
         + "'extractionFn':{'type':'timeFormat','format':'yyyy','timeZone':'UTC',"
-        + "'locale':'und'}},{'type':'extraction','dimension':'__time',"
+        + "'locale':'en-US'}},{'type':'extraction','dimension':'__time',"
         + "'outputName':'extract_month','extractionFn':{'type':'timeFormat',"
-        + "'format':'M','timeZone':'UTC','locale':'und'}},{'type':'default',"
-        + "'dimension':'product_id'}],'limitSpec':{'type':'default','limit':3,"
+        + "'format':'M','timeZone':'UTC','locale':'en-US'}},{'type':'default',"
+        + "'dimension':'product_id','outputName':'product_id','outputType':'STRING'}],"
+        + "'limitSpec':{'type':'default','limit':3,"
         + "'columns':[{'dimension':'extract_year','direction':'descending',"
         + "'dimensionOrder':'numeric'},{'dimension':'extract_month',"
         + "'direction':'ascending','dimensionOrder':'numeric'},{'dimension':'S',"
         + "'direction':'descending','dimensionOrder':'numeric'},"
         + "{'dimension':'product_id','direction':'ascending',"
-        + "'dimensionOrder':'alphanumeric'}]},'filter':{'type':'bound',"
+        + "'dimensionOrder':'lexicographic'}]},'filter':{'type':'bound',"
         + "'dimension':'product_id','lower':'1558','lowerStrict':false,"
         + "'ordering':'numeric'},'aggregations':[{'type':'longSum','name':'S',"
         + "'fieldName':'unit_sales'}],"
@@ -1903,26 +1829,12 @@ public class DruidAdapterIT {
         + "filter=[>=(CAST($1):BIGINT, 1558)], projects=[[EXTRACT(FLAG(YEAR), $0), "
         + "EXTRACT(FLAG(MONTH), $0), $1, $89]], groups=[{0, 1, 2}], aggs=[[SUM($3)]], "
         + "sort0=[3], sort1=[1], sort2=[2], dir0=[DESC], dir1=[DESC], dir2=[ASC], fetch=[3])";
-    final String expectedDruidQuery = "{'queryType':'groupBy','dataSource':'foodmart',"
-        + "'granularity':{'type':'all'},'dimensions':[{'type':'extraction',"
-        + "'dimension':'__time','outputName':'extract_year',"
-        + "'extractionFn':{'type':'timeFormat','format':'yyyy','timeZone':'UTC',"
-        + "'locale':'und'}},{'type':'extraction','dimension':'__time',"
-        + "'outputName':'extract_month','extractionFn':{'type':'timeFormat',"
-        + "'format':'M','timeZone':'UTC','locale':'und'}},{'type':'default',"
-        + "'dimension':'product_id'}],'limitSpec':{'type':'default','limit':3,"
-        + "'columns':[{'dimension':'S','direction':'descending',"
-        + "'dimensionOrder':'numeric'},{'dimension':'extract_month',"
-        + "'direction':'descending','dimensionOrder':'numeric'},"
-        + "{'dimension':'product_id','direction':'ascending',"
-        + "'dimensionOrder':'alphanumeric'}]},'filter':{'type':'bound',"
-        + "'dimension':'product_id','lower':'1558','lowerStrict':false,"
-        + "'ordering':'numeric'},'aggregations':[{'type':'longSum','name':'S',"
-        + "'fieldName':'unit_sales'}],"
-        + "'intervals':['1900-01-09T00:00:00.000Z/2992-01-10T00:00:00.000Z']}";
-    sql(sqlQuery).explainContains(expectedPlan).queryContains(druidChecker(expectedDruidQuery))
+    final String expectedDruidQueryType = "'queryType':'groupBy'";
+    sql(sqlQuery)
         .returnsOrdered("Y=1997; M=12; product_id=1558; S=30", "Y=1997; M=3; product_id=1558; S=29",
-            "Y=1997; M=5; product_id=1558; S=27");
+            "Y=1997; M=5; product_id=1558; S=27")
+        .explainContains(expectedPlan)
+        .queryContains(druidChecker(expectedDruidQueryType));
   }
 
   @Test public void testGroupByTimeSortOverMetrics() {
@@ -1930,13 +1842,6 @@ public class DruidAdapterIT {
         + " cast(floor(\"timestamp\" to month) as timestamp)"
         + " FROM \"foodmart\" group by floor(\"timestamp\" to month) order by s DESC";
     sql(sqlQuery)
-        .explainContains("PLAN=EnumerableInterpreter\n"
-        + "  BindableSort(sort0=[$1], dir0=[DESC])\n"
-        + "    BindableProject(C=[$1], S=[$2], EXPR$2=[CAST($0):TIMESTAMP(0) NOT NULL])\n"
-        + "      DruidQuery(table=[[foodmart, foodmart]], "
-        + "intervals=[[1900-01-09T00:00:00.000Z/2992-01-10T00:00:00.000Z]], projects=[[FLOOR($0, "
-        + "FLAG(MONTH)), $89]], groups=[{0}], aggs=[[COUNT(), SUM($1)]])")
-        .queryContains(druidChecker("'queryType':'timeseries'"))
         .returnsOrdered("C=8716; S=26796; EXPR$2=1997-12-01 00:00:00",
         "C=8231; S=25270; EXPR$2=1997-11-01 00:00:00",
         "C=7752; S=23763; EXPR$2=1997-07-01 00:00:00",
@@ -1948,7 +1853,13 @@ public class DruidAdapterIT {
         "C=6844; S=20957; EXPR$2=1997-02-01 00:00:00",
         "C=6662; S=20388; EXPR$2=1997-09-01 00:00:00",
         "C=6588; S=20179; EXPR$2=1997-04-01 00:00:00",
-        "C=6478; S=19958; EXPR$2=1997-10-01 00:00:00");
+        "C=6478; S=19958; EXPR$2=1997-10-01 00:00:00")
+        .queryContains(druidChecker("'queryType':'groupBy'"))
+        .explainContains("PLAN=EnumerableInterpreter\n"
+            + "  DruidQuery(table=[[foodmart, foodmart]], intervals=[[1900-01-09T00:00:00.000Z/"
+            + "2992-01-10T00:00:00.000Z]], projects=[[FLOOR($0, FLAG(MONTH)), $89]], groups=[{0}], "
+            + "aggs=[[COUNT(), SUM($1)]], post_projects=[[$1, $2, CAST($0):TIMESTAMP(0) NOT NULL]],"
+            + " sort0=[1], dir0=[DESC])");
   }
 
   @Test public void testNumericOrderingOfOrderByOperatorFullTime() {
@@ -1957,7 +1868,7 @@ public class DruidAdapterIT {
         + "\"foodmart\" group by \"timestamp\" order by \"timestamp\" DESC, c DESC, s LIMIT 5";
     final String druidSubQuery = "'limitSpec':{'type':'default','limit':5,"
         + "'columns':[{'dimension':'extract','direction':'descending',"
-        + "'dimensionOrder':'alphanumeric'},{'dimension':'C',"
+        + "'dimensionOrder':'lexicographic'},{'dimension':'C',"
         + "'direction':'descending','dimensionOrder':'numeric'},{'dimension':'S',"
         + "'direction':'ascending','dimensionOrder':'numeric'}]},"
         + "'aggregations':[{'type':'count','name':'C'},{'type':'longSum',"
@@ -1993,7 +1904,7 @@ public class DruidAdapterIT {
         + "\"foodmart\" group by \"brand_name\" order by \"brand_name\"  DESC LIMIT 5";
     final String druidSubQuery = "'limitSpec':{'type':'default','limit':5,"
         + "'columns':[{'dimension':'brand_name','direction':'descending',"
-        + "'dimensionOrder':'alphanumeric'}]}";
+        + "'dimensionOrder':'lexicographic'}]}";
     sql(sqlQuery).returnsOrdered("brand_name=Washington; C=576; S=1775\nbrand_name=Walrus; C=457;"
         + " S=1399\nbrand_name=Urban; C=299; S=924\nbrand_name=Tri-State; C=2339; "
         + "S=7270\nbrand_name=Toucan; C=123; S=380").queryContains(druidChecker(druidSubQuery));
@@ -2002,21 +1913,24 @@ public class DruidAdapterIT {
 
   @Test public void testGroupByWeekExtract() {
     final String sql = "SELECT extract(week from \"timestamp\") from \"foodmart\" where "
-        + "\"product_id\" = 1558 and extract(week from \"timestamp\") IN (10, 11)group by extract"
+        + "\"product_id\" = 1558 and extract(week from \"timestamp\") IN (10, 11) group by extract"
         + "(week from \"timestamp\")";
 
     final String druidQuery = "{'queryType':'groupBy','dataSource':'foodmart',"
-        + "'granularity':{'type':'all'},'dimensions':[{'type':'extraction',"
+        + "'granularity':'all','dimensions':[{'type':'extraction',"
         + "'dimension':'__time','outputName':'extract_week',"
         + "'extractionFn':{'type':'timeFormat','format':'w','timeZone':'UTC',"
-        + "'locale':'und'}}],'limitSpec':{'type':'default'},"
-        + "'filter':{'type':'and','fields':[{'type':'selector',"
-        + "'dimension':'product_id','value':'1558'},{'type':'or',"
-        + "'fields':[{'type':'selector','dimension':'__time','value':'10',"
+        + "'locale':'en-US'}}],'limitSpec':{'type':'default'},"
+        + "'filter':{'type':'and','fields':[{'type':'bound','dimension':'product_id',"
+        + "'lower':'1558','lowerStrict':false,'upper':'1558','upperStrict':false,"
+        + "'ordering':'numeric'},{'type':'or',"
+        + "'fields':[{'type':'bound','dimension':'__time','lower':'10','lowerStrict':false,"
+        + "'upper':'10','upperStrict':false,'ordering':'numeric',"
         + "'extractionFn':{'type':'timeFormat','format':'w','timeZone':'UTC',"
-        + "'locale':'und'}},{'type':'selector','dimension':'__time',"
-        + "'value':'11','extractionFn':{'type':'timeFormat','format':'w',"
-        + "'timeZone':'UTC','locale':'und'}}]}]},"
+        + "'locale':'en-US'}},{'type':'bound','dimension':'__time','lower':'11','lowerStrict':false,"
+        + "'upper':'11','upperStrict':false,'ordering':'numeric',"
+        + "'extractionFn':{'type':'timeFormat','format':'w',"
+        + "'timeZone':'UTC','locale':'en-US'}}]}]},"
         + "'aggregations':[],"
         + "'intervals':['1900-01-09T00:00:00.000Z/2992-01-10T00:00:00.000Z']}";
     sql(sql).returnsOrdered("EXPR$0=10\nEXPR$0=11").queryContains(druidChecker(druidQuery));
@@ -2131,94 +2045,82 @@ public class DruidAdapterIT {
   @Test public void testPlusArithmeticOperation() {
     final String sqlQuery = "select sum(\"store_sales\") + sum(\"store_cost\") as a, "
         + "\"store_state\" from \"foodmart\"  group by \"store_state\" order by a desc";
-    String postAggString = "'postAggregations':[{'type':'arithmetic','name':'postagg#0','fn':'+',"
-        + "'fields':[{'type':'fieldAccess','name':'','fieldName':'$f1'},{'type':'fieldAccess','"
-        + "name':'','fieldName':'$f2'}]}]";
+    String postAggString = "type':'expression','name':'A','expression':'(\\'$f1\\' + \\'$f2\\')'}]";
     final String plan = "PLAN=EnumerableInterpreter\n"
-        + "  DruidQuery(table=[[foodmart, foodmart]], "
-        + "intervals=[[1900-01-09T00:00:00.000Z/2992-01-10T00:00:00.000Z]], "
-        + "groups=[{63}], aggs=[[SUM($90), SUM($91)]], post_projects=[[+($1, $2), $0]], "
-        + "sort0=[0], dir0=[DESC]";
+        + "  DruidQuery(table=[[foodmart, foodmart]], intervals=[[1900-01-09T00:00:00.000Z/"
+        + "2992-01-10T00:00:00.000Z]], projects=[[$63, $90, $91]], groups=[{0}], "
+        + "aggs=[[SUM($1), SUM($2)]], post_projects=[[+($1, $2), $0]], sort0=[0], dir0=[DESC])";
     sql(sqlQuery, FOODMART)
-        .explainContains(plan)
-        .queryContains(druidChecker(postAggString))
         .returnsOrdered("A=369117.52790000016; store_state=WA",
             "A=222698.26509999996; store_state=CA",
-            "A=199049.57059999998; store_state=OR");
+            "A=199049.57059999998; store_state=OR")
+        .explainContains(plan)
+        .queryContains(druidChecker(postAggString));
   }
 
   @Test public void testDivideArithmeticOperation() {
     final String sqlQuery = "select \"store_state\", sum(\"store_sales\") / sum(\"store_cost\") "
         + "as a from \"foodmart\"  group by \"store_state\" order by a desc";
-    String postAggString = "'postAggregations':[{'type':'arithmetic','name':'postagg#0',"
-        + "'fn':'quotient','fields':[{'type':'fieldAccess','name':'','fieldName':'$f1'},"
-        + "{'type':'fieldAccess','name':'','fieldName':'$f2'}]}]";
+    String postAggString = "[{'type':'expression','name':'A','expression':'(\\'$f1\\' / \\'$f2\\')";
     final String plan = "PLAN=EnumerableInterpreter\n"
-            + "  DruidQuery(table=[[foodmart, foodmart]], "
-            + "intervals=[[1900-01-09T00:00:00.000Z/2992-01-10T00:00:00.000Z]], "
-            + "groups=[{63}], aggs=[[SUM($90), SUM($91)]], post_projects=[[$0, /($1, $2)]], "
-            + "sort0=[1], dir0=[DESC]";
+        + "  DruidQuery(table=[[foodmart, foodmart]], intervals=[[1900-01-09T00:00:00.000Z/"
+        + "2992-01-10T00:00:00.000Z]], projects=[[$63, $90, $91]], groups=[{0}], "
+        + "aggs=[[SUM($1), SUM($2)]], post_projects=[[$0, /($1, $2)]], sort0=[1], dir0=[DESC])";
     sql(sqlQuery, FOODMART)
-        .explainContains(plan)
-        .queryContains(druidChecker(postAggString))
         .returnsOrdered("store_state=OR; A=2.506091302943239",
             "store_state=CA; A=2.505379741272971",
-            "store_state=WA; A=2.5045806163801996");
+            "store_state=WA; A=2.5045806163801996")
+        .explainContains(plan)
+        .queryContains(druidChecker(postAggString));
   }
 
   @Test public void testMultiplyArithmeticOperation() {
     final String sqlQuery = "select \"store_state\", sum(\"store_sales\") * sum(\"store_cost\") "
         + "as a from \"foodmart\"  group by \"store_state\" order by a desc";
-    String postAggString = "'postAggregations':[{'type':'arithmetic','name':'postagg#0',"
-        + "'fn':'*','fields':[{'type':'fieldAccess','name':'','fieldName':'$f1'},"
-        + "{'type':'fieldAccess','name':'','fieldName':'$f2'}]}]";
+    String postAggString = "{'type':'expression','name':'A','expression':'(\\'$f1\\' * \\'$f2\\')'";
     final String plan = "PLAN=EnumerableInterpreter\n"
-            + "  DruidQuery(table=[[foodmart, foodmart]], "
-            + "intervals=[[1900-01-09T00:00:00.000Z/2992-01-10T00:00:00.000Z]], "
-            + "groups=[{63}], aggs=[[SUM($90), SUM($91)]], post_projects=[[$0, *($1, $2)]], "
-            + "sort0=[1], dir0=[DESC]";
+        + "  DruidQuery(table=[[foodmart, foodmart]], intervals=[[1900-01-09T00:00:00.000Z/"
+        + "2992-01-10T00:00:00.000Z]], projects=[[$63, $90, $91]], groups=[{0}], aggs=[[SUM($1),"
+        + " SUM($2)]], post_projects=[[$0, *($1, $2)]], sort0=[1], dir0=[DESC])";
     sql(sqlQuery, FOODMART)
-        .explainContains(plan)
-        .queryContains(druidChecker(postAggString))
         .returnsOrdered("store_state=WA; A=2.7783838325212463E10",
             "store_state=CA; A=1.0112000537448784E10",
-            "store_state=OR; A=8.077425041941243E9");
+            "store_state=OR; A=8.077425041941243E9")
+        .explainContains(plan)
+        .queryContains(druidChecker(postAggString));
   }
 
   @Test public void testMinusArithmeticOperation() {
     final String sqlQuery = "select \"store_state\", sum(\"store_sales\") - sum(\"store_cost\") "
         + "as a from \"foodmart\"  group by \"store_state\" order by a desc";
-    String postAggString = "'postAggregations':[{'type':'arithmetic','name':'postagg#0',"
-        + "'fn':'-','fields':[{'type':'fieldAccess','name':'','fieldName':'$f1'},"
-        + "{'type':'fieldAccess','name':'','fieldName':'$f2'}]}]";
+    String postAggString = "'postAggregations':[{'type':'expression','name':'A',"
+        + "'expression':'(\\'$f1\\' - \\'$f2\\')'}]";
     final String plan = "PLAN=EnumerableInterpreter\n"
-            + "  DruidQuery(table=[[foodmart, foodmart]], "
-            + "intervals=[[1900-01-09T00:00:00.000Z/2992-01-10T00:00:00.000Z]], "
-            + "groups=[{63}], aggs=[[SUM($90), SUM($91)]], post_projects=[[$0, -($1, $2)]], "
-            + "sort0=[1], dir0=[DESC]";
+        + "  DruidQuery(table=[[foodmart, foodmart]], intervals=[[1900-01-09T00:00:00.000Z/"
+        + "2992-01-10T00:00:00.000Z]], projects=[[$63, $90, $91]], groups=[{0}], aggs=[[SUM($1), "
+        + "SUM($2)]], post_projects=[[$0, -($1, $2)]], sort0=[1], dir0=[DESC])";
     sql(sqlQuery, FOODMART)
-        .explainContains(plan)
-        .queryContains(druidChecker(postAggString))
         .returnsOrdered("store_state=WA; A=158468.91210000002",
             "store_state=CA; A=95637.41489999992",
-            "store_state=OR; A=85504.56939999988");
+            "store_state=OR; A=85504.56939999988")
+        .explainContains(plan)
+        .queryContains(druidChecker(postAggString));
   }
 
   @Test public void testConstantPostAggregator() {
     final String sqlQuery = "select \"store_state\", sum(\"store_sales\") + 100 as a from "
         + "\"foodmart\"  group by \"store_state\" order by a desc";
-    String postAggString = "{'type':'constant','name':'','value':100.0}";
+    String postAggString = "{'type':'expression','name':'A','expression':'(\\'$f1\\' + 100)'}";
     final String plan = "PLAN=EnumerableInterpreter\n"
-            + "  DruidQuery(table=[[foodmart, foodmart]], "
-            + "intervals=[[1900-01-09T00:00:00.000Z/2992-01-10T00:00:00.000Z]], "
-            + "groups=[{63}], aggs=[[SUM($90)]], post_projects=[[$0, +($1, 100)]], "
-            + "sort0=[1], dir0=[DESC]";
+        + "  DruidQuery(table=[[foodmart, foodmart]], intervals=[[1900-01-09T00:00:00.000Z/"
+        + "2992-01-10T00:00:00.000Z]], projects=[[$63, $90]], groups=[{0}], aggs=[[SUM($1)]], "
+        + "post_projects=[[$0, +($1, 100)]], sort0=[1], dir0=[DESC])";
     sql(sqlQuery, FOODMART)
-        .explainContains(plan)
-        .queryContains(druidChecker(postAggString))
         .returnsOrdered("store_state=WA; A=263893.2200000001",
             "store_state=CA; A=159267.83999999994",
-            "store_state=OR; A=142377.06999999992");
+            "store_state=OR; A=142377.06999999992")
+        .explainContains(plan)
+        .queryContains(druidChecker(postAggString));
   }
 
   @Test public void testRecursiveArithmeticOperation() {
@@ -2226,26 +2128,19 @@ public class DruidAdapterIT {
         + "(sum(\"store_sales\")-sum(\"store_cost\")) / (count(*) * 3) "
         + "AS a,sum(\"unit_sales\") AS b, \"store_state\"  from \"foodmart\"  group "
         + "by \"store_state\") order by c desc";
-    String postAggString = "'postAggregations':[{'type':'arithmetic','name':'postagg#0',"
-        + "'fn':'*','fields':[{'type':'constant','name':'','value':-1.0},{'type':"
-        + "'arithmetic','name':'','fn':'+','fields':[{'type':'arithmetic','name':"
-        + "'','fn':'quotient','fields':[{'type':'arithmetic','name':'','fn':'-',"
-        + "'fields':[{'type':'fieldAccess','name':'','fieldName':'$f1'},{'type':"
-        + "'fieldAccess','name':'','fieldName':'$f2'}]},{'type':'arithmetic','name':"
-        + "'','fn':'*','fields':[{'type':'fieldAccess','name':'','fieldName':'$f3'},"
-        + "{'type':'constant','name':'','value':3.0}]}]},{'type':'fieldAccess','name'"
-        + ":'','fieldName':'B'}]}]}]";
+    String postAggString = "'postAggregat

<TRUNCATED>

[4/4] calcite git commit: [CALCITE-2170] Use Druid Expressions capabilities to improve the amount of work that can be pushed to Druid

Posted by jc...@apache.org.
[CALCITE-2170] Use Druid Expressions capabilities to improve the amount of work that can be pushed to Druid

Close apache/calcite#624


Project: http://git-wip-us.apache.org/repos/asf/calcite/repo
Commit: http://git-wip-us.apache.org/repos/asf/calcite/commit/98f3704e
Tree: http://git-wip-us.apache.org/repos/asf/calcite/tree/98f3704e
Diff: http://git-wip-us.apache.org/repos/asf/calcite/diff/98f3704e

Branch: refs/heads/master
Commit: 98f3704ea4536d6ead6465376bd02139b889f6e9
Parents: 707f4de
Author: Christian Tzolov <ch...@gmail.com>
Authored: Thu Nov 3 06:30:04 2016 +0100
Committer: Jesus Camacho Rodriguez <jc...@apache.org>
Committed: Fri Feb 16 19:16:58 2018 -0800

----------------------------------------------------------------------
 .../adapter/druid/BinaryOperatorConversion.java |   64 +
 .../adapter/druid/CeilOperatorConversion.java   |   77 +
 .../adapter/druid/DefaultDimensionSpec.java     |   29 +-
 .../calcite/adapter/druid/DimensionSpec.java    |    8 +-
 .../adapter/druid/DirectOperatorConversion.java |   55 +
 .../adapter/druid/DruidConnectionImpl.java      |   49 +-
 .../adapter/druid/DruidDateTimeUtils.java       |   68 +
 .../calcite/adapter/druid/DruidExpressions.java |  283 +++
 .../apache/calcite/adapter/druid/DruidJson.java |   29 +
 .../calcite/adapter/druid/DruidJsonFilter.java  |  642 +++++
 .../calcite/adapter/druid/DruidQuery.java       | 2114 ++++++++--------
 .../adapter/druid/DruidResultEnumerator.java    |   25 -
 .../calcite/adapter/druid/DruidRules.java       |  626 +----
 .../adapter/druid/DruidSqlCastConverter.java    |  152 ++
 .../druid/DruidSqlOperatorConverter.java        |   49 +
 .../apache/calcite/adapter/druid/DruidType.java |   16 +-
 .../druid/ExtractOperatorConversion.java        |   80 +
 .../adapter/druid/ExtractionDimensionSpec.java  |   50 +-
 .../adapter/druid/ExtractionFunction.java       |    2 +-
 .../adapter/druid/FloorOperatorConversion.java  |   74 +
 .../calcite/adapter/druid/Granularities.java    |    4 +-
 .../calcite/adapter/druid/Granularity.java      |    2 +-
 .../adapter/druid/NaryOperatorConverter.java    |   60 +
 .../druid/SubstringOperatorConversion.java      |   63 +
 .../druid/TimeExtractionDimensionSpec.java      |   75 -
 .../adapter/druid/TimeExtractionFunction.java   |   63 +-
 .../druid/UnaryPrefixOperatorConversion.java    |   63 +
 .../druid/UnarySuffixOperatorConversion.java    |   62 +
 .../calcite/adapter/druid/VirtualColumn.java    |  100 +
 .../adapter/druid/DruidQueryFilterTest.java     |   47 +-
 .../org/apache/calcite/test/DruidAdapterIT.java | 2367 ++++++++++++------
 31 files changed, 4880 insertions(+), 2518 deletions(-)
----------------------------------------------------------------------
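
As the DruidAdapterIT changes in part [1/4] above illustrate, one visible effect
of this commit on generated queries is that arithmetic post-aggregations are now
serialized as native Druid expressions instead of 'arithmetic' post-aggregators.
A representative before/after pair, taken from the updated test expectations:

  Before: {'type':'arithmetic','name':'postagg#0','fn':'+',
           'fields':[{'type':'fieldAccess','name':'','fieldName':'$f1'},
                     {'type':'fieldAccess','name':'','fieldName':'$f2'}]}
  After:  {'type':'expression','name':'A','expression':'(\'$f1\' + \'$f2\')'}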


http://git-wip-us.apache.org/repos/asf/calcite/blob/98f3704e/druid/src/main/java/org/apache/calcite/adapter/druid/BinaryOperatorConversion.java
----------------------------------------------------------------------
diff --git a/druid/src/main/java/org/apache/calcite/adapter/druid/BinaryOperatorConversion.java b/druid/src/main/java/org/apache/calcite/adapter/druid/BinaryOperatorConversion.java
new file mode 100644
index 0000000..d10c147
--- /dev/null
+++ b/druid/src/main/java/org/apache/calcite/adapter/druid/BinaryOperatorConversion.java
@@ -0,0 +1,64 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to you under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.calcite.adapter.druid;
+
+import org.apache.calcite.rel.type.RelDataType;
+import org.apache.calcite.rex.RexCall;
+import org.apache.calcite.rex.RexNode;
+import org.apache.calcite.sql.SqlOperator;
+
+import java.util.List;
+
+/**
+ * Binary operator conversion utility class; used to convert expressions like exp1 Operator exp2.
+ */
+public class BinaryOperatorConversion implements DruidSqlOperatorConverter {
+  private final SqlOperator operator;
+  private final String druidOperator;
+
+  public BinaryOperatorConversion(final SqlOperator operator, final String druidOperator) {
+    this.operator = operator;
+    this.druidOperator = druidOperator;
+  }
+
+  @Override public SqlOperator calciteOperator() {
+    return operator;
+  }
+
+  @Override public String toDruidExpression(RexNode rexNode, RelDataType rowType,
+      DruidQuery druidQuery) {
+
+    final RexCall call = (RexCall) rexNode;
+
+    final List<String> druidExpressions = DruidExpressions.toDruidExpressions(
+        druidQuery, rowType,
+        call.getOperands());
+    if (druidExpressions == null) {
+      return null;
+    }
+    if (druidExpressions.size() != 2) {
+      throw new IllegalStateException(
+          DruidQuery.format("Got binary operator[%s] with %s args?", operator.getName(),
+              druidExpressions.size()));
+    }
+
+    return DruidQuery
+        .format("(%s %s %s)", druidExpressions.get(0), druidOperator, druidExpressions.get(1));
+  }
+}
+
+// End BinaryOperatorConversion.java
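
For illustration, a minimal sketch of wiring such a converter; the PLUS/'+'
pairing below is an assumption for demonstration, not necessarily one of the
registrations the commit adds:

  // Hypothetical registration mapping Calcite's '+' onto Druid's '+' operator.
  DruidSqlOperatorConverter plus =
      new BinaryOperatorConversion(SqlStdOperatorTable.PLUS, "+");
  // For a RexCall representing x + y whose operands translate to the Druid
  // expressions e0 and e1, toDruidExpression(...) returns "(e0 + e1)".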

http://git-wip-us.apache.org/repos/asf/calcite/blob/98f3704e/druid/src/main/java/org/apache/calcite/adapter/druid/CeilOperatorConversion.java
----------------------------------------------------------------------
diff --git a/druid/src/main/java/org/apache/calcite/adapter/druid/CeilOperatorConversion.java b/druid/src/main/java/org/apache/calcite/adapter/druid/CeilOperatorConversion.java
new file mode 100644
index 0000000..7f15307
--- /dev/null
+++ b/druid/src/main/java/org/apache/calcite/adapter/druid/CeilOperatorConversion.java
@@ -0,0 +1,77 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to you under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.calcite.adapter.druid;
+
+import org.apache.calcite.avatica.util.TimeUnitRange;
+import org.apache.calcite.rel.type.RelDataType;
+import org.apache.calcite.rex.RexCall;
+import org.apache.calcite.rex.RexLiteral;
+import org.apache.calcite.rex.RexNode;
+import org.apache.calcite.sql.SqlOperator;
+import org.apache.calcite.sql.fun.SqlStdOperatorTable;
+
+import java.util.TimeZone;
+
+import javax.annotation.Nullable;
+
+/**
+ * DruidSqlOperatorConverter implementation that handles conversions of CEIL operations.
+ */
+public class CeilOperatorConversion implements DruidSqlOperatorConverter {
+  @Override public SqlOperator calciteOperator() {
+    return SqlStdOperatorTable.CEIL;
+  }
+
+  @Nullable
+  @Override public String toDruidExpression(RexNode rexNode, RelDataType rowType,
+      DruidQuery query) {
+    final RexCall call = (RexCall) rexNode;
+    final RexNode arg = call.getOperands().get(0);
+    final String druidExpression = DruidExpressions.toDruidExpression(
+        arg,
+        rowType,
+        query);
+    if (druidExpression == null) {
+      return null;
+    } else if (call.getOperands().size() == 1) {
+      // case CEIL(expr)
+      return DruidQuery.format("ceil(%s)", druidExpression);
+    } else if (call.getOperands().size() == 2) {
+      // CEIL(expr TO timeUnit)
+      final RexLiteral flag = (RexLiteral) call.getOperands().get(1);
+      final TimeUnitRange timeUnit = (TimeUnitRange) flag.getValue();
+      final Granularity.Type type = DruidDateTimeUtils.toDruidGranularity(timeUnit);
+      if (type == null) {
+        // Unknown granularity; bail out.
+        return null;
+      }
+      String isoPeriodFormat = DruidDateTimeUtils.toISOPeriodFormat(type);
+      if (isoPeriodFormat == null) {
+        return null;
+      }
+      return DruidExpressions.applyTimestampCeil(
+          druidExpression,
+          isoPeriodFormat,
+          "",
+          TimeZone.getTimeZone(query.getConnectionConfig().timeZone()));
+    } else {
+      return null;
+    }
+  }
+}
+
+// End CeilOperatorConversion.java
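
A sketch of the two shapes this converter handles, following the code above;
the timestamp rendering assumes Druid's timestamp_ceil expression, which
DruidExpressions.applyTimestampCeil is expected to emit:

  // CEIL(x)           -> "ceil(<druid expression of x>)"
  // CEIL(ts TO MONTH) -> timestamp ceiling with ISO-8601 period "P1M"
  //                      (org.joda.time.Period.months(1).toString() == "P1M"),
  //                      evaluated in the connection's configured timezone.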

http://git-wip-us.apache.org/repos/asf/calcite/blob/98f3704e/druid/src/main/java/org/apache/calcite/adapter/druid/DefaultDimensionSpec.java
----------------------------------------------------------------------
diff --git a/druid/src/main/java/org/apache/calcite/adapter/druid/DefaultDimensionSpec.java b/druid/src/main/java/org/apache/calcite/adapter/druid/DefaultDimensionSpec.java
index 015edff..28f99da 100644
--- a/druid/src/main/java/org/apache/calcite/adapter/druid/DefaultDimensionSpec.java
+++ b/druid/src/main/java/org/apache/calcite/adapter/druid/DefaultDimensionSpec.java
@@ -17,6 +17,7 @@
 package org.apache.calcite.adapter.druid;
 
 import com.fasterxml.jackson.core.JsonGenerator;
+import com.google.common.base.Preconditions;
 
 import java.io.IOException;
 
@@ -29,17 +30,43 @@ import java.io.IOException;
 public class DefaultDimensionSpec implements DimensionSpec {
 
   private final String dimension;
+  private final String outputName;
+  private final DruidType outputType;
+
+  public DefaultDimensionSpec(String dimension, String outputName, DruidType outputType) {
+    this.dimension = Preconditions.checkNotNull(dimension);
+    this.outputName = Preconditions.checkNotNull(outputName);
+    this.outputType = outputType == null ? DruidType.STRING : outputType;
+  }
 
   public DefaultDimensionSpec(String dimension) {
-    this.dimension = dimension;
+    this(dimension, dimension, null);
   }
 
   @Override public void write(JsonGenerator generator) throws IOException {
     generator.writeStartObject();
     generator.writeStringField("type", "default");
     generator.writeStringField("dimension", dimension);
+    generator.writeStringField("outputName", outputName);
+    generator.writeStringField("outputType", outputType.name());
     generator.writeEndObject();
   }
+
+  @Override public String getOutputName() {
+    return outputName;
+  }
+
+  @Override public DruidType getOutputType() {
+    return outputType;
+  }
+
+  @Override public ExtractionFunction getExtractionFn() {
+    return null;
+  }
+
+  @Override public String getDimension() {
+    return dimension;
+  }
 }
 
 // End DefaultDimensionSpec.java
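
A minimal serialization sketch (assuming Jackson's JsonFactory and JsonGenerator,
which the adapter already uses) showing the new fields; the output matches the
expectations updated throughout DruidAdapterIT:

  StringWriter w = new StringWriter();
  JsonGenerator g = new JsonFactory().createGenerator(w);
  new DefaultDimensionSpec("countryName").write(g);
  g.close();
  // w.toString() ->
  // {"type":"default","dimension":"countryName","outputName":"countryName","outputType":"STRING"}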

http://git-wip-us.apache.org/repos/asf/calcite/blob/98f3704e/druid/src/main/java/org/apache/calcite/adapter/druid/DimensionSpec.java
----------------------------------------------------------------------
diff --git a/druid/src/main/java/org/apache/calcite/adapter/druid/DimensionSpec.java b/druid/src/main/java/org/apache/calcite/adapter/druid/DimensionSpec.java
index 45625c3..14c02e6 100644
--- a/druid/src/main/java/org/apache/calcite/adapter/druid/DimensionSpec.java
+++ b/druid/src/main/java/org/apache/calcite/adapter/druid/DimensionSpec.java
@@ -16,12 +16,18 @@
  */
 package org.apache.calcite.adapter.druid;
 
+import javax.annotation.Nullable;
+
 /**
  * Interface for Druid DimensionSpec.
  *
  * <p>DimensionSpecs define how dimension values get transformed prior to aggregation.
  */
-public interface DimensionSpec extends DruidQuery.Json {
+public interface DimensionSpec extends DruidJson {
+  String getOutputName();
+  DruidType getOutputType();
+  @Nullable ExtractionFunction getExtractionFn();
+  String getDimension();
 }
 
 // End DimensionSpec.java

http://git-wip-us.apache.org/repos/asf/calcite/blob/98f3704e/druid/src/main/java/org/apache/calcite/adapter/druid/DirectOperatorConversion.java
----------------------------------------------------------------------
diff --git a/druid/src/main/java/org/apache/calcite/adapter/druid/DirectOperatorConversion.java b/druid/src/main/java/org/apache/calcite/adapter/druid/DirectOperatorConversion.java
new file mode 100644
index 0000000..c937e83
--- /dev/null
+++ b/druid/src/main/java/org/apache/calcite/adapter/druid/DirectOperatorConversion.java
@@ -0,0 +1,55 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to you under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.calcite.adapter.druid;
+
+import org.apache.calcite.rel.type.RelDataType;
+import org.apache.calcite.rex.RexCall;
+import org.apache.calcite.rex.RexNode;
+import org.apache.calcite.sql.SqlOperator;
+
+import java.util.List;
+
+/**
+ * Direct operator conversion for expressions like Function(exp_1, ..., exp_n).
+ */
+public class DirectOperatorConversion implements DruidSqlOperatorConverter {
+  private final SqlOperator operator;
+  private final String druidFunctionName;
+
+  public DirectOperatorConversion(final SqlOperator operator, final String druidFunctionName) {
+    this.operator = operator;
+    this.druidFunctionName = druidFunctionName;
+  }
+
+  @Override public SqlOperator calciteOperator() {
+    return operator;
+  }
+
+  @Override public String toDruidExpression(RexNode rexNode, RelDataType rowType,
+      DruidQuery druidQuery) {
+    final RexCall call = (RexCall) rexNode;
+    final List<String> druidExpressions = DruidExpressions.toDruidExpressions(
+        druidQuery, rowType,
+        call.getOperands());
+    if (druidExpressions == null) {
+      return null;
+    }
+    return DruidExpressions.functionCall(druidFunctionName, druidExpressions);
+  }
+}
+
+// End DirectOperatorConversion.java
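
A sketch of a one-to-one function mapping; the LOWER/'lower' pairing is
hypothetical, and the output shape assumes DruidExpressions.functionCall
renders name(arg0, ..., argN):

  DruidSqlOperatorConverter lower =
      new DirectOperatorConversion(SqlStdOperatorTable.LOWER, "lower");
  // For LOWER(name) whose operand translates to the Druid expression e0,
  // toDruidExpression(...) returns the function call lower(e0).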

http://git-wip-us.apache.org/repos/asf/calcite/blob/98f3704e/druid/src/main/java/org/apache/calcite/adapter/druid/DruidConnectionImpl.java
----------------------------------------------------------------------
diff --git a/druid/src/main/java/org/apache/calcite/adapter/druid/DruidConnectionImpl.java b/druid/src/main/java/org/apache/calcite/adapter/druid/DruidConnectionImpl.java
index 4f65dff..40883bf 100644
--- a/druid/src/main/java/org/apache/calcite/adapter/druid/DruidConnectionImpl.java
+++ b/druid/src/main/java/org/apache/calcite/adapter/druid/DruidConnectionImpl.java
@@ -38,6 +38,7 @@ import com.fasterxml.jackson.databind.DeserializationFeature;
 import com.fasterxml.jackson.databind.ObjectMapper;
 import com.fasterxml.jackson.databind.type.CollectionType;
 import com.google.common.base.Preconditions;
+import com.google.common.base.Throwables;
 import com.google.common.collect.ImmutableMap;
 import com.google.common.collect.ImmutableSet;
 
@@ -72,12 +73,15 @@ class DruidConnectionImpl implements DruidConnection {
 
   public static final String DEFAULT_RESPONSE_TIMESTAMP_COLUMN = "timestamp";
   private static final SimpleDateFormat UTC_TIMESTAMP_FORMAT;
+  private static final SimpleDateFormat TIMESTAMP_FORMAT;
 
   static {
     final TimeZone utc = DateTimeUtils.UTC_ZONE;
     UTC_TIMESTAMP_FORMAT =
         new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss.SSS'Z'", Locale.ROOT);
     UTC_TIMESTAMP_FORMAT.setTimeZone(utc);
+    TIMESTAMP_FORMAT = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss", Locale.ROOT);
+    TIMESTAMP_FORMAT.setTimeZone(utc);
   }
 
   DruidConnectionImpl(String url, String coordinatorUrl) {
@@ -132,6 +136,10 @@ class DruidConnectionImpl implements DruidConnection {
 
     int posTimestampField = -1;
     for (int i = 0; i < fieldTypes.size(); i++) {
+      /* TODO: This needs to be revisited. The logic seems to imply that only
+      one column of type timestamp is present; this is not necessarily true.
+      See https://issues.apache.org/jira/browse/CALCITE-2175
+      */
       if (fieldTypes.get(i) == ColumnMetaData.Rep.JAVA_SQL_TIMESTAMP) {
         posTimestampField = i;
         break;
@@ -324,30 +332,41 @@ class DruidConnectionImpl implements DruidConnection {
     }
 
     if (isTimestampColumn || ColumnMetaData.Rep.JAVA_SQL_TIMESTAMP == type) {
-      try {
-        final long timeInMillis;
-
-        if (token == JsonToken.VALUE_NUMBER_INT) {
-          timeInMillis = parser.getLongValue();
-        } else {
+      final int fieldPos = posTimestampField != -1 ? posTimestampField : i;
+      if (token == JsonToken.VALUE_NUMBER_INT) {
+        rowBuilder.set(posTimestampField, parser.getLongValue());
+        return;
+      } else {
+        // We don't have any way to figure out the format of the time upfront, since we only
+        // have org.apache.calcite.avatica.ColumnMetaData.Rep.JAVA_SQL_TIMESTAMP as the type to
+        // represent both timestamp and timestamp with local timezone.
+        // The logic where the type is inferred can be found in
+        // DruidQuery.DruidQueryNode.getPrimitive(). Thus we need to guess via try/catch.
+        synchronized (UTC_TIMESTAMP_FORMAT) {
           // synchronized block to avoid race condition
-          synchronized (UTC_TIMESTAMP_FORMAT) {
-            timeInMillis = UTC_TIMESTAMP_FORMAT.parse(parser.getText()).getTime();
+          try {
+            // First try to parse as a timestamp with timezone.
+            rowBuilder
+                .set(fieldPos, UTC_TIMESTAMP_FORMAT.parse(parser.getText()).getTime());
+          } catch (ParseException e) {
+            // swallow the exception and try timestamp format
+            try {
+              rowBuilder
+                  .set(fieldPos, TIMESTAMP_FORMAT.parse(parser.getText()).getTime());
+            } catch (ParseException e2) {
+              // Unknown format; should not happen.
+              Throwables.propagate(e2);
+            }
           }
         }
-        if (posTimestampField != -1) {
-          rowBuilder.set(posTimestampField, timeInMillis);
-        }
-      } catch (ParseException e) {
-        // ignore bad value
+        return;
       }
-      return;
     }
 
     switch (token) {
     case VALUE_NUMBER_INT:
       if (type == null) {
-        type = ColumnMetaData.Rep.INTEGER;
+        type = ColumnMetaData.Rep.LONG;
       }
       // fall through
     case VALUE_NUMBER_FLOAT:
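
A self-contained sketch of the timestamp-format fallback introduced above,
using the same two patterns as the static initializer (fragment; assume an
enclosing method that declares throws ParseException):

  SimpleDateFormat iso = new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss.SSS'Z'", Locale.ROOT);
  SimpleDateFormat plain = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss", Locale.ROOT);
  iso.setTimeZone(TimeZone.getTimeZone("UTC"));
  plain.setTimeZone(TimeZone.getTimeZone("UTC"));
  long millis;
  try {
    millis = iso.parse("2015-09-12 00:46:58").getTime();   // fails: no 'T' or millis
  } catch (java.text.ParseException e) {
    millis = plain.parse("2015-09-12 00:46:58").getTime(); // fallback succeeds
  }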

http://git-wip-us.apache.org/repos/asf/calcite/blob/98f3704e/druid/src/main/java/org/apache/calcite/adapter/druid/DruidDateTimeUtils.java
----------------------------------------------------------------------
diff --git a/druid/src/main/java/org/apache/calcite/adapter/druid/DruidDateTimeUtils.java b/druid/src/main/java/org/apache/calcite/adapter/druid/DruidDateTimeUtils.java
index 2a9851a..91f5fa4 100644
--- a/druid/src/main/java/org/apache/calcite/adapter/druid/DruidDateTimeUtils.java
+++ b/druid/src/main/java/org/apache/calcite/adapter/druid/DruidDateTimeUtils.java
@@ -39,6 +39,7 @@ import com.google.common.collect.Range;
 import com.google.common.collect.TreeRangeSet;
 
 import org.joda.time.Interval;
+import org.joda.time.Period;
 import org.joda.time.chrono.ISOChronology;
 import org.slf4j.Logger;
 
@@ -46,6 +47,8 @@ import java.util.ArrayList;
 import java.util.List;
 import java.util.TimeZone;
 
+import javax.annotation.Nullable;
+
 /**
  * Utilities for generating intervals from RexNode.
  */
@@ -62,6 +65,7 @@ public class DruidDateTimeUtils {
    * expression. Assumes that all the predicates in the input
    * reference a single column: the timestamp column.
    */
+  @Nullable
   public static List<Interval> createInterval(RexNode e, String timeZone) {
     final List<Range<TimestampString>> ranges =
         extractRanges(e, TimeZone.getTimeZone(timeZone), false);
@@ -111,6 +115,7 @@ public class DruidDateTimeUtils {
     return intervals;
   }
 
+  @Nullable
   protected static List<Range<TimestampString>> extractRanges(RexNode node,
       TimeZone timeZone, boolean withNot) {
     switch (node.getKind()) {
@@ -171,6 +176,7 @@ public class DruidDateTimeUtils {
     }
   }
 
+  @Nullable
   protected static List<Range<TimestampString>> leafToRanges(RexCall call,
       TimeZone timeZone, boolean withNot) {
     switch (call.getKind()) {
@@ -249,6 +255,7 @@ public class DruidDateTimeUtils {
     }
   }
 
+  @Nullable
   protected static TimestampString literalValue(RexNode node, TimeZone timeZone) {
     switch (node.getKind()) {
     case LITERAL:
@@ -318,6 +325,67 @@ public class DruidDateTimeUtils {
     return Granularities.createGranularity(timeUnit, timeZone);
   }
 
+  /**
+   * @param type Druid granularity to translate to a period of time
+   *
+   * @return String representing the granularity as an ISO 8601 period of time,
+   * or null for unknown cases.
+   */
+  @Nullable
+  public static String toISOPeriodFormat(Granularity.Type type) {
+    switch (type) {
+    case SECOND:
+      return Period.seconds(1).toString();
+    case MINUTE:
+      return Period.minutes(1).toString();
+    case HOUR:
+      return Period.hours(1).toString();
+    case DAY:
+      return Period.days(1).toString();
+    case WEEK:
+      return Period.weeks(1).toString();
+    case MONTH:
+      return Period.months(1).toString();
+    case QUARTER:
+      return Period.months(3).toString();
+    case YEAR:
+      return Period.years(1).toString();
+    default:
+      return null;
+    }
+  }
+
+  /**
+   * Translates a Calcite TimeUnitRange to a Druid {@link Granularity}.
+   * @param timeUnit Calcite time unit to convert
+   *
+   * @return Druid granularity, or null if the unit cannot be mapped
+   */
+  @Nullable
+  public static Granularity.Type toDruidGranularity(TimeUnitRange timeUnit) {
+    if (timeUnit == null) {
+      return null;
+    }
+    switch (timeUnit) {
+    case YEAR:
+      return Granularity.Type.YEAR;
+    case QUARTER:
+      return Granularity.Type.QUARTER;
+    case MONTH:
+      return Granularity.Type.MONTH;
+    case WEEK:
+      return Granularity.Type.WEEK;
+    case DAY:
+      return Granularity.Type.DAY;
+    case HOUR:
+      return Granularity.Type.HOUR;
+    case MINUTE:
+      return Granularity.Type.MINUTE;
+    case SECOND:
+      return Granularity.Type.SECOND;
+    default:
+      return null;
+    }
+  }
 }
 
 // End DruidDateTimeUtils.java
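
For reference, the ISO-8601 periods produced by toISOPeriodFormat via
Joda-Time:

  // SECOND -> PT1S   MINUTE -> PT1M   HOUR -> PT1H   DAY -> P1D
  // WEEK   -> P1W    MONTH  -> P1M    QUARTER (three months) -> P3M   YEAR -> P1Y
  String monthly = DruidDateTimeUtils.toISOPeriodFormat(Granularity.Type.MONTH); // "P1M"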

http://git-wip-us.apache.org/repos/asf/calcite/blob/98f3704e/druid/src/main/java/org/apache/calcite/adapter/druid/DruidExpressions.java
----------------------------------------------------------------------
diff --git a/druid/src/main/java/org/apache/calcite/adapter/druid/DruidExpressions.java b/druid/src/main/java/org/apache/calcite/adapter/druid/DruidExpressions.java
new file mode 100644
index 0000000..78cfb0c
--- /dev/null
+++ b/druid/src/main/java/org/apache/calcite/adapter/druid/DruidExpressions.java
@@ -0,0 +1,283 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to you under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.calcite.adapter.druid;
+
+import org.apache.calcite.rel.type.RelDataType;
+import org.apache.calcite.rex.RexCall;
+import org.apache.calcite.rex.RexInputRef;
+import org.apache.calcite.rex.RexLiteral;
+import org.apache.calcite.rex.RexNode;
+import org.apache.calcite.sql.SqlKind;
+import org.apache.calcite.sql.SqlOperator;
+import org.apache.calcite.sql.type.SqlTypeFamily;
+import org.apache.calcite.sql.type.SqlTypeName;
+
+import com.google.common.base.Preconditions;
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.ImmutableMap;
+import com.google.common.io.BaseEncoding;
+import com.google.common.primitives.Chars;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+import java.util.Map;
+import java.util.TimeZone;
+
+import javax.annotation.Nullable;
+
+/**
+ * Expression utility class to transform Calcite expressions to Druid expressions when possible.
+ */
+public class DruidExpressions {
+
+  /**
+   * Type mapping between Calcite SQL family types and native Druid expression types.
+   */
+  static final Map<SqlTypeName, DruidType> EXPRESSION_TYPES;
+  /**
+   * Druid expression safe chars; sorted in the static initializer so it can be binary-searched.
+   */
+  private static final char[] SAFE_CHARS = " ,._-;:(){}[]<>!@#$%^&*`~?/".toCharArray();
+
+  static {
+    final ImmutableMap.Builder<SqlTypeName, DruidType> builder = ImmutableMap.builder();
+
+    for (SqlTypeName type : SqlTypeName.FRACTIONAL_TYPES) {
+      builder.put(type, DruidType.DOUBLE);
+    }
+
+    for (SqlTypeName type : SqlTypeName.INT_TYPES) {
+      builder.put(type, DruidType.LONG);
+    }
+
+    for (SqlTypeName type : SqlTypeName.STRING_TYPES) {
+      builder.put(type, DruidType.STRING);
+    }
+    // Timestamps are treated as longs (millis since the epoch) in Druid expressions.
+    builder.put(SqlTypeName.TIMESTAMP, DruidType.LONG);
+    builder.put(SqlTypeName.DATE, DruidType.LONG);
+    builder.put(SqlTypeName.TIMESTAMP_WITH_LOCAL_TIME_ZONE, DruidType.LONG);
+    builder.put(SqlTypeName.OTHER, DruidType.COMPLEX);
+    EXPRESSION_TYPES = builder.build();
+    // Safe chars must be sorted
+    Arrays.sort(SAFE_CHARS);
+  }
+  private DruidExpressions() {
+  }
+
+  /**
+   * Translates a Calcite rexNode to a Druid expression when possible.
+   *
+   * @param rexNode rexNode to convert to a Druid expression
+   * @param inputRowType input row type of the rexNode to translate
+   * @param druidRel Druid query
+   *
+   * @return Druid expression, or null when the RexNode cannot be converted
+   */
+  @Nullable
+  public static String toDruidExpression(
+      final RexNode rexNode,
+      final RelDataType inputRowType,
+      final DruidQuery druidRel) {
+    SqlKind kind = rexNode.getKind();
+    SqlTypeName sqlTypeName = rexNode.getType().getSqlTypeName();
+
+    if (kind == SqlKind.INPUT_REF) {
+      final RexInputRef ref = (RexInputRef) rexNode;
+      final String columnName = inputRowType.getFieldNames().get(ref.getIndex());
+      if (columnName == null) {
+        return null;
+      }
+      if (druidRel.getDruidTable().timestampFieldName.equals(columnName)) {
+        return DruidExpressions.fromColumn(DruidTable.DEFAULT_TIMESTAMP_COLUMN);
+      }
+      return DruidExpressions.fromColumn(columnName);
+    }
+
+    if (rexNode instanceof RexCall) {
+      final SqlOperator operator = ((RexCall) rexNode).getOperator();
+      final DruidSqlOperatorConverter conversion = druidRel.getOperatorConversionMap()
+          .get(operator);
+      if (conversion == null) {
+        // Unknown operator; cannot translate.
+        return null;
+      } else {
+        return conversion.toDruidExpression(rexNode, inputRowType, druidRel);
+      }
+    }
+    if (kind == SqlKind.LITERAL) {
+      // Translate literal.
+      if (RexLiteral.isNullLiteral(rexNode)) {
+        // The filter/project might yield UNKNOWN; let Calcite deal with this for now.
+        return null;
+      } else if (SqlTypeName.NUMERIC_TYPES.contains(sqlTypeName)) {
+        return DruidExpressions.numberLiteral((Number) RexLiteral
+            .value(rexNode));
+      } else if (SqlTypeFamily.INTERVAL_DAY_TIME == sqlTypeName.getFamily()) {
+        // Calcite represents DAY-TIME intervals in milliseconds.
+        final long milliseconds = ((Number) RexLiteral.value(rexNode)).longValue();
+        return DruidExpressions.numberLiteral(milliseconds);
+      } else if (SqlTypeFamily.INTERVAL_YEAR_MONTH == sqlTypeName.getFamily()) {
+        // Calcite represents YEAR-MONTH intervals in months.
+        final long months = ((Number) RexLiteral.value(rexNode)).longValue();
+        return DruidExpressions.numberLiteral(months);
+      } else if (SqlTypeName.STRING_TYPES.contains(sqlTypeName)) {
+        return DruidExpressions.stringLiteral(RexLiteral.stringValue(rexNode));
+      } else if (SqlTypeName.TIMESTAMP == sqlTypeName || SqlTypeName.DATE == sqlTypeName
+          || SqlTypeName.TIMESTAMP_WITH_LOCAL_TIME_ZONE == sqlTypeName) {
+        return DruidExpressions.numberLiteral(DruidDateTimeUtils
+            .literalValue(rexNode, TimeZone.getTimeZone(druidRel.getConnectionConfig().timeZone()))
+            .getMillisSinceEpoch());
+      } else if (SqlTypeName.BOOLEAN == sqlTypeName) {
+        return DruidExpressions.numberLiteral(RexLiteral.booleanValue(rexNode) ? 1 : 0);
+      }
+    }
+    // Not a literal, input ref or call, or of an unknown type; cannot translate.
+    return null;
+  }
+
+  public static String fromColumn(String columnName) {
+    return DruidQuery.format("\"%s\"", columnName);
+  }
+
+  public static String nullLiteral() {
+    return "null";
+  }
+
+  public static String numberLiteral(final Number n) {
+    return n == null ? nullLiteral() : n.toString();
+  }
+
+  public static String stringLiteral(final String s) {
+    return s == null ? nullLiteral() : "'" + escape(s) + "'";
+  }
+
+  private static String escape(final String s) {
+    final StringBuilder escaped = new StringBuilder();
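+    // Pass letters, digits and the sorted SAFE_CHARS through; hex-escape everything else.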
+    for (int i = 0; i < s.length(); i++) {
+      final char c = s.charAt(i);
+      if (Character.isLetterOrDigit(c) || Arrays.binarySearch(SAFE_CHARS, c) >= 0) {
+        escaped.append(c);
+      } else {
+        escaped.append("\\u").append(BaseEncoding.base16().encode(Chars.toByteArray(c)));
+      }
+    }
+    return escaped.toString();
+  }
+
+  public static String functionCall(final String functionName, final List<String> args) {
+    Preconditions.checkNotNull(functionName, "druid functionName");
+    Preconditions.checkNotNull(args, "args");
+
+    final StringBuilder builder = new StringBuilder(functionName);
+    builder.append("(");
+    for (int i = 0; i < args.size(); i++) {
+      final String arg = Preconditions.checkNotNull(args.get(i), "arg #%s", i);
+      builder.append(arg);
+      if (i < args.size() - 1) {
+        builder.append(",");
+      }
+    }
+    builder.append(")");
+    return builder.toString();
+  }
+
+  public static String nAryOperatorCall(final String druidOperator, final List<String> args) {
+    Preconditions.checkNotNull(druidOperator, "druid operator missing");
+    Preconditions.checkNotNull(args, "args");
+    final StringBuilder builder = new StringBuilder();
+    builder.append("(");
+    for (int i = 0; i < args.size(); i++) {
+      final String arg = Preconditions.checkNotNull(args.get(i), "arg #%s", i);
+      builder.append(arg);
+      if (i < args.size() - 1) {
+        builder.append(druidOperator);
+      }
+    }
+    builder.append(")");
+    return builder.toString();
+  }
+
+  /**
+   * Translates a list of Calcite {@code RexNode} to Druid expressions.
+   *
+   * @param rexNodes list of Calcite expressions meant to be applied on top of the rows
+   *
+   * @return list of Druid expressions in the same order as rexNodes, or null if not possible.
+   * If a non-null list is returned, all elements will be non-null.
+   */
+  @Nullable
+  public static List<String> toDruidExpressions(
+      final DruidQuery druidRel, final RelDataType rowType,
+      final List<RexNode> rexNodes) {
+    final List<String> retVal = new ArrayList<>(rexNodes.size());
+    for (RexNode rexNode : rexNodes) {
+      final String druidExpression = toDruidExpression(rexNode, rowType, druidRel);
+      if (druidExpression == null) {
+        return null;
+      }
+
+      retVal.add(druidExpression);
+    }
+    return retVal;
+  }
+
+  public static String applyTimestampFloor(
+      final String input,
+      final String granularity,
+      final String origin,
+      final TimeZone timeZone) {
+    Preconditions.checkNotNull(input, "input");
+    Preconditions.checkNotNull(granularity, "granularity");
+    return DruidExpressions.functionCall(
+        "timestamp_floor",
+        ImmutableList.of(input,
+            DruidExpressions.stringLiteral(granularity),
+            DruidExpressions.stringLiteral(origin),
+            DruidExpressions.stringLiteral(timeZone.getID())));
+  }
+
+  public static String applyTimestampCeil(
+      final String input,
+      final String granularity,
+      final String origin,
+      final TimeZone timeZone) {
+    Preconditions.checkNotNull(input, "input");
+    Preconditions.checkNotNull(granularity, "granularity");
+    return DruidExpressions.functionCall(
+        "timestamp_ceil",
+        ImmutableList.of(input,
+            DruidExpressions.stringLiteral(granularity),
+            DruidExpressions.stringLiteral(origin),
+            DruidExpressions.stringLiteral(timeZone.getID())));
+  }
+
+  public static String applyTimeExtract(String timeExpression, String druidUnit,
+      TimeZone timeZone) {
+    return DruidExpressions.functionCall(
+        "timestamp_extract",
+        ImmutableList.of(
+            timeExpression,
+            DruidExpressions.stringLiteral(druidUnit),
+            DruidExpressions.stringLiteral(timeZone.getID())));
+  }
+}
+
+// End DruidExpressions.java
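
Since DruidExpressions is new in this patch, a short usage sketch (not part of
the patch; the demo class name is hypothetical) of what the public helpers
emit; the resulting strings are native Druid expressions as embedded in the
query JSON:

import org.apache.calcite.adapter.druid.DruidExpressions;

import com.google.common.collect.ImmutableList;

import java.util.TimeZone;

public class DruidExpressionDemo {
  public static void main(String[] args) {
    // Render concat('a', '-', 'b') as a native Druid expression string.
    String concat = DruidExpressions.functionCall("concat",
        ImmutableList.of(
            DruidExpressions.stringLiteral("a"),
            DruidExpressions.stringLiteral("-"),
            DruidExpressions.stringLiteral("b")));
    System.out.println(concat); // concat('a','-','b')

    // Unsafe characters (such as the single quote) are hex-escaped by stringLiteral.
    System.out.println(DruidExpressions.stringLiteral("it's"));

    // FLOOR(__time TO DAY) becomes a timestamp_floor call over the time column.
    String floor = DruidExpressions.applyTimestampFloor(
        DruidExpressions.fromColumn("__time"), "P1D", "", TimeZone.getTimeZone("UTC"));
    System.out.println(floor); // timestamp_floor("__time",'P1D','','UTC')
  }
}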

http://git-wip-us.apache.org/repos/asf/calcite/blob/98f3704e/druid/src/main/java/org/apache/calcite/adapter/druid/DruidJson.java
----------------------------------------------------------------------
diff --git a/druid/src/main/java/org/apache/calcite/adapter/druid/DruidJson.java b/druid/src/main/java/org/apache/calcite/adapter/druid/DruidJson.java
new file mode 100644
index 0000000..77ccf4f
--- /dev/null
+++ b/druid/src/main/java/org/apache/calcite/adapter/druid/DruidJson.java
@@ -0,0 +1,29 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to you under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.calcite.adapter.druid;
+
+import com.fasterxml.jackson.core.JsonGenerator;
+
+import java.io.IOException;
+
+/** Object that knows how to write itself to a
+ * {@link com.fasterxml.jackson.core.JsonGenerator}. */
+public interface DruidJson {
+  void write(JsonGenerator generator) throws IOException;
+}
+
+// End DruidJson.java
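
DruidJson is the serialization contract implemented by the filter classes
below; a minimal sketch (not part of the patch; names in the demo are
hypothetical) of an implementation and of how a caller drives it with a
Jackson JsonGenerator:

import org.apache.calcite.adapter.druid.DruidJson;

import com.fasterxml.jackson.core.JsonFactory;
import com.fasterxml.jackson.core.JsonGenerator;

import java.io.IOException;
import java.io.StringWriter;

public class DruidJsonDemo {
  public static void main(String[] args) throws IOException {
    // An ad-hoc DruidJson that writes a selector-style JSON object.
    DruidJson selector = new DruidJson() {
      public void write(JsonGenerator generator) throws IOException {
        generator.writeStartObject();
        generator.writeStringField("type", "selector");
        generator.writeStringField("dimension", "someDimension");
        generator.writeStringField("value", "someValue");
        generator.writeEndObject();
      }
    };
    final StringWriter sw = new StringWriter();
    try (JsonGenerator generator = new JsonFactory().createGenerator(sw)) {
      selector.write(generator);
    }
    System.out.println(sw);
    // {"type":"selector","dimension":"someDimension","value":"someValue"}
  }
}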

http://git-wip-us.apache.org/repos/asf/calcite/blob/98f3704e/druid/src/main/java/org/apache/calcite/adapter/druid/DruidJsonFilter.java
----------------------------------------------------------------------
diff --git a/druid/src/main/java/org/apache/calcite/adapter/druid/DruidJsonFilter.java b/druid/src/main/java/org/apache/calcite/adapter/druid/DruidJsonFilter.java
new file mode 100644
index 0000000..11ec2be
--- /dev/null
+++ b/druid/src/main/java/org/apache/calcite/adapter/druid/DruidJsonFilter.java
@@ -0,0 +1,642 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to you under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.calcite.adapter.druid;
+
+import org.apache.calcite.rel.type.RelDataType;
+import org.apache.calcite.rex.RexCall;
+import org.apache.calcite.rex.RexLiteral;
+import org.apache.calcite.rex.RexNode;
+import org.apache.calcite.sql.SqlKind;
+import org.apache.calcite.sql.type.SqlTypeFamily;
+import org.apache.calcite.sql.type.SqlTypeName;
+import org.apache.calcite.util.Pair;
+import org.apache.calcite.util.TimestampString;
+
+import com.fasterxml.jackson.core.JsonGenerator;
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Preconditions;
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.Iterables;
+import com.google.common.collect.Lists;
+
+import java.io.IOException;
+import java.text.SimpleDateFormat;
+import java.util.List;
+import java.util.Locale;
+import java.util.TimeZone;
+
+import javax.annotation.Nullable;
+
+/**
+ * Filter element of a Druid "groupBy" or "topN" query.
+ */
+abstract class DruidJsonFilter implements DruidJson {
+
+  /**
+   * Translates an EQUALS or NOT_EQUALS rexNode to a Druid JSON filter.
+   *
+   * @param rexNode    rexNode to translate to a Druid JSON filter
+   * @param rowType    rowType associated with the rexNode
+   * @param druidQuery Druid query
+   *
+   * @return Druid JSON filter, or null if the node cannot be translated
+   */
+  @Nullable
+  private static DruidJsonFilter toEqualityKindDruidFilter(RexNode rexNode, RelDataType rowType,
+      DruidQuery druidQuery) {
+    if (rexNode.getKind() != SqlKind.EQUALS && rexNode.getKind() != SqlKind.NOT_EQUALS) {
+      throw new AssertionError(
+          DruidQuery.format("Expecting EQUALS or NOT_EQUALS but got [%s]", rexNode.getKind()));
+    }
+    final RexCall rexCall = (RexCall) rexNode;
+    if (rexCall.getOperands().size() < 2) {
+      return null;
+    }
+    final RexLiteral rexLiteral;
+    final RexNode refNode;
+    final RexNode lhs = rexCall.getOperands().get(0);
+    final RexNode rhs = rexCall.getOperands().get(1);
+    if (lhs.getKind() == SqlKind.LITERAL && rhs.getKind() != SqlKind.LITERAL) {
+      rexLiteral = (RexLiteral) lhs;
+      refNode = rhs;
+    } else if (rhs.getKind() == SqlKind.LITERAL && lhs.getKind() != SqlKind.LITERAL) {
+      rexLiteral = (RexLiteral) rhs;
+      refNode = lhs;
+    } else {
+      // must have at least one literal
+      return null;
+    }
+
+    if (RexLiteral.isNullLiteral(rexLiteral)) {
+      // We do not handle the IS NULL filter here, so bail out if the literal is null.
+      return null;
+    }
+    final String literalValue = toDruidLiteral(rexLiteral, rowType, druidQuery);
+    if (literalValue == null) {
+      // Cannot translate the literal; bail out.
+      return null;
+    }
+    final boolean isNumeric = refNode.getType().getFamily() == SqlTypeFamily.NUMERIC
+        || rexLiteral.getType().getFamily() == SqlTypeFamily.NUMERIC;
+    final Pair<String, ExtractionFunction> druidColumn = DruidQuery.toDruidColumn(refNode, rowType,
+        druidQuery);
+    final String columnName = druidColumn.left;
+    final ExtractionFunction extractionFunction = druidColumn.right;
+    if (columnName == null) {
+      // No column name; bail out.
+      return null;
+    }
+    final DruidJsonFilter partialFilter;
+    if (isNumeric) {
+      // Need a bound filter, since one of the operands is numeric.
+      partialFilter = new JsonBound(columnName, literalValue, false, literalValue, false, true,
+          extractionFunction);
+    } else {
+      partialFilter = new JsonSelector(columnName, literalValue, extractionFunction);
+    }
+
+    if (rexNode.getKind() == SqlKind.EQUALS) {
+      return partialFilter;
+    }
+    return toNotDruidFilter(partialFilter);
+  }
+
+  /**
+   * Translates a comparison rexNode to a Druid JSON bound filter.
+   *
+   * @param rexNode    rexNode to translate
+   * @param rowType    row type associated with the filter
+   * @param druidQuery Druid query
+   *
+   * @return valid Druid JSON bound filter, or null if the rexNode cannot be translated
+   */
+  @Nullable
+  private static DruidJsonFilter toBoundDruidFilter(RexNode rexNode, RelDataType rowType,
+      DruidQuery druidQuery) {
+    final RexCall rexCall = (RexCall) rexNode;
+    final RexLiteral rexLiteral;
+    if (rexCall.getOperands().size() < 2) {
+      return null;
+    }
+    final RexNode refNode;
+    final RexNode lhs = rexCall.getOperands().get(0);
+    final RexNode rhs = rexCall.getOperands().get(1);
+    final boolean lhsIsRef;
+    if (lhs.getKind() == SqlKind.LITERAL && rhs.getKind() != SqlKind.LITERAL) {
+      rexLiteral = (RexLiteral) lhs;
+      refNode = rhs;
+      lhsIsRef = false;
+    } else if (rhs.getKind() == SqlKind.LITERAL && lhs.getKind() != SqlKind.LITERAL) {
+      rexLiteral = (RexLiteral) rhs;
+      refNode = lhs;
+      lhsIsRef = true;
+    } else {
+      // must have at least one literal
+      return null;
+    }
+
+    if (RexLiteral.isNullLiteral(rexLiteral)) {
+      // We do not handle the IS NULL filter here, so bail out if the literal is null.
+      return null;
+    }
+    final String literalValue = DruidJsonFilter.toDruidLiteral(rexLiteral, rowType, druidQuery);
+    if (literalValue == null) {
+      // Cannot translate the literal; bail out.
+      return null;
+    }
+    final boolean isNumeric = refNode.getType().getFamily() == SqlTypeFamily.NUMERIC
+        || rexLiteral.getType().getFamily() == SqlTypeFamily.NUMERIC;
+    final Pair<String, ExtractionFunction> druidColumn = DruidQuery.toDruidColumn(refNode, rowType,
+        druidQuery);
+    final String columnName = druidColumn.left;
+    final ExtractionFunction extractionFunction = druidColumn.right;
+    if (columnName == null) {
+      // No column name; bail out.
+      return null;
+    }
+    switch (rexCall.getKind()) {
+    case LESS_THAN_OR_EQUAL:
+    case LESS_THAN:
+      if (lhsIsRef) {
+        return new JsonBound(columnName, null, false, literalValue,
+            rexCall.getKind() == SqlKind.LESS_THAN, isNumeric,
+            extractionFunction);
+      } else {
+        return new JsonBound(columnName, literalValue, rexCall.getKind() == SqlKind.LESS_THAN, null,
+            false, isNumeric,
+            extractionFunction);
+      }
+    case GREATER_THAN_OR_EQUAL:
+    case GREATER_THAN:
+      if (!lhsIsRef) {
+        return new JsonBound(columnName, null, false, literalValue,
+            rexCall.getKind() == SqlKind.GREATER_THAN, isNumeric,
+            extractionFunction);
+      } else {
+        return new JsonBound(columnName, literalValue, rexCall.getKind() == SqlKind.GREATER_THAN,
+            null,
+            false, isNumeric,
+            extractionFunction);
+      }
+    default:
+      return null;
+    }
+  }
+
+  /**
+   * Converts a literal rexNode to its Druid literal equivalent.
+   *
+   * @param rexNode    rexNode to translate to the Druid literal equivalent
+   * @param rowType    rowType associated with the rexNode
+   * @param druidQuery Druid query
+   *
+   * @return literal value as a string, or null if it cannot be translated to a valid Druid equivalent
+   */
+  @Nullable
+  private static String toDruidLiteral(RexNode rexNode, RelDataType rowType,
+      DruidQuery druidQuery) {
+    final SimpleDateFormat dateFormatter = new SimpleDateFormat(
+        TimeExtractionFunction.ISO_TIME_FORMAT,
+        Locale.ROOT);
+    final String timeZone = druidQuery.getConnectionConfig().timeZone();
+    if (timeZone != null) {
+      dateFormatter.setTimeZone(TimeZone.getTimeZone(timeZone));
+    }
+    final String val;
+    final RexLiteral rhsLiteral = (RexLiteral) rexNode;
+    if (SqlTypeName.NUMERIC_TYPES.contains(rhsLiteral.getTypeName())) {
+      val = String.valueOf(RexLiteral.value(rhsLiteral));
+    } else if (SqlTypeName.CHAR_TYPES.contains(rhsLiteral.getTypeName())) {
+      val = String.valueOf(RexLiteral.stringValue(rhsLiteral));
+    } else if (SqlTypeName.TIMESTAMP == rhsLiteral.getTypeName() || SqlTypeName.DATE == rhsLiteral
+        .getTypeName() || SqlTypeName.TIMESTAMP_WITH_LOCAL_TIME_ZONE == rhsLiteral.getTypeName()) {
+      TimestampString timestampString = DruidDateTimeUtils
+          .literalValue(rexNode, TimeZone.getTimeZone(timeZone));
+      if (timestampString == null) {
+        throw new AssertionError(
+            "Cannot translate Literal" + rexNode + " of type "
+                + rhsLiteral.getTypeName() + " to TimestampString");
+      }
+      // TODO: This is unnecessary; we can send the time as a long (i.e. millis since epoch) to Druid
+      val = dateFormatter.format(timestampString.getMillisSinceEpoch());
+    } else {
+      // Don't know how to filter on this kind of literal.
+      val = null;
+    }
+    return val;
+  }
+
+  @Nullable
+  private static DruidJsonFilter toIsNullKindDruidFilter(RexNode rexNode, RelDataType rowType,
+      DruidQuery druidQuery) {
+    if (rexNode.getKind() != SqlKind.IS_NULL && rexNode.getKind() != SqlKind.IS_NOT_NULL) {
+      throw new AssertionError(
+          DruidQuery.format("Expecting IS_NULL or IS_NOT_NULL but got [%s]", rexNode.getKind()));
+    }
+    final RexCall rexCall = (RexCall) rexNode;
+    final RexNode refNode = rexCall.getOperands().get(0);
+    Pair<String, ExtractionFunction> druidColumn = DruidQuery
+        .toDruidColumn(refNode, rowType, druidQuery);
+    final String columnName = druidColumn.left;
+    final ExtractionFunction extractionFunction = druidColumn.right;
+    if (columnName == null) {
+      return null;
+    }
+    if (rexNode.getKind() == SqlKind.IS_NOT_NULL) {
+      return toNotDruidFilter(new JsonSelector(columnName, null, extractionFunction));
+    }
+    return new JsonSelector(columnName, null, extractionFunction);
+  }
+
+  @Nullable
+  private static DruidJsonFilter toInKindDruidFilter(RexNode e, RelDataType rowType,
+      DruidQuery druidQuery) {
+    if (e.getKind() != SqlKind.IN && e.getKind() != SqlKind.NOT_IN) {
+      throw new AssertionError(
+          DruidQuery.format("Expecting IN or NOT IN but got [%s]", e.getKind()));
+    }
+    ImmutableList.Builder<String> listBuilder = ImmutableList.builder();
+    for (RexNode rexNode : ((RexCall) e).getOperands()) {
+      if (rexNode.getKind() == SqlKind.LITERAL) {
+        String value = toDruidLiteral(rexNode, rowType, druidQuery);
+        if (value == null) {
+          return null;
+        }
+        listBuilder.add(value);
+      }
+    }
+    Pair<String, ExtractionFunction> druidColumn = DruidQuery
+        .toDruidColumn(((RexCall) e).getOperands().get(0),
+            rowType, druidQuery);
+    final String columnName = druidColumn.left;
+    final ExtractionFunction extractionFunction = druidColumn.right;
+    if (columnName == null) {
+      return null;
+    }
+    if (e.getKind() != SqlKind.NOT_IN) {
+      return new DruidJsonFilter.JsonInFilter(columnName, listBuilder.build(), extractionFunction);
+    } else {
+      return toNotDruidFilter(
+          new DruidJsonFilter.JsonInFilter(columnName, listBuilder.build(), extractionFunction));
+    }
+  }
+
+  @Nullable
+  protected static DruidJsonFilter toNotDruidFilter(DruidJsonFilter druidJsonFilter) {
+    if (druidJsonFilter == null) {
+      return null;
+    }
+    return new JsonCompositeFilter(Type.NOT, druidJsonFilter);
+  }
+
+  @Nullable
+  private static DruidJsonFilter toBetweenDruidFilter(RexNode rexNode, RelDataType rowType,
+      DruidQuery query) {
+    if (rexNode.getKind() != SqlKind.BETWEEN) {
+      return null;
+    }
+    final RexCall rexCall = (RexCall) rexNode;
+    if (rexCall.getOperands().size() < 4) {
+      return null;
+    }
+    // BETWEEN (ASYMMETRIC, REF, 'lower-bound', 'upper-bound')
+    final RexNode refNode = rexCall.getOperands().get(1);
+    final RexNode lhs = rexCall.getOperands().get(2);
+    final RexNode rhs = rexCall.getOperands().get(3);
+
+    final String lhsLiteralValue = toDruidLiteral(lhs, rowType, query);
+    final String rhsLiteralValue = toDruidLiteral(rhs, rowType, query);
+    if (lhsLiteralValue == null || rhsLiteralValue == null) {
+      return null;
+    }
+    final boolean isNumeric = lhs.getType().getFamily() == SqlTypeFamily.NUMERIC
+        || rhs.getType().getFamily() == SqlTypeFamily.NUMERIC;
+    final Pair<String, ExtractionFunction> druidColumn = DruidQuery
+        .toDruidColumn(refNode, rowType, query);
+    final String columnName = druidColumn.left;
+    final ExtractionFunction extractionFunction = druidColumn.right;
+
+    if (columnName == null) {
+      return null;
+    }
+    return new JsonBound(columnName, lhsLiteralValue, false, rhsLiteralValue,
+        false, isNumeric,
+        extractionFunction);
+  }
+
+  @Nullable
+  private static DruidJsonFilter toSimpleDruidFilter(RexNode e, RelDataType rowType,
+      DruidQuery druidQuery) {
+    switch (e.getKind()) {
+    case EQUALS:
+    case NOT_EQUALS:
+      return toEqualityKindDruidFilter(e, rowType, druidQuery);
+    case GREATER_THAN:
+    case GREATER_THAN_OR_EQUAL:
+    case LESS_THAN:
+    case LESS_THAN_OR_EQUAL:
+      return toBoundDruidFilter(e, rowType, druidQuery);
+    case BETWEEN:
+      return toBetweenDruidFilter(e, rowType, druidQuery);
+    case IN:
+    case NOT_IN:
+      return toInKindDruidFilter(e, rowType, druidQuery);
+    case IS_NULL:
+    case IS_NOT_NULL:
+      return toIsNullKindDruidFilter(e, rowType, druidQuery);
+    default:
+      return null;
+    }
+  }
+
+  /**
+   * Translates a rexNode condition to a Druid JSON filter.
+   *
+   * @param rexNode    rexNode to translate to a Druid filter
+   * @param rowType    rowType of the filter input
+   * @param druidQuery Druid query
+   *
+   * @return Druid JSON filter, or null when the condition cannot be translated to valid Druid filters
+   */
+  @Nullable
+  static DruidJsonFilter toDruidFilters(final RexNode rexNode, RelDataType rowType,
+      DruidQuery druidQuery) {
+    if (rexNode.isAlwaysTrue()) {
+      return JsonExpressionFilter.alwaysTrue();
+    }
+    if (rexNode.isAlwaysFalse()) {
+      return JsonExpressionFilter.alwaysFalse();
+    }
+    switch (rexNode.getKind()) {
+    case IS_TRUE:
+    case IS_NOT_FALSE:
+      return toDruidFilters(Iterables.getOnlyElement(((RexCall) rexNode).getOperands()), rowType,
+          druidQuery);
+    case IS_NOT_TRUE:
+    case IS_FALSE:
+      final DruidJsonFilter simpleFilter = toDruidFilters(Iterables
+          .getOnlyElement(((RexCall) rexNode).getOperands()), rowType, druidQuery);
+      return simpleFilter != null ? new JsonCompositeFilter(Type.NOT, simpleFilter)
+          : simpleFilter;
+    case AND:
+    case OR:
+    case NOT:
+      final RexCall call = (RexCall) rexNode;
+      final List<DruidJsonFilter> jsonFilters = Lists.newArrayList();
+      for (final RexNode e : call.getOperands()) {
+        final DruidJsonFilter druidFilter = toDruidFilters(e, rowType, druidQuery);
+        if (druidFilter == null) {
+          return null;
+        }
+        jsonFilters.add(druidFilter);
+      }
+      return new JsonCompositeFilter(Type.valueOf(rexNode.getKind().name()),
+          jsonFilters);
+    }
+
+    final DruidJsonFilter simpleLeafFilter = toSimpleDruidFilter(rexNode, rowType, druidQuery);
+    return simpleLeafFilter == null
+        ? toDruidExpressionFilter(rexNode, rowType, druidQuery)
+        : simpleLeafFilter;
+  }
+
+  @Nullable
+  private static DruidJsonFilter toDruidExpressionFilter(RexNode rexNode, RelDataType rowType,
+      DruidQuery query) {
+    final String expression = DruidExpressions.toDruidExpression(rexNode, rowType, query);
+    return expression == null ? null : new JsonExpressionFilter(expression);
+  }
+
+  /**
+   * Supported filter types.
+   */
+  protected enum Type {
+    AND,
+    OR,
+    NOT,
+    SELECTOR,
+    IN,
+    BOUND,
+    EXPRESSION;
+
+    public String lowercase() {
+      return name().toLowerCase(Locale.ROOT);
+    }
+  }
+
+  protected final Type type;
+
+  private DruidJsonFilter(Type type) {
+    this.type = type;
+  }
+
+  /**
+   * Druid Expression filter.
+   */
+  public static class JsonExpressionFilter extends DruidJsonFilter {
+    private final String expression;
+
+    JsonExpressionFilter(String expression) {
+      super(Type.EXPRESSION);
+      this.expression = Preconditions.checkNotNull(expression);
+    }
+
+    @Override public void write(JsonGenerator generator) throws IOException {
+      generator.writeStartObject();
+      generator.writeStringField("type", type.lowercase());
+      generator.writeStringField("expression", expression);
+      generator.writeEndObject();
+    }
+
+    /**
+     * We need to push to Druid an expression that always evaluates to true.
+     */
+    private static JsonExpressionFilter alwaysTrue() {
+      return new JsonExpressionFilter("1 == 1");
+    }
+
+    /**
+     * We need to push to Druid an expression that always evaluates to false.
+     */
+    private static JsonExpressionFilter alwaysFalse() {
+      return new JsonExpressionFilter("1 == 2");
+    }
+  }
+
+  /**
+   * Equality filter.
+   */
+  private static class JsonSelector extends DruidJsonFilter {
+    private final String dimension;
+
+    private final String value;
+
+    private final ExtractionFunction extractionFunction;
+
+    private JsonSelector(String dimension, String value,
+        ExtractionFunction extractionFunction) {
+      super(Type.SELECTOR);
+      this.dimension = dimension;
+      this.value = value;
+      this.extractionFunction = extractionFunction;
+    }
+
+    public void write(JsonGenerator generator) throws IOException {
+      generator.writeStartObject();
+      generator.writeStringField("type", type.lowercase());
+      generator.writeStringField("dimension", dimension);
+      generator.writeStringField("value", value);
+      DruidQuery.writeFieldIf(generator, "extractionFn", extractionFunction);
+      generator.writeEndObject();
+    }
+  }
+
+  /**
+   * Bound filter.
+   */
+  @VisibleForTesting
+  protected static class JsonBound extends DruidJsonFilter {
+    private final String dimension;
+
+    private final String lower;
+
+    private final boolean lowerStrict;
+
+    private final String upper;
+
+    private final boolean upperStrict;
+
+    private final boolean alphaNumeric;
+
+    private final ExtractionFunction extractionFunction;
+
+    protected JsonBound(String dimension, String lower,
+        boolean lowerStrict, String upper, boolean upperStrict,
+        boolean alphaNumeric, ExtractionFunction extractionFunction) {
+      super(Type.BOUND);
+      this.dimension = dimension;
+      this.lower = lower;
+      this.lowerStrict = lowerStrict;
+      this.upper = upper;
+      this.upperStrict = upperStrict;
+      this.alphaNumeric = alphaNumeric;
+      this.extractionFunction = extractionFunction;
+    }
+
+    public void write(JsonGenerator generator) throws IOException {
+      generator.writeStartObject();
+      generator.writeStringField("type", type.lowercase());
+      generator.writeStringField("dimension", dimension);
+      if (lower != null) {
+        generator.writeStringField("lower", lower);
+        generator.writeBooleanField("lowerStrict", lowerStrict);
+      }
+      if (upper != null) {
+        generator.writeStringField("upper", upper);
+        generator.writeBooleanField("upperStrict", upperStrict);
+      }
+      if (alphaNumeric) {
+        generator.writeStringField("ordering", "numeric");
+      } else {
+        generator.writeStringField("ordering", "lexicographic");
+      }
+      DruidQuery.writeFieldIf(generator, "extractionFn", extractionFunction);
+      generator.writeEndObject();
+    }
+  }
+
+  /**
+   * Filter that combines other filters using a boolean operator.
+   */
+  private static class JsonCompositeFilter extends DruidJsonFilter {
+    private final List<? extends DruidJsonFilter> fields;
+
+    private JsonCompositeFilter(Type type,
+        Iterable<? extends DruidJsonFilter> fields) {
+      super(type);
+      this.fields = ImmutableList.copyOf(fields);
+    }
+
+    private JsonCompositeFilter(Type type, DruidJsonFilter... fields) {
+      this(type, ImmutableList.copyOf(fields));
+    }
+
+    public void write(JsonGenerator generator) throws IOException {
+      generator.writeStartObject();
+      generator.writeStringField("type", type.lowercase());
+      switch (type) {
+      case NOT:
+        DruidQuery.writeField(generator, "field", fields.get(0));
+        break;
+      default:
+        DruidQuery.writeField(generator, "fields", fields);
+      }
+      generator.writeEndObject();
+    }
+  }
+
+  /**
+   * IN filter.
+   */
+  protected static class JsonInFilter extends DruidJsonFilter {
+    private final String dimension;
+
+    private final List<String> values;
+
+    private final ExtractionFunction extractionFunction;
+
+    protected JsonInFilter(String dimension, List<String> values,
+        ExtractionFunction extractionFunction) {
+      super(Type.IN);
+      this.dimension = dimension;
+      this.values = values;
+      this.extractionFunction = extractionFunction;
+    }
+
+    public void write(JsonGenerator generator) throws IOException {
+      generator.writeStartObject();
+      generator.writeStringField("type", type.lowercase());
+      generator.writeStringField("dimension", dimension);
+      DruidQuery.writeField(generator, "values", values);
+      DruidQuery.writeFieldIf(generator, "extractionFn", extractionFunction);
+      generator.writeEndObject();
+    }
+  }
+
+  public static DruidJsonFilter getSelectorFilter(String column, String value,
+      ExtractionFunction extractionFunction) {
+    Preconditions.checkNotNull(column);
+    return new JsonSelector(column, value, extractionFunction);
+  }
+
+  /**
+   * Druid Having Filter spec.
+   */
+  protected static class JsonDimHavingFilter implements DruidJson {
+
+    private final DruidJsonFilter filter;
+
+    public JsonDimHavingFilter(DruidJsonFilter filter) {
+      this.filter = filter;
+    }
+
+    @Override public void write(JsonGenerator generator) throws IOException {
+      generator.writeStartObject();
+      generator.writeStringField("type", "filter");
+      DruidQuery.writeField(generator, "filter", filter);
+      generator.writeEndObject();
+    }
+  }
+}
+
+// End DruidJsonFilter.java
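
To see the shape of the JSON these classes emit, a minimal sketch (not part of
the patch; because DruidJsonFilter is package-private the demo assumes it
lives in org.apache.calcite.adapter.druid, and the class name is
hypothetical):

package org.apache.calcite.adapter.druid;

import com.fasterxml.jackson.core.JsonFactory;
import com.fasterxml.jackson.core.JsonGenerator;

import java.io.IOException;
import java.io.StringWriter;

public class SelectorFilterDemo {
  public static void main(String[] args) throws IOException {
    // WHERE "someDimension" = 'someValue', with no extraction function.
    DruidJsonFilter filter =
        DruidJsonFilter.getSelectorFilter("someDimension", "someValue", null);
    final StringWriter sw = new StringWriter();
    try (JsonGenerator generator = new JsonFactory().createGenerator(sw)) {
      filter.write(generator);
    }
    System.out.println(sw);
    // {"type":"selector","dimension":"someDimension","value":"someValue"}
  }
}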


[2/4] calcite git commit: [CALCITE-2170] Use Druid Expressions capabilities to improve the amount of work that can be pushed to Druid

Posted by jc...@apache.org.
http://git-wip-us.apache.org/repos/asf/calcite/blob/98f3704e/druid/src/main/java/org/apache/calcite/adapter/druid/DruidResultEnumerator.java
----------------------------------------------------------------------
diff --git a/druid/src/main/java/org/apache/calcite/adapter/druid/DruidResultEnumerator.java b/druid/src/main/java/org/apache/calcite/adapter/druid/DruidResultEnumerator.java
deleted file mode 100644
index 35b97b3..0000000
--- a/druid/src/main/java/org/apache/calcite/adapter/druid/DruidResultEnumerator.java
+++ /dev/null
@@ -1,25 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to you under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.calcite.adapter.druid;
-
-/**
- * Created by jhyde on 3/9/16.
- */
-public class DruidResultEnumerator {
-}
-
-// End DruidResultEnumerator.java

http://git-wip-us.apache.org/repos/asf/calcite/blob/98f3704e/druid/src/main/java/org/apache/calcite/adapter/druid/DruidRules.java
----------------------------------------------------------------------
diff --git a/druid/src/main/java/org/apache/calcite/adapter/druid/DruidRules.java b/druid/src/main/java/org/apache/calcite/adapter/druid/DruidRules.java
index 290f548..7b6bc78 100644
--- a/druid/src/main/java/org/apache/calcite/adapter/druid/DruidRules.java
+++ b/druid/src/main/java/org/apache/calcite/adapter/druid/DruidRules.java
@@ -22,7 +22,6 @@ import org.apache.calcite.plan.RelOptPredicateList;
 import org.apache.calcite.plan.RelOptRule;
 import org.apache.calcite.plan.RelOptRuleCall;
 import org.apache.calcite.plan.RelOptUtil;
-import org.apache.calcite.rel.RelFieldCollation;
 import org.apache.calcite.rel.RelNode;
 import org.apache.calcite.rel.core.Aggregate;
 import org.apache.calcite.rel.core.AggregateCall;
@@ -41,6 +40,7 @@ import org.apache.calcite.rel.rules.PushProjector;
 import org.apache.calcite.rel.rules.SortProjectTransposeRule;
 import org.apache.calcite.rel.type.RelDataType;
 import org.apache.calcite.rel.type.RelDataTypeFactory;
+import org.apache.calcite.rel.type.RelDataTypeField;
 import org.apache.calcite.rex.RexBuilder;
 import org.apache.calcite.rex.RexCall;
 import org.apache.calcite.rex.RexExecutor;
@@ -50,14 +50,10 @@ import org.apache.calcite.rex.RexNode;
 import org.apache.calcite.rex.RexShuttle;
 import org.apache.calcite.rex.RexSimplify;
 import org.apache.calcite.rex.RexUtil;
-import org.apache.calcite.runtime.PredicateImpl;
 import org.apache.calcite.sql.SqlKind;
 import org.apache.calcite.sql.fun.SqlStdOperatorTable;
-import org.apache.calcite.sql.type.SqlTypeFamily;
-import org.apache.calcite.sql.type.SqlTypeName;
 import org.apache.calcite.tools.RelBuilder;
 import org.apache.calcite.tools.RelBuilderFactory;
-import org.apache.calcite.util.ImmutableBitSet;
 import org.apache.calcite.util.Pair;
 import org.apache.calcite.util.Util;
 import org.apache.calcite.util.trace.CalciteTrace;
@@ -67,7 +63,7 @@ import org.apache.commons.lang3.tuple.Triple;
 
 import com.google.common.base.Predicate;
 import com.google.common.collect.ImmutableList;
-import com.google.common.collect.ImmutableMap;
+import com.google.common.collect.Iterables;
 import com.google.common.collect.Lists;
 
 import org.joda.time.Interval;
@@ -79,6 +75,8 @@ import java.util.HashSet;
 import java.util.List;
 import java.util.Set;
 
+import javax.annotation.Nullable;
+
 /**
  * Rules and relational operators for {@link DruidQuery}.
  */
@@ -113,6 +111,8 @@ public class DruidRules {
       new DruidPostAggregationProjectRule(RelFactories.LOGICAL_BUILDER);
   public static final DruidAggregateExtractProjectRule PROJECT_EXTRACT_RULE =
       new DruidAggregateExtractProjectRule(RelFactories.LOGICAL_BUILDER);
+  public static final DruidHavingFilterRule DRUID_HAVING_FILTER_RULE =
+      new DruidHavingFilterRule(RelFactories.LOGICAL_BUILDER);
 
   public static final List<RelOptRule> RULES =
       ImmutableList.of(FILTER,
@@ -127,73 +127,8 @@ public class DruidRules {
           FILTER_PROJECT_TRANSPOSE,
           PROJECT_SORT_TRANSPOSE,
           SORT,
-          SORT_PROJECT_TRANSPOSE);
-
-  /** Predicate that returns whether Druid can not handle an aggregate. */
-  private static final Predicate<Triple<Aggregate, RelNode, DruidQuery>> BAD_AGG =
-      new PredicateImpl<Triple<Aggregate, RelNode, DruidQuery>>() {
-        public boolean test(Triple<Aggregate, RelNode, DruidQuery> triple) {
-          final Aggregate aggregate = triple.getLeft();
-          final RelNode node = triple.getMiddle();
-          final DruidQuery query = triple.getRight();
-
-          final CalciteConnectionConfig config = query.getConnectionConfig();
-          for (AggregateCall aggregateCall : aggregate.getAggCallList()) {
-            switch (aggregateCall.getAggregation().getKind()) {
-            case COUNT:
-              // Druid count aggregator can handle 3 scenarios:
-              // 1. count(distinct col) when approximate results
-              //    are acceptable and col is not a metric.
-              //    Note that exact count(distinct column) is handled
-              //    by being rewritten into group by followed by count
-              // 2. count(*)
-              // 3. count(column)
-
-              if (checkAggregateOnMetric(ImmutableBitSet.of(aggregateCall.getArgList()),
-                      node, query)) {
-                return true;
-              }
-              // case count(*)
-              if (aggregateCall.getArgList().isEmpty()) {
-                continue;
-              }
-              // case count(column)
-              if (aggregateCall.getArgList().size() == 1 && !aggregateCall.isDistinct()) {
-                continue;
-              }
-              // case count(distinct and is approximate)
-              if (aggregateCall.isDistinct()
-                      && (aggregateCall.isApproximate() || config.approximateDistinctCount())) {
-                continue;
-              }
-              return true;
-            case SUM:
-            case SUM0:
-            case MIN:
-            case MAX:
-              final RelDataType type = aggregateCall.getType();
-              final SqlTypeName sqlTypeName = type.getSqlTypeName();
-              if (SqlTypeFamily.APPROXIMATE_NUMERIC.getTypeNames().contains(sqlTypeName)
-                      || SqlTypeFamily.INTEGER.getTypeNames().contains(sqlTypeName)) {
-                continue;
-              } else if (SqlTypeFamily.EXACT_NUMERIC.getTypeNames().contains(sqlTypeName)) {
-                // Decimal
-                assert sqlTypeName == SqlTypeName.DECIMAL;
-                if (type.getScale() == 0 || config.approximateDecimal()) {
-                  // If scale is zero or we allow approximating decimal, we can proceed
-                  continue;
-                }
-              }
-              // Cannot handle this aggregate function
-              return true;
-            default:
-              // Cannot handle this aggregate function
-              return true;
-            }
-          }
-          return false;
-        }
-      };
+          SORT_PROJECT_TRANSPOSE,
+          DRUID_HAVING_FILTER_RULE);
 
   /**
    * Rule to push a {@link org.apache.calcite.rel.core.Filter} into a {@link DruidQuery}.
@@ -231,7 +166,9 @@ public class DruidRules {
           new RexSimplify(rexBuilder, predicates, true, executor);
       final RexNode cond = simplify.simplify(filter.getCondition());
       for (RexNode e : RelOptUtil.conjunctions(cond)) {
-        if (query.isValidFilter(e)) {
+        DruidJsonFilter druidJsonFilter = DruidJsonFilter
+            .toDruidFilters(e, filter.getInput().getRowType(), query);
+        if (druidJsonFilter != null) {
           validPreds.add(e);
         } else {
           nonValidPreds.add(e);
@@ -239,19 +176,17 @@ public class DruidRules {
       }
 
       // Timestamp
-      int timestampFieldIdx = -1;
-      for (int i = 0; i < query.getRowType().getFieldCount(); i++) {
-        if (query.druidTable.timestampFieldName.equals(
-                query.getRowType().getFieldList().get(i).getName())) {
-          timestampFieldIdx = i;
-          break;
-        }
-      }
-
+      int timestampFieldIdx = Iterables
+          .indexOf(query.getRowType().getFieldList(), new Predicate<RelDataTypeField>() {
+            @Override public boolean apply(@Nullable RelDataTypeField input) {
+              return query.druidTable.timestampFieldName.equals(input.getName());
+            }
+          });
+      RelNode newDruidQuery = query;
       final Triple<List<RexNode>, List<RexNode>, List<RexNode>> triple =
           splitFilters(rexBuilder, query, validPreds, nonValidPreds, timestampFieldIdx);
       if (triple.getLeft().isEmpty() && triple.getMiddle().isEmpty()) {
-        // We can't push anything useful to Druid.
+        // Nothing useful can be pushed to Druid; bail out.
         return;
       }
       final List<RexNode> residualPreds = new ArrayList<>(triple.getRight());
@@ -262,13 +197,14 @@ public class DruidRules {
         assert timeZone != null;
         intervals = DruidDateTimeUtils.createInterval(
             RexUtil.composeConjunction(rexBuilder, triple.getLeft(), false),
-            timeZone);
+            query.getConnectionConfig().timeZone());
         if (intervals == null || intervals.isEmpty()) {
-          // Case we have an filter with extract that can not be written as interval push down
+          // Case where we have a filter with EXTRACT that cannot be rewritten as an interval push-down
           triple.getMiddle().addAll(triple.getLeft());
         }
       }
-      RelNode newDruidQuery = query;
+
       if (!triple.getMiddle().isEmpty()) {
         final RelNode newFilter = filter.copy(filter.getTraitSet(), Util.last(query.rels),
             RexUtil.composeConjunction(rexBuilder, triple.getMiddle(), false));
@@ -304,13 +240,9 @@ public class DruidRules {
       for (RexNode conj : validPreds) {
         final RelOptUtil.InputReferencedVisitor visitor = new RelOptUtil.InputReferencedVisitor();
         conj.accept(visitor);
-        if (visitor.inputPosReferenced.contains(timestampFieldIdx)) {
-          if (visitor.inputPosReferenced.size() != 1) {
-            // Complex predicate, transformation currently not supported
-            nonPushableNodes.add(conj);
-          } else {
-            timeRangeNodes.add(conj);
-          }
+        if (visitor.inputPosReferenced.contains(timestampFieldIdx)
+            && visitor.inputPosReferenced.size() == 1) {
+          timeRangeNodes.add(conj);
         } else {
           pushableNodes.add(conj);
         }
@@ -320,6 +252,36 @@ public class DruidRules {
   }
 
   /**
+   * Rule to push a "having" {@link Filter} into a {@link DruidQuery}.
+   */
+  public static class DruidHavingFilterRule extends RelOptRule {
+
+    public DruidHavingFilterRule(RelBuilderFactory relBuilderFactory) {
+      super(operand(Filter.class, operand(DruidQuery.class, none())),
+          relBuilderFactory, null);
+    }
+
+    @Override public void onMatch(RelOptRuleCall call) {
+      final Filter filter = call.rel(0);
+      final DruidQuery query = call.rel(1);
+
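+      // The query signature encodes the operator stack built so far; appending 'h'
+      // checks whether a having-filter may legally be added on top of it.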
+      if (!DruidQuery.isValidSignature(query.signature() + 'h')) {
+        return;
+      }
+
+      final RexNode cond = filter.getCondition();
+      final DruidJsonFilter druidJsonFilter = DruidJsonFilter
+          .toDruidFilters(cond, query.getTopNode().getRowType(), query);
+      if (druidJsonFilter != null) {
+        final RelNode newFilter = filter
+            .copy(filter.getTraitSet(), Util.last(query.rels), filter.getCondition());
+        final DruidQuery newDruidQuery = DruidQuery.extendQuery(query, newFilter);
+        call.transformTo(newDruidQuery);
+      }
+    }
+  }
+
+  /**
    * Rule to push a {@link org.apache.calcite.rel.core.Project} into a {@link DruidQuery}.
    */
   public static class DruidProjectRule extends RelOptRule {
@@ -343,14 +305,16 @@ public class DruidRules {
         return;
       }
 
-      if (canProjectAll(project.getProjects())) {
+      if (DruidQuery.computeProjectAsScan(project, query.getTable().getRowType(), query)
+          != null) {
         // All expressions can be pushed to Druid in their entirety.
         final RelNode newProject = project.copy(project.getTraitSet(),
-                ImmutableList.of(Util.last(query.rels)));
+            ImmutableList.of(Util.last(query.rels)));
         RelNode newNode = DruidQuery.extendQuery(query, newProject);
         call.transformTo(newNode);
         return;
       }
+
       final Pair<List<RexNode>, List<RexNode>> pair =
           splitProjects(rexBuilder, query, project.getProjects());
       if (pair == null) {
@@ -378,15 +342,6 @@ public class DruidRules {
       call.transformTo(newProject2);
     }
 
-    private static boolean canProjectAll(List<RexNode> nodes) {
-      for (RexNode e : nodes) {
-        if (!(e instanceof RexInputRef)) {
-          return false;
-        }
-      }
-      return true;
-    }
-
     private static Pair<List<RexNode>, List<RexNode>> splitProjects(final RexBuilder rexBuilder,
             final RelNode input, List<RexNode> nodes) {
       final RelOptUtil.InputReferencedVisitor visitor = new RelOptUtil.InputReferencedVisitor();
@@ -442,183 +397,37 @@ public class DruidRules {
     public void onMatch(RelOptRuleCall call) {
       Project project = call.rel(0);
       DruidQuery query = call.rel(1);
-      final RelOptCluster cluster = project.getCluster();
-      final RexBuilder rexBuilder = cluster.getRexBuilder();
       if (!DruidQuery.isValidSignature(query.signature() + 'o')) {
         return;
       }
-      Pair<ImmutableMap<String, String>, Boolean> scanned = scanProject(query, project);
-      // Only try to push down Project when there will be Post aggregators in result DruidQuery
-      if (scanned.right) {
-        Pair<Project, Project> splitProjectAggregate = splitProject(rexBuilder, query,
-                project, scanned.left, cluster);
-        Project inner = splitProjectAggregate.left;
-        Project outer = splitProjectAggregate.right;
-        DruidQuery newQuery = DruidQuery.extendQuery(query, inner);
-        // When all project get pushed into DruidQuery, the project can be replaced by DruidQuery.
-        if (outer != null) {
-          Project newProject = outer.copy(outer.getTraitSet(), newQuery, outer.getProjects(),
-              outer.getRowType());
-          call.transformTo(newProject);
-        } else {
-          call.transformTo(newQuery);
+      boolean hasRexCalls = false;
+      for (RexNode rexNode : project.getChildExps()) {
+        if (rexNode instanceof RexCall) {
+          hasRexCalls = true;
+          break;
         }
       }
-    }
+      // Only try to push the Project down when it will produce post-aggregators in the DruidQuery.
+      if (hasRexCalls) {
 
-    /**
-     * Similar to split Project in DruidProjectRule. It used the name mapping from scanProject
-     * to render the correct field names of inner project so that the outer project can correctly
-     * refer to them. For RexNode that can be parsed into post aggregator, they will get pushed in
-     * before input reference, then outer project can simply refer to those pushed in RexNode to
-     * get result.
-     * @param rexBuilder builder from cluster
-     * @param query matched Druid Query
-     * @param project matched project takes in druid
-     * @param nameMap Result nameMapping from scanProject
-     * @param cluster cluster that provide builder for row type.
-     * @return Triple object contains inner project, outer project and required
-     *         Json Post Aggregation objects to be pushed down into Druid Query.
-     */
-    public Pair<Project, Project> splitProject(final RexBuilder rexBuilder,
-        DruidQuery query, Project project, ImmutableMap<String, String> nameMap,
-        final RelOptCluster cluster) {
-      //Visit & Build Inner Project
-      final List<RexNode> innerRex = new ArrayList<>();
-      final RelDataTypeFactory.Builder typeBuilder =
-          cluster.getTypeFactory().builder();
-      final RelOptUtil.InputReferencedVisitor visitor =
-          new RelOptUtil.InputReferencedVisitor();
-      final List<Integer> positions = new ArrayList<>();
-      final List<RelDataType> innerTypes = new ArrayList<>();
-      // Similar logic to splitProject in DruidProject Rule
-      // However, post aggregation will also be output of DruidQuery and they will be
-      // added before other input.
-      int offset = 0;
-      for (Pair<RexNode, String> pair : project.getNamedProjects()) {
-        RexNode rex = pair.left;
-        String name = pair.right;
-        String fieldName = nameMap.get(name);
-        if (fieldName == null) {
-          rex.accept(visitor);
+        final RelNode topNode = query.getTopNode();
+        final Aggregate topAgg;
+        if (topNode instanceof Aggregate) {
+          topAgg = (Aggregate) topNode;
         } else {
-          final RexNode node = rexBuilder.copy(rex);
-          innerRex.add(node);
-          positions.add(offset++);
-          typeBuilder.add(nameMap.get(name), node.getType());
-          innerTypes.add(node.getType());
+          topAgg = (Aggregate) ((Filter) topNode).getInput();
         }
-      }
-      // Other referred input will be added into the inner project rex list.
-      positions.addAll(visitor.inputPosReferenced);
-      for (int i : visitor.inputPosReferenced) {
-        final RexNode node = rexBuilder.makeInputRef(Util.last(query.rels), i);
-        innerRex.add(node);
-        typeBuilder.add(query.getRowType().getFieldNames().get(i), node.getType());
-        innerTypes.add(node.getType());
-      }
-      Project innerProject = project.copy(project.getTraitSet(), Util.last(query.rels), innerRex,
-          typeBuilder.build());
-      // If the whole project is pushed, we do not need to do anything else.
-      if (project.getNamedProjects().size() == nameMap.size()) {
-        return new Pair<>(innerProject, null);
-      }
-      // Build outer Project when some projects are left in outer project.
-      offset = 0;
-      final List<RexNode> outerRex = new ArrayList<>();
-      for (Pair<RexNode, String> pair : project.getNamedProjects()) {
-        RexNode rex = pair.left;
-        String name = pair.right;
-        if (!nameMap.containsKey(name)) {
-          outerRex.add(
-              rex.accept(
-                  new RexShuttle() {
-                    @Override public RexNode visitInputRef(RexInputRef ref) {
-                      final int j = positions.indexOf(ref.getIndex());
-                      return rexBuilder.makeInputRef(innerTypes.get(j), j);
-                    }
-                  }));
-        } else {
-          outerRex.add(
-              rexBuilder.makeInputRef(rex.getType(),
-                  positions.indexOf(offset++)));
-        }
-      }
-      Project outerProject = project.copy(project.getTraitSet(), innerProject,
-          outerRex, project.getRowType());
-      return new Pair<>(innerProject, outerProject);
-    }
-
-    /**
-     * Scans the project.
-     *
-     * <p>Takes Druid Query as input to figure out which expression can be
-     * pushed down. Also returns a map to show the correct field name in Druid
-     * Query for columns get pushed in.
-     *
-     * @param query matched Druid Query
-     * @param project Matched project that takes in Druid Query
-     * @return Pair that shows how name map with each other.
-     */
-    public Pair<ImmutableMap<String, String>, Boolean> scanProject(
-        DruidQuery query, Project project) {
-      List<String> aggNamesWithGroup = query.getRowType().getFieldNames();
-      final ImmutableMap.Builder<String, String> mapBuilder = ImmutableMap.builder();
-      int j = 0;
-      boolean ret = false;
-      for (Pair<RexNode, String> namedProject : project.getNamedProjects()) {
-        RexNode rex = namedProject.left;
-        String name = namedProject.right;
-        // Find out the corresponding fieldName for DruidQuery to fetch result
-        // in DruidConnectionImpl, give specific name for post aggregator
-        if (rex instanceof RexCall) {
-          if (checkPostAggregatorExist(rex)) {
-            String postAggName = "postagg#" + j++;
-            mapBuilder.put(name, postAggName);
-            ret = true;
-          }
-        } else if (rex instanceof RexInputRef) {
-          String fieldName = aggNamesWithGroup.get(((RexInputRef) rex).getIndex());
-          mapBuilder.put(name, fieldName);
-        }
-      }
-      return new Pair<>(mapBuilder.build(), ret);
-    }
 
-    /**
-     * Recursively check whether the rexNode can be parsed into post aggregator in druid query
-     * Have to fulfill conditions below:
-     * 1. Arithmetic operation +, -, /, * or CAST in SQL
-     * 2. Simple input reference refer to the result of Aggregate or Grouping
-     * 3. A constant
-     * 4. All input referred should also be able to be parsed
-     * @param rexNode input RexNode to be recursively checked
-     * @return a boolean shows whether this rexNode can be parsed or not.
-     */
-    public boolean checkPostAggregatorExist(RexNode rexNode) {
-      if (rexNode instanceof RexCall) {
-        for (RexNode ele : ((RexCall) rexNode).getOperands()) {
-          boolean inputRex = checkPostAggregatorExist(ele);
-          if (!inputRex) {
-            return false;
+        for (RexNode rexNode : project.getProjects()) {
+          if (DruidExpressions.toDruidExpression(rexNode, topAgg.getRowType(), query) == null) {
+            return;
           }
         }
-        switch (rexNode.getKind()) {
-        case PLUS:
-        case MINUS:
-        case DIVIDE:
-        case TIMES:
-        //case CAST:
-          return true;
-        default:
-          return false;
-        }
-      } else if (rexNode instanceof RexInputRef || rexNode instanceof RexLiteral) {
-        // Do not have to check the source of input because the signature checking ensure
-        // the input of project must be Aggregate.
-        return true;
+        final RelNode newProject = project
+            .copy(project.getTraitSet(), ImmutableList.of(Util.last(query.rels)));
+        final DruidQuery newQuery = DruidQuery.extendQuery(query, newProject);
+        call.transformTo(newQuery);
       }
-      return false;
     }
   }
 
@@ -640,28 +449,30 @@ public class DruidRules {
     public void onMatch(RelOptRuleCall call) {
       final Aggregate aggregate = call.rel(0);
       final DruidQuery query = call.rel(1);
+      final RelNode topDruidNode = query.getTopNode();
+      final Project project = topDruidNode instanceof Project ? (Project) topDruidNode : null;
       if (!DruidQuery.isValidSignature(query.signature() + 'a')) {
         return;
       }
 
       if (aggregate.indicator
-              || aggregate.getGroupSets().size() != 1
-              || BAD_AGG.apply(ImmutableTriple.of(aggregate, (RelNode) aggregate, query))
-              || !validAggregate(aggregate, query)) {
+          || aggregate.getGroupSets().size() != 1) {
         return;
       }
-      final RelNode newAggregate = aggregate.copy(aggregate.getTraitSet(),
-              ImmutableList.of(Util.last(query.rels)));
-      call.transformTo(DruidQuery.extendQuery(query, newAggregate));
-    }
-
-    /* Check whether agg functions reference timestamp */
-    private static boolean validAggregate(Aggregate aggregate, DruidQuery query) {
-      ImmutableBitSet.Builder builder = ImmutableBitSet.builder();
-      for (AggregateCall aggCall : aggregate.getAggCallList()) {
-        builder.addAll(aggCall.getArgList());
+      if (DruidQuery
+          .computeProjectGroupSet(project, aggregate.getGroupSet(), query.table.getRowType(), query)
+          == null) {
+        return;
+      }
+      final List<String> aggNames = Util
+          .skip(aggregate.getRowType().getFieldNames(), aggregate.getGroupSet().cardinality());
+      if (DruidQuery.computeDruidJsonAgg(aggregate.getAggCallList(), aggNames, project, query)
+          == null) {
+        return;
       }
-      return !checkTimestampRefOnQuery(builder.build(), query.getTopNode(), query);
+      final RelNode newAggregate = aggregate
+          .copy(aggregate.getTraitSet(), ImmutableList.of(query.getTopNode()));
+      call.transformTo(DruidQuery.extendQuery(query, newAggregate));
     }
   }
 
@@ -691,34 +502,26 @@ public class DruidRules {
       if (!DruidQuery.isValidSignature(query.signature() + 'p' + 'a')) {
         return;
       }
-
-      int timestampIdx = validProject(project, query);
-      List<Integer> filterRefs = getFilterRefs(aggregate.getAggCallList());
-
-      if (timestampIdx == -1 && filterRefs.size() == 0) {
+      if (aggregate.indicator
+          || aggregate.getGroupSets().size() != 1) {
         return;
       }
-
-      // Check that the filters that the Aggregate calls refer to are valid filters can be pushed
-      // into Druid
-      for (Integer i : filterRefs) {
-        RexNode filterNode = project.getProjects().get(i);
-        if (!query.isValidFilter(filterNode) || filterNode.isAlwaysFalse()) {
-          return;
-        }
+      if (DruidQuery
+          .computeProjectGroupSet(project, aggregate.getGroupSet(), query.table.getRowType(), query)
+          == null) {
+        return;
       }
-
-      if (aggregate.indicator
-              || aggregate.getGroupSets().size() != 1
-              || BAD_AGG.apply(ImmutableTriple.of(aggregate, (RelNode) project, query))
-              || !validAggregate(aggregate, timestampIdx, filterRefs.size())) {
+      final List<String> aggNames = Util
+          .skip(aggregate.getRowType().getFieldNames(), aggregate.getGroupSet().cardinality());
+      if (DruidQuery.computeDruidJsonAgg(aggregate.getAggCallList(), aggNames, project, query)
+          == null) {
         return;
       }
       final RelNode newProject = project.copy(project.getTraitSet(),
               ImmutableList.of(Util.last(query.rels)));
       final RelNode newAggregate = aggregate.copy(aggregate.getTraitSet(),
               ImmutableList.of(newProject));
-
+      List<Integer> filterRefs = getFilterRefs(aggregate.getAggCallList());
       final DruidQuery query2;
       if (filterRefs.size() > 0) {
         query2 = optimizeFilteredAggregations(call, query, (Project) newProject,
@@ -912,81 +715,6 @@ public class DruidRules {
       return refs;
     }
 
-    /* To be a valid Project, we allow it to contain references, and a single call
-     * to a FLOOR function on the timestamp column OR valid time EXTRACT on the timestamp column.
-     * Returns the reference to the timestamp, if any. */
-    private static int validProject(Project project, DruidQuery query) {
-      List<RexNode> nodes = project.getProjects();
-      int idxTimestamp = -1;
-      boolean hasFloor = false;
-      for (int i = 0; i < nodes.size(); i++) {
-        final RexNode e = nodes.get(i);
-        if (e instanceof RexCall) {
-          // It is a call, check that it is EXTRACT and follow-up conditions
-          final RexCall call = (RexCall) e;
-          final String timeZone = query.getCluster().getPlanner().getContext()
-              .unwrap(CalciteConnectionConfig.class).timeZone();
-          assert timeZone != null;
-          if (DruidDateTimeUtils.extractGranularity(call, timeZone) == null) {
-            return -1;
-          }
-          if (idxTimestamp != -1 && hasFloor) {
-            // Already one usage of timestamp column
-            return -1;
-          }
-          switch (call.getKind()) {
-          case FLOOR:
-            hasFloor = true;
-            if (!(call.getOperands().get(0) instanceof RexInputRef)) {
-              return -1;
-            }
-            final RexInputRef ref = (RexInputRef) call.getOperands().get(0);
-            if (!(checkTimestampRefOnQuery(ImmutableBitSet.of(ref.getIndex()),
-                query.getTopNode(),
-                query))) {
-              return -1;
-            }
-            idxTimestamp = i;
-            break;
-          case EXTRACT:
-            idxTimestamp = RelOptUtil.InputFinder.bits(call).asList().get(0);
-            break;
-          default:
-            throw new AssertionError();
-          }
-          continue;
-        }
-        if (!(e instanceof RexInputRef)) {
-          // It needs to be a reference
-          return -1;
-        }
-        final RexInputRef ref = (RexInputRef) e;
-        if (checkTimestampRefOnQuery(ImmutableBitSet.of(ref.getIndex()),
-                query.getTopNode(), query)) {
-          if (idxTimestamp != -1) {
-            // Already one usage of timestamp column
-            return -1;
-          }
-          idxTimestamp = i;
-        }
-      }
-      return idxTimestamp;
-    }
-
-    private static boolean validAggregate(Aggregate aggregate, int idx, int numFilterRefs) {
-      if (numFilterRefs > 0 && idx < 0) {
-        return true;
-      }
-      if (!aggregate.getGroupSet().get(idx)) {
-        return false;
-      }
-      for (AggregateCall aggCall : aggregate.getAggCallList()) {
-        if (aggCall.getArgList().contains(idx)) {
-          return false;
-        }
-      }
-      return true;
-    }
   }
 
   /**
@@ -1054,146 +782,20 @@ public class DruidRules {
         return;
       }
       // Either it is:
-      // - a sort and limit on a dimension/metric part of the druid group by query or
-      // - a sort without limit on the time column on top of
-      //     Agg operator (transformable to timeseries query), or
-      // - a simple limit on top of other operator than Agg
-      if (!validSortLimit(sort, query)) {
-        return;
-      }
-      final RelNode newSort = sort.copy(sort.getTraitSet(),
-              ImmutableList.of(Util.last(query.rels)));
-      call.transformTo(DruidQuery.extendQuery(query, newSort));
-    }
-
-    /** Checks whether sort is valid. */
-    private static boolean validSortLimit(Sort sort, DruidQuery query) {
+      // - a pure limit above a query of type scan, or
+      // - a sort and limit on a dimension/metric that is part of the Druid group-by query
       if (sort.offset != null && RexLiteral.intValue(sort.offset) != 0) {
         // offset not supported by Druid
-        return false;
-      }
-      // Use a different logic to push down Sort RelNode because the top node could be a Project now
-      RelNode topNode = query.getTopNode();
-      Aggregate topAgg;
-      if (topNode instanceof Project && ((Project) topNode).getInput() instanceof Aggregate) {
-        topAgg = (Aggregate) ((Project) topNode).getInput();
-      } else if (topNode instanceof Aggregate) {
-        topAgg = (Aggregate) topNode;
-      } else {
-        // If it is going to be a Druid select operator, we push the limit if
-        // it does not contain a sort specification (required by Druid)
-        return RelOptUtil.isPureLimit(sort);
-      }
-      final ImmutableBitSet.Builder positionsReferenced = ImmutableBitSet.builder();
-      for (RelFieldCollation col : sort.collation.getFieldCollations()) {
-        int idx = col.getFieldIndex();
-        if (idx >= topAgg.getGroupCount()) {
-          continue;
-        }
-        //has the indexes of the columns used for sorts
-        positionsReferenced.set(topAgg.getGroupSet().nth(idx));
-      }
-      // Case it is a timeseries query
-      if (checkIsFlooringTimestampRefOnQuery(topAgg.getGroupSet(), topAgg.getInput(), query)
-          && topAgg.getGroupCount() == 1) {
-        // do not push if it has a limit or more than one sort key or we have sort by
-        // metric/dimension
-        return !RelOptUtil.isLimit(sort) && sort.collation.getFieldCollations().size() == 1
-            && checkTimestampRefOnQuery(positionsReferenced.build(), topAgg.getInput(), query);
-      }
-      return true;
-    }
-  }
-
-  /** Returns true if any of the grouping key is a floor operator over the timestamp column. */
-  private static boolean checkIsFlooringTimestampRefOnQuery(ImmutableBitSet set, RelNode top,
-      DruidQuery query) {
-    if (top instanceof Project) {
-      ImmutableBitSet.Builder newSet = ImmutableBitSet.builder();
-      final Project project = (Project) top;
-      for (int index : set) {
-        RexNode node = project.getProjects().get(index);
-        if (node instanceof RexCall) {
-          RexCall call = (RexCall) node;
-          final String timeZone = query.getCluster().getPlanner().getContext()
-              .unwrap(CalciteConnectionConfig.class).timeZone();
-          assert timeZone != null;
-          assert DruidDateTimeUtils.extractGranularity(call, timeZone) != null;
-          if (call.getKind() == SqlKind.FLOOR) {
-            newSet.addAll(RelOptUtil.InputFinder.bits(call));
-          }
-        }
-      }
-      top = project.getInput();
-      set = newSet.build();
-    }
-    // Check if any references the timestamp column
-    for (int index : set) {
-      if (query.druidTable.timestampFieldName.equals(
-          top.getRowType().getFieldNames().get(index))) {
-        return true;
-      }
-    }
-
-    return false;
-  }
-
-  /** Checks whether any of the references leads to the timestamp column. */
-  private static boolean checkTimestampRefOnQuery(ImmutableBitSet set, RelNode top,
-      DruidQuery query) {
-    if (top instanceof Project) {
-      ImmutableBitSet.Builder newSet = ImmutableBitSet.builder();
-      final Project project = (Project) top;
-      for (int index : set) {
-        RexNode node = project.getProjects().get(index);
-        if (node instanceof RexInputRef) {
-          newSet.set(((RexInputRef) node).getIndex());
-        } else if (node instanceof RexCall) {
-          RexCall call = (RexCall) node;
-          final String timeZone = query.getCluster().getPlanner().getContext()
-              .unwrap(CalciteConnectionConfig.class).timeZone();
-          assert timeZone != null;
-          assert DruidDateTimeUtils.extractGranularity(call, timeZone) != null;
-          // when we have extract from time column the rexCall is of the form
-          // "/Reinterpret$0"
-          newSet.addAll(RelOptUtil.InputFinder.bits(call));
-        }
+        return;
       }
-      top = project.getInput();
-      set = newSet.build();
-    }
-
-    // Check if any references the timestamp column
-    for (int index : set) {
-      if (query.druidTable.timestampFieldName.equals(
-              top.getRowType().getFieldNames().get(index))) {
-        return true;
+      if (query.getQueryType() == QueryType.SCAN && !RelOptUtil.isPureLimit(sort)) {
+        return;
       }
-    }
 
-    return false;
-  }
-
-  /** Checks whether any of the references leads to a metric column. */
-  private static boolean checkAggregateOnMetric(ImmutableBitSet set, RelNode topProject,
-      DruidQuery query) {
-    if (topProject instanceof Project) {
-      ImmutableBitSet.Builder newSet = ImmutableBitSet.builder();
-      final Project project = (Project) topProject;
-      for (int index : set) {
-        RexNode node = project.getProjects().get(index);
-        ImmutableBitSet setOfBits = RelOptUtil.InputFinder.bits(node);
-        newSet.addAll(setOfBits);
-      }
-      set = newSet.build();
-    }
-    for (int index : set) {
-      if (query.druidTable.isMetric(query.getTopNode().getRowType().getFieldNames().get(index))) {
-        return true;
-      }
+      final RelNode newSort = sort
+          .copy(sort.getTraitSet(), ImmutableList.of(Util.last(query.rels)));
+      call.transformTo(DruidQuery.extendQuery(query, newSort));
     }
-
-    return false;
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/calcite/blob/98f3704e/druid/src/main/java/org/apache/calcite/adapter/druid/DruidSqlCastConverter.java
----------------------------------------------------------------------
diff --git a/druid/src/main/java/org/apache/calcite/adapter/druid/DruidSqlCastConverter.java b/druid/src/main/java/org/apache/calcite/adapter/druid/DruidSqlCastConverter.java
new file mode 100644
index 0000000..0731a6f
--- /dev/null
+++ b/druid/src/main/java/org/apache/calcite/adapter/druid/DruidSqlCastConverter.java
@@ -0,0 +1,152 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to you under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.calcite.adapter.druid;
+
+import org.apache.calcite.rel.type.RelDataType;
+import org.apache.calcite.rex.RexCall;
+import org.apache.calcite.rex.RexNode;
+import org.apache.calcite.sql.SqlOperator;
+import org.apache.calcite.sql.fun.SqlStdOperatorTable;
+import org.apache.calcite.sql.type.SqlTypeName;
+
+import com.google.common.collect.ImmutableList;
+
+import org.joda.time.Period;
+
+import java.util.TimeZone;
+
+/**
+ * Druid cast converter operator; translates Calcite casts to Druid expression casts.
+ */
+public class DruidSqlCastConverter implements DruidSqlOperatorConverter {
+
+  @Override public SqlOperator calciteOperator() {
+    return SqlStdOperatorTable.CAST;
+  }
+
+  @Override public String toDruidExpression(RexNode rexNode, RelDataType topRel,
+      DruidQuery druidQuery) {
+
+    final RexNode operand = ((RexCall) rexNode).getOperands().get(0);
+    final String operandExpression = DruidExpressions.toDruidExpression(operand,
+        topRel, druidQuery);
+
+    if (operandExpression == null) {
+      return null;
+    }
+
+    final SqlTypeName fromType = operand.getType().getSqlTypeName();
+    final SqlTypeName toType = rexNode.getType().getSqlTypeName();
+    final String timeZoneConf = druidQuery.getConnectionConfig().timeZone();
+    final TimeZone timeZone = TimeZone.getTimeZone(timeZoneConf == null ? "UTC" : timeZoneConf);
+
+    if (SqlTypeName.CHAR_TYPES.contains(fromType) && SqlTypeName.DATETIME_TYPES.contains(toType)) {
+      // case: chars to dates
+      return castCharToDateTime(timeZone, operandExpression,
+          toType);
+    } else if (SqlTypeName.DATETIME_TYPES.contains(fromType)
+        && SqlTypeName.CHAR_TYPES.contains(toType)) {
+      // case: dates to chars
+      return castDateTimeToChar(timeZone, operandExpression,
+          fromType);
+    } else {
+      // Handle other casts.
+      final DruidType fromExprType = DruidExpressions.EXPRESSION_TYPES.get(fromType);
+      final DruidType toExprType = DruidExpressions.EXPRESSION_TYPES.get(toType);
+
+      if (fromExprType == null || toExprType == null) {
+        // Unknown types; bail out.
+        return null;
+      }
+      final String typeCastExpression;
+      if (fromExprType != toExprType) {
+        typeCastExpression =
+            DruidQuery.format("CAST(%s, '%s')", operandExpression,
+                toExprType.toString());
+      } else {
+        // same type on both sides, so it is safe to skip the CAST
+        typeCastExpression = operandExpression;
+      }
+
+      if (toType == SqlTypeName.DATE) {
+        // Floor to day when casting to DATE.
+        return DruidExpressions.applyTimestampFloor(
+            typeCastExpression,
+            Period.days(1).toString(),
+            "",
+            timeZone);
+      } else {
+        return typeCastExpression;
+      }
+
+    }
+  }
+
+  private static String castCharToDateTime(
+      TimeZone timeZone,
+      String operand,
+      final SqlTypeName toType) {
+    // Cast strings to date times by parsing them from SQL format.
+    final String timestampExpression = DruidExpressions.functionCall(
+        "timestamp_parse",
+        ImmutableList.of(
+            operand,
+            DruidExpressions.stringLiteral(""),
+            DruidExpressions.stringLiteral(timeZone.getID())));
+
+    if (toType == SqlTypeName.DATE) {
+      // casting to DATE: floor to day first
+      return DruidExpressions.applyTimestampFloor(
+          timestampExpression,
+          Period.days(1).toString(),
+          "",
+          timeZone);
+    } else if (toType == SqlTypeName.TIMESTAMP || toType == SqlTypeName
+        .TIMESTAMP_WITH_LOCAL_TIME_ZONE) {
+      return timestampExpression;
+    } else {
+      throw new IllegalStateException(
+          DruidQuery.format("Unsupported DateTime type[%s]", toType));
+    }
+  }
+
+  private static String castDateTimeToChar(
+      final TimeZone timeZone,
+      final String operand,
+      final SqlTypeName fromType) {
+    return DruidExpressions.functionCall(
+        "timestamp_format",
+        ImmutableList.of(
+            operand,
+            DruidExpressions.stringLiteral(dateTimeFormatString(fromType)),
+            DruidExpressions.stringLiteral(timeZone.getID())));
+  }
+
+  public static String dateTimeFormatString(final SqlTypeName sqlTypeName) {
+    if (sqlTypeName == SqlTypeName.DATE) {
+      return "yyyy-MM-dd";
+    } else if (sqlTypeName == SqlTypeName.TIMESTAMP) {
+      return "yyyy-MM-dd HH:mm:ss";
+    } else if (sqlTypeName == SqlTypeName.TIMESTAMP_WITH_LOCAL_TIME_ZONE) {
+      return "yyyy-MM-dd'T'HH:mm:ss.SSS'Z'";
+    } else {
+      return null;
+    }
+  }
+}
+
+// End DruidSqlCastConverter.java
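
For illustration, a standalone sketch (not part of the patch) of the two expression
shapes the converter above produces. DruidQuery.format is assumed to behave like
java.lang.String.format, and the exact separator and quoting of the timestamp_parse
arguments is an assumption modeled on the stringLiteral/functionCall helpers used above:

public class CastShapeSketch {
  public static void main(String[] args) {
    // Numeric and other plain casts become a Druid CAST expression:
    System.out.println(String.format("CAST(%s, '%s')", "\"store_sales\"", "DOUBLE"));
    // prints: CAST("store_sales", 'DOUBLE')

    // Char-to-datetime casts are parsed rather than cast:
    System.out.println(String.format("timestamp_parse(%s,%s,%s)",
        "\"birthdate\"", "''", "'UTC'"));
    // prints: timestamp_parse("birthdate",'','UTC')
  }
}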

http://git-wip-us.apache.org/repos/asf/calcite/blob/98f3704e/druid/src/main/java/org/apache/calcite/adapter/druid/DruidSqlOperatorConverter.java
----------------------------------------------------------------------
diff --git a/druid/src/main/java/org/apache/calcite/adapter/druid/DruidSqlOperatorConverter.java b/druid/src/main/java/org/apache/calcite/adapter/druid/DruidSqlOperatorConverter.java
new file mode 100644
index 0000000..0ee179a
--- /dev/null
+++ b/druid/src/main/java/org/apache/calcite/adapter/druid/DruidSqlOperatorConverter.java
@@ -0,0 +1,49 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to you under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.calcite.adapter.druid;
+
+import org.apache.calcite.rel.type.RelDataType;
+import org.apache.calcite.rex.RexNode;
+import org.apache.calcite.sql.SqlOperator;
+
+import javax.annotation.Nullable;
+
+/**
+ * Defines how to convert a RexNode with a given Calcite SQL operator to a Druid expression.
+ */
+public interface DruidSqlOperatorConverter {
+
+  /**
+   * Returns the Calcite SQL operator corresponding to the Druid operator.
+   *
+   * @return operator
+   */
+  SqlOperator calciteOperator();
+
+
+  /**
+   * Translates a RexNode to a valid Druid expression.
+   * @param rexNode RexNode to translate to a Druid expression
+   * @param rowType row type associated with the RexNode
+   * @param druidQuery Druid query, used to look up configuration such as the time zone
+   *
+   * @return valid Druid expression, or null if the RexNode cannot be converted
+   */
+  @Nullable String toDruidExpression(RexNode rexNode, RelDataType rowType, DruidQuery druidQuery);
+}
+
+// End DruidSqlOperatorConverter.java
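
To make the contract concrete, here is a hedged sketch of a hypothetical converter
that would map Calcite's ABS to Druid's abs expression function. ABS support is not
part of this patch; the sketch assumes it sits in package
org.apache.calcite.adapter.druid next to the classes above, and that
DruidExpressions.functionCall exists as used elsewhere in this change:

package org.apache.calcite.adapter.druid;

import org.apache.calcite.rel.type.RelDataType;
import org.apache.calcite.rex.RexCall;
import org.apache.calcite.rex.RexNode;
import org.apache.calcite.sql.SqlOperator;
import org.apache.calcite.sql.fun.SqlStdOperatorTable;

import com.google.common.collect.ImmutableList;

/** Hypothetical example converter, not part of the patch. */
public class AbsOperatorConversion implements DruidSqlOperatorConverter {
  @Override public SqlOperator calciteOperator() {
    return SqlStdOperatorTable.ABS;
  }

  @Override public String toDruidExpression(RexNode rexNode, RelDataType rowType,
      DruidQuery druidQuery) {
    final RexCall call = (RexCall) rexNode;
    final String arg = DruidExpressions.toDruidExpression(
        call.getOperands().get(0), rowType, druidQuery);
    if (arg == null) {
      // Null signals "cannot translate"; the planner keeps the expression in Calcite.
      return null;
    }
    return DruidExpressions.functionCall("abs", ImmutableList.of(arg));
  }
}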

http://git-wip-us.apache.org/repos/asf/calcite/blob/98f3704e/druid/src/main/java/org/apache/calcite/adapter/druid/DruidType.java
----------------------------------------------------------------------
diff --git a/druid/src/main/java/org/apache/calcite/adapter/druid/DruidType.java b/druid/src/main/java/org/apache/calcite/adapter/druid/DruidType.java
index f50fdfd..ec601b7 100644
--- a/druid/src/main/java/org/apache/calcite/adapter/druid/DruidType.java
+++ b/druid/src/main/java/org/apache/calcite/adapter/druid/DruidType.java
@@ -21,10 +21,10 @@ import org.apache.calcite.sql.type.SqlTypeName;
 /** Druid type. */
 public enum DruidType {
   LONG(SqlTypeName.BIGINT),
-  // SQL DOUBLE and FLOAT types are both 64 bit, but we use DOUBLE because
-  // people find FLOAT confusing.
-  FLOAT(SqlTypeName.DOUBLE),
+  FLOAT(SqlTypeName.FLOAT),
+  DOUBLE(SqlTypeName.DOUBLE),
   STRING(SqlTypeName.VARCHAR),
+  COMPLEX(SqlTypeName.OTHER),
   HYPER_UNIQUE(SqlTypeName.VARBINARY),
   THETA_SKETCH(SqlTypeName.VARBINARY);
 
@@ -39,13 +39,13 @@ public enum DruidType {
    * Returns true if and only if this enum should be used inside of a {@link ComplexMetric}
    * */
   public boolean isComplex() {
-    return this == THETA_SKETCH || this == HYPER_UNIQUE;
+    return this == THETA_SKETCH || this == HYPER_UNIQUE || this == COMPLEX;
   }
 
   /**
    * Returns a DruidType matching the given String type from a Druid metric
    * */
-  public static DruidType getTypeFromMetric(String type) {
+  protected static DruidType getTypeFromMetric(String type) {
     assert type != null;
     if (type.equals("hyperUnique")) {
       return HYPER_UNIQUE;
@@ -54,6 +54,8 @@ public enum DruidType {
     } else if (type.startsWith("long") || type.equals("count")) {
       return LONG;
     } else if (type.startsWith("double")) {
+      return DOUBLE;
+    } else if (type.startsWith("float")) {
       return FLOAT;
     }
     throw new AssertionError("Unknown type: " + type);
@@ -62,13 +64,15 @@ public enum DruidType {
   /**
    * Returns a DruidType matching the String from a meta data query
    * */
-  public static DruidType getTypeFromMetaData(String type) {
+  protected static DruidType getTypeFromMetaData(String type) {
     assert type != null;
     switch (type) {
     case "LONG":
       return LONG;
     case "FLOAT":
       return FLOAT;
+    case "DOUBLE":
+      return DOUBLE;
     case "STRING":
       return STRING;
     default:

http://git-wip-us.apache.org/repos/asf/calcite/blob/98f3704e/druid/src/main/java/org/apache/calcite/adapter/druid/ExtractOperatorConversion.java
----------------------------------------------------------------------
diff --git a/druid/src/main/java/org/apache/calcite/adapter/druid/ExtractOperatorConversion.java b/druid/src/main/java/org/apache/calcite/adapter/druid/ExtractOperatorConversion.java
new file mode 100644
index 0000000..6e35540
--- /dev/null
+++ b/druid/src/main/java/org/apache/calcite/adapter/druid/ExtractOperatorConversion.java
@@ -0,0 +1,80 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to you under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.calcite.adapter.druid;
+
+import org.apache.calcite.avatica.util.TimeUnitRange;
+import org.apache.calcite.rel.type.RelDataType;
+import org.apache.calcite.rex.RexCall;
+import org.apache.calcite.rex.RexLiteral;
+import org.apache.calcite.rex.RexNode;
+import org.apache.calcite.sql.SqlOperator;
+import org.apache.calcite.sql.fun.SqlStdOperatorTable;
+
+import com.google.common.collect.ImmutableMap;
+
+import java.util.Map;
+import java.util.TimeZone;
+
+/**
+ * Time extract operator conversion for expressions like EXTRACT(timeUnit FROM arg).
+ * The unit can be SECOND, MINUTE, HOUR, DAY (day of month),
+ * DOW (day of week), DOY (day of year), WEEK (week of week year),
+ * MONTH (1 through 12), QUARTER (1 through 4), or YEAR.
+ **/
+public class ExtractOperatorConversion implements DruidSqlOperatorConverter {
+  private static final Map<TimeUnitRange, String> EXTRACT_UNIT_MAP =
+      ImmutableMap.<TimeUnitRange, String>builder()
+          .put(TimeUnitRange.SECOND, "SECOND")
+          .put(TimeUnitRange.MINUTE, "MINUTE")
+          .put(TimeUnitRange.HOUR, "HOUR")
+          .put(TimeUnitRange.DAY, "DAY")
+          .put(TimeUnitRange.DOW, "DOW")
+          .put(TimeUnitRange.DOY, "DOY")
+          .put(TimeUnitRange.WEEK, "WEEK")
+          .put(TimeUnitRange.MONTH, "MONTH")
+          .put(TimeUnitRange.QUARTER, "QUARTER")
+          .put(TimeUnitRange.YEAR, "YEAR")
+          .build();
+
+  @Override public SqlOperator calciteOperator() {
+    return SqlStdOperatorTable.EXTRACT;
+  }
+
+  @Override public String toDruidExpression(
+      RexNode rexNode, RelDataType rowType, DruidQuery query) {
+
+    final RexCall call = (RexCall) rexNode;
+    final RexLiteral flag = (RexLiteral) call.getOperands().get(0);
+    final TimeUnitRange calciteUnit = (TimeUnitRange) flag.getValue();
+    final RexNode arg = call.getOperands().get(1);
+
+    final String input = DruidExpressions.toDruidExpression(arg, rowType, query);
+    if (input == null) {
+      return null;
+    }
+
+    final String druidUnit = EXTRACT_UNIT_MAP.get(calciteUnit);
+    if (druidUnit == null) {
+      return null;
+    }
+
+    return DruidExpressions.applyTimeExtract(
+        input, druidUnit, TimeZone.getTimeZone(query.getConnectionConfig().timeZone()));
+  }
+}
+
+// End ExtractOperatorConversion.java
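
A standalone sketch of the output shape, assuming applyTimeExtract renders Druid's
timestamp_extract(expr, unit, timezone) function (its exact rendering lives in
DruidExpressions, which is not shown in this hunk):

public class ExtractShapeSketch {
  public static void main(String[] args) {
    // EXTRACT(YEAR FROM "__time") under a UTC connection time zone:
    System.out.println(String.format("timestamp_extract(%s,'%s','%s')",
        "\"__time\"", "YEAR", "UTC"));
    // prints: timestamp_extract("__time",'YEAR','UTC')
  }
}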

http://git-wip-us.apache.org/repos/asf/calcite/blob/98f3704e/druid/src/main/java/org/apache/calcite/adapter/druid/ExtractionDimensionSpec.java
----------------------------------------------------------------------
diff --git a/druid/src/main/java/org/apache/calcite/adapter/druid/ExtractionDimensionSpec.java b/druid/src/main/java/org/apache/calcite/adapter/druid/ExtractionDimensionSpec.java
index 601fc89..0aece36 100644
--- a/druid/src/main/java/org/apache/calcite/adapter/druid/ExtractionDimensionSpec.java
+++ b/druid/src/main/java/org/apache/calcite/adapter/druid/ExtractionDimensionSpec.java
@@ -21,6 +21,8 @@ import com.google.common.base.Preconditions;
 
 import java.io.IOException;
 
+import javax.annotation.Nullable;
+
 import static org.apache.calcite.adapter.druid.DruidQuery.writeField;
 import static org.apache.calcite.adapter.druid.DruidQuery.writeFieldIf;
 
@@ -34,18 +36,37 @@ public class ExtractionDimensionSpec implements DimensionSpec {
   private final String dimension;
   private final ExtractionFunction extractionFunction;
   private final String outputName;
+  private final DruidType outputType;
 
   public ExtractionDimensionSpec(String dimension, ExtractionFunction extractionFunction,
       String outputName) {
+    this(dimension, extractionFunction, outputName, DruidType.STRING);
+  }
+
+  public ExtractionDimensionSpec(String dimension, ExtractionFunction extractionFunction,
+      String outputName, DruidType outputType) {
     this.dimension = Preconditions.checkNotNull(dimension);
     this.extractionFunction = Preconditions.checkNotNull(extractionFunction);
     this.outputName = outputName;
+    this.outputType = outputType == null ? DruidType.STRING : outputType;
   }
 
-  public String getOutputName() {
+  @Override public String getOutputName() {
     return outputName;
   }
 
+  @Override public DruidType getOutputType() {
+    return outputType;
+  }
+
+  @Override public ExtractionFunction getExtractionFn() {
+    return extractionFunction;
+  }
+
+  @Override public String getDimension() {
+    return dimension;
+  }
+
   @Override public void write(JsonGenerator generator) throws IOException {
     generator.writeStartObject();
     generator.writeStringField("type", "extraction");
@@ -55,6 +76,33 @@ public class ExtractionDimensionSpec implements DimensionSpec {
     generator.writeEndObject();
   }
 
+  /**
+   * @param dimensionSpec Druid dimension spec object
+   *
+   * @return valid {@link Granularity} of a floor extract, or null when not possible.
+   */
+  @Nullable
+  public static Granularity toQueryGranularity(DimensionSpec dimensionSpec) {
+    if (!DruidTable.DEFAULT_TIMESTAMP_COLUMN.equals(dimensionSpec.getDimension())) {
+      // Only __time column can be substituted by granularity
+      return null;
+    }
+    final ExtractionFunction extractionFunction = dimensionSpec.getExtractionFn();
+    if (extractionFunction == null) {
+      // No Extract thus no Granularity
+      return null;
+    }
+    if (extractionFunction instanceof TimeExtractionFunction) {
+      Granularity granularity = ((TimeExtractionFunction) extractionFunction).getGranularity();
+      String format = ((TimeExtractionFunction) extractionFunction).getFormat();
+      if (!TimeExtractionFunction.ISO_TIME_FORMAT.equals(format)) {
+        return null;
+      }
+      return granularity;
+    }
+    return null;
+  }
+
 }
 
 // End ExtractionDimensionSpec.java
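
A hypothetical usage sketch of toQueryGranularity: only a floor-style
TimeExtractionFunction over the __time column in the ISO time format collapses into
a plain query granularity; anything else yields null. The "UTC" zone and the output
name "d0" are illustrative:

package org.apache.calcite.adapter.druid; // sketch placed alongside the classes above

import org.apache.calcite.avatica.util.TimeUnitRange;

/** Hypothetical usage sketch, not part of the patch. */
public class GranularityCollapseSketch {
  public static void main(String[] args) {
    final String tz = "UTC";
    final ExtractionFunction floorDay =
        TimeExtractionFunction.createFloorFromGranularity(
            Granularities.createGranularity(TimeUnitRange.DAY, tz), tz);
    final DimensionSpec spec = new ExtractionDimensionSpec(
        DruidTable.DEFAULT_TIMESTAMP_COLUMN, floorDay, "d0");
    final Granularity g = ExtractionDimensionSpec.toQueryGranularity(spec);
    // Non-null only for an ISO-format floor on __time:
    System.out.println(g != null ? "collapses to a query granularity" : "stays a dimension spec");
  }
}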

http://git-wip-us.apache.org/repos/asf/calcite/blob/98f3704e/druid/src/main/java/org/apache/calcite/adapter/druid/ExtractionFunction.java
----------------------------------------------------------------------
diff --git a/druid/src/main/java/org/apache/calcite/adapter/druid/ExtractionFunction.java b/druid/src/main/java/org/apache/calcite/adapter/druid/ExtractionFunction.java
index 8143f8c..d572514 100644
--- a/druid/src/main/java/org/apache/calcite/adapter/druid/ExtractionFunction.java
+++ b/druid/src/main/java/org/apache/calcite/adapter/druid/ExtractionFunction.java
@@ -21,7 +21,7 @@ package org.apache.calcite.adapter.druid;
  *
  * <p>Extraction functions define the transformation applied to each dimension value.
  */
-public interface ExtractionFunction extends DruidQuery.Json {
+public interface ExtractionFunction extends DruidJson {
 }
 
 // End ExtractionFunction.java

http://git-wip-us.apache.org/repos/asf/calcite/blob/98f3704e/druid/src/main/java/org/apache/calcite/adapter/druid/FloorOperatorConversion.java
----------------------------------------------------------------------
diff --git a/druid/src/main/java/org/apache/calcite/adapter/druid/FloorOperatorConversion.java b/druid/src/main/java/org/apache/calcite/adapter/druid/FloorOperatorConversion.java
new file mode 100644
index 0000000..0d8ecc1
--- /dev/null
+++ b/druid/src/main/java/org/apache/calcite/adapter/druid/FloorOperatorConversion.java
@@ -0,0 +1,74 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to you under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.calcite.adapter.druid;
+
+import org.apache.calcite.rel.type.RelDataType;
+import org.apache.calcite.rex.RexCall;
+import org.apache.calcite.rex.RexNode;
+import org.apache.calcite.sql.SqlOperator;
+import org.apache.calcite.sql.fun.SqlStdOperatorTable;
+
+import java.util.TimeZone;
+
+import javax.annotation.Nullable;
+
+
+/**
+ * DruidSqlOperatorConverter implementation that handles Floor operations conversions
+ */
+public class FloorOperatorConversion implements DruidSqlOperatorConverter {
+  @Override public SqlOperator calciteOperator() {
+    return SqlStdOperatorTable.FLOOR;
+  }
+
+  @Nullable
+  @Override public String toDruidExpression(RexNode rexNode, RelDataType rowType,
+      DruidQuery druidQuery) {
+    final RexCall call = (RexCall) rexNode;
+    final RexNode arg = call.getOperands().get(0);
+    final String druidExpression = DruidExpressions.toDruidExpression(
+        arg,
+        rowType,
+        druidQuery);
+    if (druidExpression == null) {
+      return null;
+    } else if (call.getOperands().size() == 1) {
+      // case FLOOR(expr)
+      return DruidQuery.format("floor(%s)", druidExpression);
+    } else if (call.getOperands().size() == 2) {
+      // FLOOR(expr TO timeUnit)
+      final Granularity granularity = DruidDateTimeUtils
+          .extractGranularity(call, druidQuery.getConnectionConfig().timeZone());
+      if (granularity == null) {
+        return null;
+      }
+      String isoPeriodFormat = DruidDateTimeUtils.toISOPeriodFormat(granularity.getType());
+      if (isoPeriodFormat == null) {
+        return null;
+      }
+      return DruidExpressions.applyTimestampFloor(
+          druidExpression,
+          isoPeriodFormat,
+          "",
+          TimeZone.getTimeZone(druidQuery.getConnectionConfig().timeZone()));
+    } else {
+      return null;
+    }
+  }
+}
+
+// End FloorOperatorConversion.java
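
A standalone sketch of the two output shapes above; the timestamp_floor argument
layout (expression, ISO period, origin, time zone) is an assumption modeled on
applyTimestampFloor's parameters:

import org.joda.time.Period;

public class FloorShapeSketch {
  public static void main(String[] args) {
    // FLOOR(expr) on a numeric expression:
    System.out.println(String.format("floor(%s)", "\"delta\""));
    // prints: floor("delta")

    // FLOOR(expr TO MONTH) on the time column (assumed rendering):
    System.out.println(String.format("timestamp_floor(%s,'%s','%s','%s')",
        "\"__time\"", Period.months(1).toString(), "", "UTC"));
    // prints: timestamp_floor("__time",'P1M','','UTC')
  }
}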

http://git-wip-us.apache.org/repos/asf/calcite/blob/98f3704e/druid/src/main/java/org/apache/calcite/adapter/druid/Granularities.java
----------------------------------------------------------------------
diff --git a/druid/src/main/java/org/apache/calcite/adapter/druid/Granularities.java b/druid/src/main/java/org/apache/calcite/adapter/druid/Granularities.java
index df1b291..2015075 100644
--- a/druid/src/main/java/org/apache/calcite/adapter/druid/Granularities.java
+++ b/druid/src/main/java/org/apache/calcite/adapter/druid/Granularities.java
@@ -72,9 +72,7 @@ public class Granularities {
     INSTANCE;
 
     @Override public void write(JsonGenerator generator) throws IOException {
-      generator.writeStartObject();
-      generator.writeStringField("type", "all");
-      generator.writeEndObject();
+      generator.writeObject("all");
     }
 
     @Nonnull public Type getType() {

http://git-wip-us.apache.org/repos/asf/calcite/blob/98f3704e/druid/src/main/java/org/apache/calcite/adapter/druid/Granularity.java
----------------------------------------------------------------------
diff --git a/druid/src/main/java/org/apache/calcite/adapter/druid/Granularity.java b/druid/src/main/java/org/apache/calcite/adapter/druid/Granularity.java
index ffedec2..f70fd18 100644
--- a/druid/src/main/java/org/apache/calcite/adapter/druid/Granularity.java
+++ b/druid/src/main/java/org/apache/calcite/adapter/druid/Granularity.java
@@ -32,7 +32,7 @@ import javax.annotation.Nonnull;
  *
  * @see Granularities
  */
-public interface Granularity extends DruidQuery.Json {
+public interface Granularity extends DruidJson {
   /** Type of supported periods for granularity. */
   enum Type {
     ALL,

http://git-wip-us.apache.org/repos/asf/calcite/blob/98f3704e/druid/src/main/java/org/apache/calcite/adapter/druid/NaryOperatorConverter.java
----------------------------------------------------------------------
diff --git a/druid/src/main/java/org/apache/calcite/adapter/druid/NaryOperatorConverter.java b/druid/src/main/java/org/apache/calcite/adapter/druid/NaryOperatorConverter.java
new file mode 100644
index 0000000..961454b
--- /dev/null
+++ b/druid/src/main/java/org/apache/calcite/adapter/druid/NaryOperatorConverter.java
@@ -0,0 +1,60 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to you under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.calcite.adapter.druid;
+
+import org.apache.calcite.rel.type.RelDataType;
+import org.apache.calcite.rex.RexCall;
+import org.apache.calcite.rex.RexNode;
+import org.apache.calcite.sql.SqlOperator;
+
+import com.google.common.base.Preconditions;
+
+import java.util.List;
+
+import javax.annotation.Nullable;
+
+/**
+ * Converts Calcite n-ary operators to a Druid expression, e.g. (arg1 Op arg2 Op arg3).
+ */
+public class NaryOperatorConverter implements DruidSqlOperatorConverter {
+  private final SqlOperator operator;
+  private final String druidOperatorName;
+
+  public NaryOperatorConverter(SqlOperator operator, String druidOperatorName) {
+    this.operator = Preconditions.checkNotNull(operator);
+    this.druidOperatorName = Preconditions.checkNotNull(druidOperatorName);
+  }
+
+  @Override public SqlOperator calciteOperator() {
+    return operator;
+  }
+
+  @Nullable
+  @Override public String toDruidExpression(RexNode rexNode, RelDataType rowType,
+      DruidQuery druidQuery) {
+    final RexCall call = (RexCall) rexNode;
+    final List<String> druidExpressions = DruidExpressions.toDruidExpressions(
+        druidQuery, rowType,
+        call.getOperands());
+    if (druidExpressions == null) {
+      return null;
+    }
+    return DruidExpressions.nAryOperatorCall(druidOperatorName, druidExpressions);
+  }
+}
+
+// End NaryOperatorConverter.java
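
The converters are registered centrally in DruidQuery (outside this hunk); a hedged
sketch of what n-ary bindings could look like, where the Druid spellings "+", "&&"
and "||" are assumptions about Druid's expression language:

package org.apache.calcite.adapter.druid; // sketch placed alongside the classes above

import org.apache.calcite.sql.fun.SqlStdOperatorTable;

/** Hypothetical binding sketch, not part of the patch. */
public class NaryBindingSketch {
  public static void main(String[] args) {
    // Would render e.g. (a + b + c), (a && b), (a || b):
    DruidSqlOperatorConverter plus =
        new NaryOperatorConverter(SqlStdOperatorTable.PLUS, "+");
    DruidSqlOperatorConverter and =
        new NaryOperatorConverter(SqlStdOperatorTable.AND, "&&");
    DruidSqlOperatorConverter or =
        new NaryOperatorConverter(SqlStdOperatorTable.OR, "||");
    System.out.println(plus.calciteOperator() + ", " + and.calciteOperator()
        + ", " + or.calciteOperator() + " bound to Druid +, &&, ||");
  }
}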

http://git-wip-us.apache.org/repos/asf/calcite/blob/98f3704e/druid/src/main/java/org/apache/calcite/adapter/druid/SubstringOperatorConversion.java
----------------------------------------------------------------------
diff --git a/druid/src/main/java/org/apache/calcite/adapter/druid/SubstringOperatorConversion.java b/druid/src/main/java/org/apache/calcite/adapter/druid/SubstringOperatorConversion.java
new file mode 100644
index 0000000..d2342f3
--- /dev/null
+++ b/druid/src/main/java/org/apache/calcite/adapter/druid/SubstringOperatorConversion.java
@@ -0,0 +1,63 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to you under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.calcite.adapter.druid;
+
+import org.apache.calcite.rel.type.RelDataType;
+import org.apache.calcite.rex.RexCall;
+import org.apache.calcite.rex.RexLiteral;
+import org.apache.calcite.rex.RexNode;
+import org.apache.calcite.sql.SqlOperator;
+import org.apache.calcite.sql.fun.SqlStdOperatorTable;
+
+import javax.annotation.Nullable;
+
+/**
+ * Converts a Calcite SUBSTRING call to a Druid expression when possible.
+ */
+public class SubstringOperatorConversion implements DruidSqlOperatorConverter {
+  @Override public SqlOperator calciteOperator() {
+    return SqlStdOperatorTable.SUBSTRING;
+  }
+
+  @Nullable
+  @Override public String toDruidExpression(RexNode rexNode, RelDataType rowType,
+      DruidQuery query) {
+    final RexCall call = (RexCall) rexNode;
+    final String arg = DruidExpressions.toDruidExpression(
+        call.getOperands().get(0), rowType, query);
+    if (arg == null) {
+      return null;
+    }
+
+    // SQL is 1-indexed, Druid is 0-indexed.
+    final int index = RexLiteral.intValue(call.getOperands().get(1)) - 1;
+    final int length;
+    if (call.getOperands().size() > 2) {
+      // case: substring from index with length
+      length = RexLiteral.intValue(call.getOperands().get(2));
+    } else {
+      // case: substring from index to the end
+      length = -1;
+    }
+    return DruidQuery.format("substring(%s, %s, %s)",
+        arg,
+        DruidExpressions.numberLiteral(index),
+        DruidExpressions.numberLiteral(length));
+  }
+}
+
+// End SubstringOperatorConversion.java
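
A worked example of the index arithmetic above, as a standalone sketch (not part of
the patch):

public class SubstringIndexSketch {
  public static void main(String[] args) {
    // SUBSTRING("name" FROM 2 FOR 3): SQL offset 2 becomes Druid offset 1.
    int index = 2 - 1;
    System.out.println(String.format("substring(%s, %s, %s)", "\"name\"", index, 3));
    // prints: substring("name", 1, 3)

    // SUBSTRING("name" FROM 2) has no FOR clause; length -1 means "to the end".
    System.out.println(String.format("substring(%s, %s, %s)", "\"name\"", index, -1));
    // prints: substring("name", 1, -1)
  }
}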

http://git-wip-us.apache.org/repos/asf/calcite/blob/98f3704e/druid/src/main/java/org/apache/calcite/adapter/druid/TimeExtractionDimensionSpec.java
----------------------------------------------------------------------
diff --git a/druid/src/main/java/org/apache/calcite/adapter/druid/TimeExtractionDimensionSpec.java b/druid/src/main/java/org/apache/calcite/adapter/druid/TimeExtractionDimensionSpec.java
deleted file mode 100644
index 7ef19a6..0000000
--- a/druid/src/main/java/org/apache/calcite/adapter/druid/TimeExtractionDimensionSpec.java
+++ /dev/null
@@ -1,75 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to you under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.calcite.adapter.druid;
-
-/**
- * DimensionSpec implementation that uses a time format extraction function.
- */
-public class TimeExtractionDimensionSpec extends ExtractionDimensionSpec {
-
-  public TimeExtractionDimensionSpec(
-      ExtractionFunction extractionFunction, String outputName) {
-    super(DruidTable.DEFAULT_TIMESTAMP_COLUMN, extractionFunction, outputName);
-  }
-
-  /**
-   * Creates a time extraction DimensionSpec that renames the '__time' column
-   * to the given name.
-   *
-   * @param outputName name of the output column
-   *
-   * @return the time extraction DimensionSpec instance
-   */
-  public static TimeExtractionDimensionSpec makeFullTimeExtract(
-      String outputName, String timeZone) {
-    return new TimeExtractionDimensionSpec(
-        TimeExtractionFunction.createDefault(timeZone), outputName);
-  }
-
-  /**
-   * Creates a time extraction DimensionSpec that formats the '__time' column
-   * according to the given granularity and outputs the column with the given
-   * name. See {@link TimeExtractionFunction#VALID_TIME_EXTRACT} for set of valid extract
-   *
-   * @param granularity granularity to apply to the column
-   * @param outputName  name of the output column
-   *
-   * @return time field extraction DimensionSpec instance or null if granularity
-   * is not supported
-   */
-  public static TimeExtractionDimensionSpec makeTimeExtract(
-      Granularity granularity, String outputName, String timeZone) {
-    return new TimeExtractionDimensionSpec(
-        TimeExtractionFunction.createExtractFromGranularity(granularity, timeZone), outputName);
-  }
-
-  /**
-   * Creates floor time extraction dimension spec from Granularity with a given output name
-   * @param granularity granularity to apply to the time column
-   * @param outputName name of the output column
-   *
-   * @return floor time extraction DimensionSpec instance.
-   */
-  public static TimeExtractionDimensionSpec makeTimeFloor(Granularity granularity,
-      String outputName, String timeZone) {
-    ExtractionFunction fn =
-        TimeExtractionFunction.createFloorFromGranularity(granularity, timeZone);
-    return new TimeExtractionDimensionSpec(fn, outputName);
-  }
-}
-
-// End TimeExtractionDimensionSpec.java

http://git-wip-us.apache.org/repos/asf/calcite/blob/98f3704e/druid/src/main/java/org/apache/calcite/adapter/druid/TimeExtractionFunction.java
----------------------------------------------------------------------
diff --git a/druid/src/main/java/org/apache/calcite/adapter/druid/TimeExtractionFunction.java b/druid/src/main/java/org/apache/calcite/adapter/druid/TimeExtractionFunction.java
index 61e72e0..5b0265e 100644
--- a/druid/src/main/java/org/apache/calcite/adapter/druid/TimeExtractionFunction.java
+++ b/druid/src/main/java/org/apache/calcite/adapter/druid/TimeExtractionFunction.java
@@ -22,12 +22,18 @@ import org.apache.calcite.rex.RexLiteral;
 import org.apache.calcite.rex.RexNode;
 import org.apache.calcite.sql.SqlKind;
 
+import org.apache.calcite.sql.type.SqlTypeFamily;
+import org.apache.calcite.sql.type.SqlTypeName;
+
 import com.fasterxml.jackson.core.JsonGenerator;
 import com.google.common.collect.ImmutableSet;
 import com.google.common.collect.Sets;
 
 import java.io.IOException;
 import java.util.Locale;
+import java.util.TimeZone;
+
+import javax.annotation.Nullable;
 
 import static org.apache.calcite.adapter.druid.DruidQuery.writeFieldIf;
 
@@ -51,6 +57,15 @@ public class TimeExtractionFunction implements ExtractionFunction {
       TimeUnitRange.MINUTE,
       TimeUnitRange.SECOND);
 
+  private static final ImmutableSet<TimeUnitRange> VALID_TIME_FLOOR = Sets.immutableEnumSet(
+      TimeUnitRange.YEAR,
+      TimeUnitRange.MONTH,
+      TimeUnitRange.DAY,
+      TimeUnitRange.WEEK,
+      TimeUnitRange.HOUR,
+      TimeUnitRange.MINUTE,
+      TimeUnitRange.SECOND);
+
   public static final String ISO_TIME_FORMAT = "yyyy-MM-dd'T'HH:mm:ss.SSS'Z'";
 
   private final String format;
@@ -76,6 +91,14 @@ public class TimeExtractionFunction implements ExtractionFunction {
     generator.writeEndObject();
   }
 
+  public String getFormat() {
+    return format;
+  }
+
+  public Granularity getGranularity() {
+    return granularity;
+  }
+
   /**
    * Creates the default time format extraction function.
    *
@@ -94,7 +117,7 @@ public class TimeExtractionFunction implements ExtractionFunction {
    */
   public static TimeExtractionFunction createExtractFromGranularity(
       Granularity granularity, String timeZone) {
-    final String local = Locale.ROOT.toLanguageTag();
+    final String local = Locale.US.toLanguageTag();
     switch (granularity.getType()) {
     case DAY:
       return new TimeExtractionFunction("d", null, timeZone, local);
@@ -135,11 +158,12 @@ public class TimeExtractionFunction implements ExtractionFunction {
    *
    * @return true if the extract unit is valid
    */
+
   public static boolean isValidTimeExtract(RexNode rexNode) {
-    if (rexNode.getKind() != SqlKind.EXTRACT) {
+    final RexCall call = (RexCall) rexNode;
+    if (call.getKind() != SqlKind.EXTRACT || call.getOperands().size() != 2) {
       return false;
     }
-    final RexCall call = (RexCall) rexNode;
     final RexLiteral flag = (RexLiteral) call.operands.get(0);
     final TimeUnitRange timeUnit = (TimeUnitRange) flag.getValue();
     return timeUnit != null && VALID_TIME_EXTRACT.contains(timeUnit);
@@ -163,7 +187,38 @@ public class TimeExtractionFunction implements ExtractionFunction {
     }
     final RexLiteral flag = (RexLiteral) call.operands.get(1);
     final TimeUnitRange timeUnit = (TimeUnitRange) flag.getValue();
-    return timeUnit != null && VALID_TIME_EXTRACT.contains(timeUnit);
+    return timeUnit != null && VALID_TIME_FLOOR.contains(timeUnit);
+  }
+
+  /**
+   * @param rexNode cast RexNode
+   * @param timeZone time zone
+   *
+   * @return Druid time extraction function, or null when the cast cannot be translated.
+   */
+  @Nullable
+  public static TimeExtractionFunction translateCastToTimeExtract(RexNode rexNode,
+      TimeZone timeZone) {
+    assert rexNode.getKind() == SqlKind.CAST;
+    final RexCall rexCall = (RexCall) rexNode;
+    final String castFormat = DruidSqlCastConverter
+        .dateTimeFormatString(rexCall.getType().getSqlTypeName());
+    final String timeZoneId = timeZone == null ? null : timeZone.getID();
+    if (castFormat == null) {
+      // unknown format
+      return null;
+    }
+    if (rexCall.getType().getFamily() == SqlTypeFamily.DATE) {
+      return new TimeExtractionFunction(castFormat,
+          Granularities.createGranularity(TimeUnitRange.DAY, timeZoneId), timeZoneId,
+          Locale.ENGLISH.toString());
+    }
+    if (rexCall.getType().getSqlTypeName() == SqlTypeName.TIMESTAMP
+        || rexCall.getType().getSqlTypeName() == SqlTypeName.TIMESTAMP_WITH_LOCAL_TIME_ZONE) {
+      return new TimeExtractionFunction(castFormat, null, timeZoneId, Locale.ENGLISH.toString());
+    }
+
+    return null;
   }
 
 }
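
A small runnable sketch of the format strings translateCastToTimeExtract relies on,
via the public dateTimeFormatString helper added in this patch:

import org.apache.calcite.adapter.druid.DruidSqlCastConverter;
import org.apache.calcite.sql.type.SqlTypeName;

public class CastFormatSketch {
  public static void main(String[] args) {
    // CAST(.. AS DATE): paired with a DAY-granularity floor.
    System.out.println(DruidSqlCastConverter.dateTimeFormatString(SqlTypeName.DATE));
    // prints: yyyy-MM-dd

    // CAST(.. AS TIMESTAMP): no granularity, format only.
    System.out.println(DruidSqlCastConverter.dateTimeFormatString(SqlTypeName.TIMESTAMP));
    // prints: yyyy-MM-dd HH:mm:ss
  }
}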

http://git-wip-us.apache.org/repos/asf/calcite/blob/98f3704e/druid/src/main/java/org/apache/calcite/adapter/druid/UnaryPrefixOperatorConversion.java
----------------------------------------------------------------------
diff --git a/druid/src/main/java/org/apache/calcite/adapter/druid/UnaryPrefixOperatorConversion.java b/druid/src/main/java/org/apache/calcite/adapter/druid/UnaryPrefixOperatorConversion.java
new file mode 100644
index 0000000..a8e5da3
--- /dev/null
+++ b/druid/src/main/java/org/apache/calcite/adapter/druid/UnaryPrefixOperatorConversion.java
@@ -0,0 +1,63 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to you under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.calcite.adapter.druid;
+
+import org.apache.calcite.rel.type.RelDataType;
+import org.apache.calcite.rex.RexCall;
+import org.apache.calcite.rex.RexNode;
+import org.apache.calcite.sql.SqlOperator;
+
+import com.google.common.collect.Iterables;
+
+import java.util.List;
+
+/**
+ * Unary prefix operator conversion class, used to convert expressions like unary NOT and MINUS.
+ */
+public class UnaryPrefixOperatorConversion implements DruidSqlOperatorConverter {
+
+  private final SqlOperator operator;
+  private final String druidOperator;
+
+  public UnaryPrefixOperatorConversion(final SqlOperator operator, final String druidOperator) {
+    this.operator = operator;
+    this.druidOperator = druidOperator;
+  }
+
+  @Override public SqlOperator calciteOperator() {
+    return operator;
+  }
+
+  @Override public String toDruidExpression(RexNode rexNode, RelDataType rowType,
+      DruidQuery druidQuery) {
+
+    final RexCall call = (RexCall) rexNode;
+
+    final List<String> druidExpressions = DruidExpressions.toDruidExpressions(
+        druidQuery, rowType,
+        call.getOperands());
+
+    if (druidExpressions == null) {
+      return null;
+    }
+
+    return DruidQuery
+        .format("(%s %s)", druidOperator, Iterables.getOnlyElement(druidExpressions));
+  }
+}
+
+// End UnaryPrefixOperatorConversion.java
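
A hedged binding sketch; mapping Calcite's NOT to Druid's "!" is an assumption
consistent with Druid's expression language, not something this hunk registers:

package org.apache.calcite.adapter.druid; // sketch placed alongside the classes above

import org.apache.calcite.sql.fun.SqlStdOperatorTable;

/** Hypothetical binding sketch, not part of the patch. */
public class PrefixBindingSketch {
  public static void main(String[] args) {
    DruidSqlOperatorConverter not =
        new UnaryPrefixOperatorConversion(SqlStdOperatorTable.NOT, "!");
    // A converted operand x would render as: (! x)
    System.out.println(not.calciteOperator() + " -> (! expr)");
  }
}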

http://git-wip-us.apache.org/repos/asf/calcite/blob/98f3704e/druid/src/main/java/org/apache/calcite/adapter/druid/UnarySuffixOperatorConversion.java
----------------------------------------------------------------------
diff --git a/druid/src/main/java/org/apache/calcite/adapter/druid/UnarySuffixOperatorConversion.java b/druid/src/main/java/org/apache/calcite/adapter/druid/UnarySuffixOperatorConversion.java
new file mode 100644
index 0000000..015415f
--- /dev/null
+++ b/druid/src/main/java/org/apache/calcite/adapter/druid/UnarySuffixOperatorConversion.java
@@ -0,0 +1,62 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to you under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.calcite.adapter.druid;
+
+import org.apache.calcite.rel.type.RelDataType;
+import org.apache.calcite.rex.RexCall;
+import org.apache.calcite.rex.RexNode;
+import org.apache.calcite.sql.SqlOperator;
+
+import com.google.common.collect.Iterables;
+
+import java.util.List;
+
+/**
+ * Unary suffix operator conversion; used to convert expressions of the form
+ * {@code expression UNARY_OPERATOR}, such as {@code IS NULL}.
+ */
+public class UnarySuffixOperatorConversion implements DruidSqlOperatorConverter {
+  private final SqlOperator operator;
+  private final String druidOperator;
+
+  public UnarySuffixOperatorConversion(SqlOperator operator, String druidOperator) {
+    this.operator = operator;
+    this.druidOperator = druidOperator;
+  }
+
+  @Override public SqlOperator calciteOperator() {
+    return operator;
+  }
+
+  @Override public String toDruidExpression(RexNode rexNode, RelDataType rowType,
+      DruidQuery druidQuery) {
+    final RexCall call = (RexCall) rexNode;
+
+    final List<String> druidExpressions = DruidExpressions.toDruidExpressions(
+        druidQuery, rowType,
+        call.getOperands());
+
+    if (druidExpressions == null) {
+      return null;
+    }
+
+    return DruidQuery.format(
+            "(%s %s)",
+            Iterables.getOnlyElement(druidExpressions), druidOperator);
+  }
+}
+
+// End UnarySuffixOperatorConversion.java
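
For reference, a short sketch of the expected outputs of this converter,
assuming the operand translates to the Druid column reference "page" (the
column name is illustrative):

  // With the registrations added to DruidQuery.DEFAULT_OPERATORS_LIST later
  // in this patch, the "(operand operator)" suffix layout yields:
  //   "page" IS NULL      ->  ("page" == null)
  //   "page" IS NOT NULL  ->  ("page" != null)
  //   "page" IS TRUE      ->  ("page" > 0)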

http://git-wip-us.apache.org/repos/asf/calcite/blob/98f3704e/druid/src/main/java/org/apache/calcite/adapter/druid/VirtualColumn.java
----------------------------------------------------------------------
diff --git a/druid/src/main/java/org/apache/calcite/adapter/druid/VirtualColumn.java b/druid/src/main/java/org/apache/calcite/adapter/druid/VirtualColumn.java
new file mode 100644
index 0000000..7348cec
--- /dev/null
+++ b/druid/src/main/java/org/apache/calcite/adapter/druid/VirtualColumn.java
@@ -0,0 +1,100 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to you under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.calcite.adapter.druid;
+
+import com.fasterxml.jackson.core.JsonGenerator;
+import com.google.common.base.Preconditions;
+
+import java.io.IOException;
+import java.util.Locale;
+
+import static org.apache.calcite.adapter.druid.DruidQuery.writeFieldIf;
+
+/**
+ * Druid JSON-expression-based virtual column.
+ *
+ * <p>Virtual columns are Druid's expression-based equivalent of projections:
+ * derived columns computed from an expression at query time.
+ */
+public class VirtualColumn implements DruidJson {
+  private final String name;
+
+  private final String expression;
+
+  private final DruidType outputType;
+
+  public VirtualColumn(String name, String expression, DruidType outputType) {
+    this.name = Preconditions.checkNotNull(name);
+    this.expression = Preconditions.checkNotNull(expression);
+    this.outputType = outputType == null ? DruidType.FLOAT : outputType;
+  }
+
+  @Override public void write(JsonGenerator generator) throws IOException {
+    generator.writeStartObject();
+    generator.writeStringField("type", "expression");
+    generator.writeStringField("name", name);
+    generator.writeStringField("expression", expression);
+    writeFieldIf(generator, "outputType", getOutputType().toString().toUpperCase(Locale.ENGLISH));
+    generator.writeEndObject();
+  }
+
+  public String getName() {
+    return name;
+  }
+
+  public String getExpression() {
+    return expression;
+  }
+
+  public DruidType getOutputType() {
+    return outputType;
+  }
+
+  /**
+   * Virtual Column Builder
+   */
+  public static class Builder {
+    private String name;
+
+    private String expression;
+
+    private DruidType type;
+
+    public Builder withName(String name) {
+      this.name = name;
+      return this;
+    }
+
+    public Builder withExpression(String expression) {
+      this.expression = expression;
+      return this;
+    }
+
+    public Builder withType(DruidType type) {
+      this.type = type;
+      return this;
+    }
+
+    public VirtualColumn build() {
+      return new VirtualColumn(name, expression, type);
+    }
+  }
+
+  public static Builder builder() {
+    return new Builder();
+  }
+}
+
+// End VirtualColumn.java
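
For illustration, a minimal sketch of serializing a VirtualColumn to the JSON
that Druid expects (it assumes the classes added in this patch are on the
classpath; the column name and expression are made up):

  import org.apache.calcite.adapter.druid.DruidType;
  import org.apache.calcite.adapter.druid.VirtualColumn;

  import com.fasterxml.jackson.core.JsonFactory;
  import com.fasterxml.jackson.core.JsonGenerator;

  import java.io.StringWriter;

  public class VirtualColumnSketch {
    public static void main(String[] args) throws Exception {
      final VirtualColumn vc = VirtualColumn.builder()
          .withName("vc")
          .withExpression("(\"store_sales\" + 1)")
          .withType(DruidType.FLOAT)
          .build();
      final StringWriter sw = new StringWriter();
      final JsonGenerator generator = new JsonFactory().createGenerator(sw);
      vc.write(generator);
      generator.close();
      // Expected, per write() above:
      // {"type":"expression","name":"vc",
      //  "expression":"(\"store_sales\" + 1)","outputType":"FLOAT"}
      System.out.println(sw);
    }
  }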

http://git-wip-us.apache.org/repos/asf/calcite/blob/98f3704e/druid/src/test/java/org/apache/calcite/adapter/druid/DruidQueryFilterTest.java
----------------------------------------------------------------------
diff --git a/druid/src/test/java/org/apache/calcite/adapter/druid/DruidQueryFilterTest.java b/druid/src/test/java/org/apache/calcite/adapter/druid/DruidQueryFilterTest.java
index e8e42be..16e1f59 100644
--- a/druid/src/test/java/org/apache/calcite/adapter/druid/DruidQueryFilterTest.java
+++ b/druid/src/test/java/org/apache/calcite/adapter/druid/DruidQueryFilterTest.java
@@ -16,6 +16,7 @@
  */
 package org.apache.calcite.adapter.druid;
 
+import org.apache.calcite.config.CalciteConnectionConfig;
 import org.apache.calcite.jdbc.JavaTypeFactoryImpl;
 import org.apache.calcite.rel.type.RelDataType;
 import org.apache.calcite.rel.type.RelDataTypeSystem;
@@ -30,13 +31,12 @@ import com.google.common.collect.ImmutableList;
 import com.google.common.collect.ImmutableSet;
 
 import org.junit.Assert;
+import org.junit.Before;
 import org.junit.Test;
 import org.mockito.Mockito;
 
 import java.io.IOException;
 import java.io.StringWriter;
-import java.lang.reflect.InvocationTargetException;
-import java.lang.reflect.Method;
 import java.math.BigDecimal;
 import java.util.List;
 
@@ -47,8 +47,22 @@ import static org.hamcrest.core.Is.is;
  */
 public class DruidQueryFilterTest {
 
-  @Test public void testInFilter() throws NoSuchMethodException,
-      InvocationTargetException, IllegalAccessException, IOException {
+  private DruidQuery druidQuery;
+
+  @Before
+  public void testSetup() {
+    druidQuery = Mockito.mock(DruidQuery.class);
+    final CalciteConnectionConfig connectionConfigMock = Mockito
+        .mock(CalciteConnectionConfig.class);
+    Mockito.when(connectionConfigMock.timeZone()).thenReturn("UTC");
+    Mockito.when(druidQuery.getConnectionConfig()).thenReturn(connectionConfigMock);
+    Mockito.when(druidQuery.getDruidTable())
+        .thenReturn(
+            new DruidTable(Mockito.mock(DruidSchema.class), "dataSource", null,
+                ImmutableSet.<String>of(), "timestamp", null, null,
+                null
+            ));
+  }
+
+  @Test public void testInFilter() throws IOException {
     final Fixture f = new Fixture();
     final List<? extends RexNode> listRexNodes =
         ImmutableList.of(f.rexBuilder.makeInputRef(f.varcharRowType, 0),
@@ -58,13 +72,9 @@ public class DruidQueryFilterTest {
 
     RexNode inRexNode =
         f.rexBuilder.makeCall(SqlStdOperatorTable.IN, listRexNodes);
-    Method translateFilter =
-        DruidQuery.Translator.class.getDeclaredMethod("translateFilter",
-            RexNode.class);
-    translateFilter.setAccessible(true);
-    DruidQuery.JsonInFilter returnValue =
-        (DruidQuery.JsonInFilter) translateFilter.invoke(f.translatorStringKind,
-            inRexNode);
+    DruidJsonFilter returnValue = DruidJsonFilter
+        .toDruidFilters(inRexNode, f.varcharRowType, druidQuery);
+    Assert.assertNotNull("Filter is null", returnValue);
     JsonFactory jsonFactory = new JsonFactory();
     final StringWriter sw = new StringWriter();
     JsonGenerator jsonGenerator = jsonFactory.createGenerator(sw);
@@ -76,8 +86,7 @@ public class DruidQueryFilterTest {
             + "\"values\":[\"1\",\"5\",\"value1\"]}"));
   }
 
-  @Test public void testBetweenFilterStringCase() throws NoSuchMethodException,
-      InvocationTargetException, IllegalAccessException, IOException {
+  @Test public void testBetweenFilterStringCase() throws IOException {
     final Fixture f = new Fixture();
     final List<RexNode> listRexNodes =
         ImmutableList.of(f.rexBuilder.makeLiteral(false),
@@ -88,13 +97,9 @@ public class DruidQueryFilterTest {
     RexNode betweenRexNode = f.rexBuilder.makeCall(relDataType,
         SqlStdOperatorTable.BETWEEN, listRexNodes);
 
-    Method translateFilter =
-        DruidQuery.Translator.class.getDeclaredMethod("translateFilter",
-            RexNode.class);
-    translateFilter.setAccessible(true);
-    DruidQuery.JsonBound returnValue =
-        (DruidQuery.JsonBound) translateFilter.invoke(f.translatorStringKind,
-            betweenRexNode);
+    DruidJsonFilter returnValue = DruidJsonFilter
+        .toDruidFilters(betweenRexNode, f.varcharRowType, druidQuery);
+    Assert.assertNotNull("Filter is null", returnValue);
     JsonFactory jsonFactory = new JsonFactory();
     final StringWriter sw = new StringWriter();
     JsonGenerator jsonGenerator = jsonFactory.createGenerator(sw);
@@ -120,8 +125,6 @@ public class DruidQueryFilterTest {
     final RelDataType varcharRowType = typeFactory.builder()
         .add("dimensionName", varcharType)
         .build();
-    final DruidQuery.Translator translatorStringKind =
-        new DruidQuery.Translator(druidTable, varcharRowType, "UTC");
   }
 }
 


[3/4] calcite git commit: [CALCITE-2170] Use Druid Expressions capabilities to improve the amount of work that can be pushed to Druid

Posted by jc...@apache.org.
http://git-wip-us.apache.org/repos/asf/calcite/blob/98f3704e/druid/src/main/java/org/apache/calcite/adapter/druid/DruidQuery.java
----------------------------------------------------------------------
diff --git a/druid/src/main/java/org/apache/calcite/adapter/druid/DruidQuery.java b/druid/src/main/java/org/apache/calcite/adapter/druid/DruidQuery.java
index 4056412..aa9a02a 100644
--- a/druid/src/main/java/org/apache/calcite/adapter/druid/DruidQuery.java
+++ b/druid/src/main/java/org/apache/calcite/adapter/druid/DruidQuery.java
@@ -19,7 +19,6 @@ package org.apache.calcite.adapter.druid;
 import org.apache.calcite.DataContext;
 import org.apache.calcite.avatica.ColumnMetaData;
 import org.apache.calcite.config.CalciteConnectionConfig;
-import org.apache.calcite.config.CalciteConnectionProperty;
 import org.apache.calcite.interpreter.BindableRel;
 import org.apache.calcite.interpreter.Bindables;
 import org.apache.calcite.interpreter.Compiler;
@@ -32,7 +31,6 @@ import org.apache.calcite.plan.RelOptCost;
 import org.apache.calcite.plan.RelOptPlanner;
 import org.apache.calcite.plan.RelOptRule;
 import org.apache.calcite.plan.RelOptTable;
-import org.apache.calcite.plan.RelOptUtil;
 import org.apache.calcite.plan.RelTraitSet;
 import org.apache.calcite.rel.AbstractRelNode;
 import org.apache.calcite.rel.RelFieldCollation;
@@ -53,59 +51,118 @@ import org.apache.calcite.rex.RexCall;
 import org.apache.calcite.rex.RexInputRef;
 import org.apache.calcite.rex.RexLiteral;
 import org.apache.calcite.rex.RexNode;
-import org.apache.calcite.rex.RexUtil;
 import org.apache.calcite.runtime.Hook;
 import org.apache.calcite.schema.ScannableTable;
 import org.apache.calcite.sql.SqlKind;
+import org.apache.calcite.sql.SqlOperator;
+import org.apache.calcite.sql.fun.SqlStdOperatorTable;
 import org.apache.calcite.sql.type.SqlTypeFamily;
 import org.apache.calcite.sql.type.SqlTypeName;
 import org.apache.calcite.sql.validate.SqlValidatorUtil;
 import org.apache.calcite.util.ImmutableBitSet;
 import org.apache.calcite.util.Litmus;
 import org.apache.calcite.util.Pair;
-import org.apache.calcite.util.TimestampString;
 import org.apache.calcite.util.Util;
 
 import com.fasterxml.jackson.core.JsonFactory;
 import com.fasterxml.jackson.core.JsonGenerator;
-import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Function;
 import com.google.common.base.Preconditions;
+import com.google.common.base.Predicate;
+import com.google.common.base.Strings;
+import com.google.common.base.Throwables;
 import com.google.common.collect.ImmutableList;
+import com.google.common.collect.ImmutableMap;
 import com.google.common.collect.Iterables;
 import com.google.common.collect.Lists;
-import com.google.common.collect.Sets;
+import com.google.common.collect.Maps;
 
 import org.joda.time.Interval;
 
 import java.io.IOException;
 import java.io.StringWriter;
-import java.math.BigDecimal;
-import java.text.SimpleDateFormat;
 import java.util.ArrayList;
+import java.util.HashSet;
 import java.util.List;
 import java.util.Locale;
+import java.util.Map;
 import java.util.Objects;
-import java.util.Properties;
 import java.util.Set;
 import java.util.TimeZone;
 import java.util.regex.Pattern;
 
-import static org.apache.calcite.sql.SqlKind.INPUT_REF;
+import javax.annotation.Nullable;
 
 /**
  * Relational expression representing a scan of a Druid data set.
  */
 public class DruidQuery extends AbstractRelNode implements BindableRel {
 
+  /**
+   * Provides a standard list of supported Calcite operators that can be converted to
+   * Druid expressions. This can be used as-is or re-adapted based on the underlying
+   * engine's operator syntax.
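+   *
+   * <p>For example, a downstream adapter could build its own operator table on
+   * top of the defaults (a sketch; the Druid function name for {@code ASIN} is
+   * an assumption, not part of this change):
+   *
+   * <blockquote><pre>
+   * List&lt;DruidSqlOperatorConverter&gt; ops =
+   *     ImmutableList.&lt;DruidSqlOperatorConverter&gt;builder()
+   *         .addAll(DruidQuery.DEFAULT_OPERATORS_LIST)
+   *         .add(new DirectOperatorConversion(SqlStdOperatorTable.ASIN, "asin"))
+   *         .build();
+   * </pre></blockquote>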
+   */
+  public static final List<DruidSqlOperatorConverter> DEFAULT_OPERATORS_LIST =
+      ImmutableList.<DruidSqlOperatorConverter>builder()
+          .add(new DirectOperatorConversion(SqlStdOperatorTable.EXP, "exp"))
+          .add(new DirectOperatorConversion(SqlStdOperatorTable.CONCAT, "concat"))
+          .add(new DirectOperatorConversion(SqlStdOperatorTable.DIVIDE_INTEGER, "div"))
+          .add(new DirectOperatorConversion(SqlStdOperatorTable.LIKE, "like"))
+          .add(new DirectOperatorConversion(SqlStdOperatorTable.LN, "log"))
+          .add(new DirectOperatorConversion(SqlStdOperatorTable.SQRT, "sqrt"))
+          .add(new DirectOperatorConversion(SqlStdOperatorTable.LOWER, "lower"))
+          .add(new DirectOperatorConversion(SqlStdOperatorTable.LOG10, "log10"))
+          .add(new DirectOperatorConversion(SqlStdOperatorTable.REPLACE, "replace"))
+          .add(new DirectOperatorConversion(SqlStdOperatorTable.UPPER, "upper"))
+          .add(new DirectOperatorConversion(SqlStdOperatorTable.POWER, "pow"))
+          .add(new DirectOperatorConversion(SqlStdOperatorTable.ABS, "abs"))
+          .add(new DirectOperatorConversion(SqlStdOperatorTable.SIN, "sin"))
+          .add(new DirectOperatorConversion(SqlStdOperatorTable.COS, "cos"))
+          .add(new DirectOperatorConversion(SqlStdOperatorTable.TAN, "tan"))
+          .add(new DirectOperatorConversion(SqlStdOperatorTable.CASE, "case_searched"))
+          .add(new DirectOperatorConversion(SqlStdOperatorTable.CHAR_LENGTH, "strlen"))
+          .add(new DirectOperatorConversion(SqlStdOperatorTable.CHARACTER_LENGTH, "strlen"))
+          .add(new BinaryOperatorConversion(SqlStdOperatorTable.EQUALS, "=="))
+          .add(new BinaryOperatorConversion(SqlStdOperatorTable.NOT_EQUALS, "!="))
+          .add(new NaryOperatorConverter(SqlStdOperatorTable.OR, "||"))
+          .add(new NaryOperatorConverter(SqlStdOperatorTable.AND, "&&"))
+          .add(new BinaryOperatorConversion(SqlStdOperatorTable.LESS_THAN, "<"))
+          .add(new BinaryOperatorConversion(SqlStdOperatorTable.LESS_THAN_OR_EQUAL, "<="))
+          .add(new BinaryOperatorConversion(SqlStdOperatorTable.GREATER_THAN, ">"))
+          .add(new BinaryOperatorConversion(SqlStdOperatorTable.GREATER_THAN_OR_EQUAL, ">="))
+          .add(new BinaryOperatorConversion(SqlStdOperatorTable.PLUS, "+"))
+          .add(new BinaryOperatorConversion(SqlStdOperatorTable.MINUS, "-"))
+          .add(new BinaryOperatorConversion(SqlStdOperatorTable.MULTIPLY, "*"))
+          .add(new BinaryOperatorConversion(SqlStdOperatorTable.DIVIDE, "/"))
+          .add(new BinaryOperatorConversion(SqlStdOperatorTable.MOD, "%"))
+          .add(new DruidSqlCastConverter())
+          .add(new ExtractOperatorConversion())
+          .add(new UnaryPrefixOperatorConversion(SqlStdOperatorTable.NOT, "!"))
+          .add(new UnaryPrefixOperatorConversion(SqlStdOperatorTable.UNARY_MINUS, "-"))
+          .add(new UnarySuffixOperatorConversion(SqlStdOperatorTable.IS_FALSE, "<= 0"))
+          .add(new UnarySuffixOperatorConversion(SqlStdOperatorTable.IS_NOT_TRUE, "<= 0"))
+          .add(new UnarySuffixOperatorConversion(SqlStdOperatorTable.IS_TRUE, "> 0"))
+          .add(new UnarySuffixOperatorConversion(SqlStdOperatorTable.IS_NOT_FALSE, "> 0"))
+          .add(new UnarySuffixOperatorConversion(SqlStdOperatorTable.IS_NULL, "== null"))
+          .add(new UnarySuffixOperatorConversion(SqlStdOperatorTable.IS_NOT_NULL, "!= null"))
+          .add(new FloorOperatorConversion())
+          .add(new CeilOperatorConversion())
+          .add(new SubstringOperatorConversion())
+          .build();
   protected QuerySpec querySpec;
 
   final RelOptTable table;
   final DruidTable druidTable;
   final ImmutableList<Interval> intervals;
   final ImmutableList<RelNode> rels;
+  /**
+   * This operator map provides, for each Calcite SQL operator, the
+   * DruidSqlOperatorConverter used to convert a Calcite RexNode to a Druid
+   * expression when possible.
+   */
+  final Map<SqlOperator, DruidSqlOperatorConverter> converterOperatorMap;
 
-  private static final Pattern VALID_SIG = Pattern.compile("sf?p?(a?|ao)l?");
+  private static final Pattern VALID_SIG = Pattern.compile("sf?p?(a?|ah|ah?o)l?");
   private static final String EXTRACT_COLUMN_NAME_PREFIX = "extract";
   private static final String FLOOR_COLUMN_NAME_PREFIX = "floor";
   protected static final String DRUID_QUERY_FETCH = "druid.query.fetch";
@@ -120,25 +177,236 @@ public class DruidQuery extends AbstractRelNode implements BindableRel {
    * @param druidTable     Druid table
    * @param intervals      Intervals for the query
    * @param rels           Internal relational expressions
+   * @param converterOperatorMap mapping of Calcite SQL operators to Druid expression converters
    */
   protected DruidQuery(RelOptCluster cluster, RelTraitSet traitSet,
       RelOptTable table, DruidTable druidTable,
-      List<Interval> intervals, List<RelNode> rels) {
+      List<Interval> intervals, List<RelNode> rels,
+      Map<SqlOperator, DruidSqlOperatorConverter> converterOperatorMap) {
     super(cluster, traitSet);
     this.table = table;
     this.druidTable = druidTable;
     this.intervals = ImmutableList.copyOf(intervals);
     this.rels = ImmutableList.copyOf(rels);
-
+    this.converterOperatorMap = Preconditions.checkNotNull(converterOperatorMap,
+        "Operator map cannot be null");
     assert isValid(Litmus.THROW, null);
   }
 
+  /** Returns whether a signature represents a sequence of relational operators
+   * that can be translated into a valid Druid query. */
+  static boolean isValidSignature(String signature) {
+    return VALID_SIG.matcher(signature).matches();
+  }
+
+  /** Creates a DruidQuery. */
+  public static DruidQuery create(RelOptCluster cluster, RelTraitSet traitSet,
+      RelOptTable table, DruidTable druidTable, List<RelNode> rels) {
+    final ImmutableMap<SqlOperator, DruidSqlOperatorConverter> converterOperatorMap =
+        ImmutableMap.<SqlOperator, DruidSqlOperatorConverter>builder().putAll(
+        Lists.transform(DEFAULT_OPERATORS_LIST, new Function<DruidSqlOperatorConverter,
+            Map.Entry<SqlOperator, DruidSqlOperatorConverter>>() {
+          @Nullable @Override public Map.Entry<SqlOperator, DruidSqlOperatorConverter> apply(
+              final DruidSqlOperatorConverter input) {
+            return Maps.immutableEntry(input.calciteOperator(), input);
+          }
+        })).build();
+    return create(cluster, traitSet, table, druidTable, druidTable.intervals, rels,
+        converterOperatorMap);
+  }
+
+  /** Creates a DruidQuery. */
+  public static DruidQuery create(RelOptCluster cluster, RelTraitSet traitSet,
+      RelOptTable table, DruidTable druidTable, List<RelNode> rels,
+      Map<SqlOperator, DruidSqlOperatorConverter> converterOperatorMap) {
+    return create(cluster, traitSet, table, druidTable, druidTable.intervals, rels,
+        converterOperatorMap);
+  }
+
+  /**
+   * Creates a DruidQuery.
+   */
+  private static DruidQuery create(RelOptCluster cluster, RelTraitSet traitSet,
+      RelOptTable table, DruidTable druidTable, List<Interval> intervals,
+      List<RelNode> rels, Map<SqlOperator, DruidSqlOperatorConverter> converterOperatorMap) {
+    return new DruidQuery(cluster, traitSet, table, druidTable, intervals, rels,
+        converterOperatorMap);
+  }
+
+  /** Extends a DruidQuery. */
+  public static DruidQuery extendQuery(DruidQuery query, RelNode r) {
+    final ImmutableList.Builder<RelNode> builder = ImmutableList.builder();
+    return DruidQuery.create(query.getCluster(), r.getTraitSet().replace(query.getConvention()),
+        query.getTable(), query.druidTable, query.intervals,
+        builder.addAll(query.rels).add(r).build(), query.getOperatorConversionMap());
+  }
+
+  /** Extends a DruidQuery. */
+  public static DruidQuery extendQuery(DruidQuery query,
+      List<Interval> intervals) {
+    return DruidQuery.create(query.getCluster(), query.getTraitSet(), query.getTable(),
+        query.druidTable, intervals, query.rels, query.getOperatorConversionMap());
+  }
+
+  /**
+   * Translates a Rex node (a leaf input ref, or an EXTRACT, FLOOR or CAST on
+   * top of one) to a Druid column plus an optional extraction function.
+   *
+   * @param rexNode    leaf input ref to a Druid column
+   * @param rowType    row type
+   * @param druidQuery Druid query
+   *
+   * @return {@link Pair} of column name and extraction function on top of the
+   * input ref, or {@code Pair.of(null, null)} when it cannot be translated to
+   * a valid Druid column
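+   *
+   * <p>For example (a sketch of expected translations): an input ref over a
+   * dimension {@code countryName} yields {@code Pair.of("countryName", null)},
+   * while {@code FLOOR("__time" TO DAY)} yields {@code Pair.of("__time", fn)},
+   * where {@code fn} is a time-floor extraction function with DAY granularity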
+   */
+  protected static Pair<String, ExtractionFunction> toDruidColumn(RexNode rexNode,
+      RelDataType rowType, DruidQuery druidQuery) {
+    final String columnName;
+    final ExtractionFunction extractionFunction;
+    final Granularity granularity;
+    switch (rexNode.getKind()) {
+    case INPUT_REF:
+      columnName = extractColumnName(rexNode, rowType, druidQuery);
+      //@TODO We can remove this ugly check by treating Druid time columns as LONG
+      if (rexNode.getType().getFamily() == SqlTypeFamily.DATE
+          || rexNode.getType().getFamily() == SqlTypeFamily.TIMESTAMP) {
+        extractionFunction = TimeExtractionFunction
+            .createDefault(druidQuery.getConnectionConfig().timeZone());
+      } else {
+        extractionFunction = null;
+      }
+      break;
+    case EXTRACT:
+      granularity = DruidDateTimeUtils
+          .extractGranularity(rexNode, druidQuery.getConnectionConfig().timeZone());
+      if (granularity == null) {
+        // unknown Granularity
+        return Pair.of(null, null);
+      }
+      if (!TimeExtractionFunction.isValidTimeExtract((RexCall) rexNode)) {
+        return Pair.of(null, null);
+      }
+      extractionFunction =
+          TimeExtractionFunction.createExtractFromGranularity(granularity,
+              druidQuery.getConnectionConfig().timeZone());
+      columnName =
+          extractColumnName(((RexCall) rexNode).getOperands().get(1), rowType, druidQuery);
+
+      break;
+    case FLOOR:
+      granularity = DruidDateTimeUtils
+          .extractGranularity(rexNode, druidQuery.getConnectionConfig().timeZone());
+      if (granularity == null) {
+        // unknown Granularity
+        return Pair.of(null, null);
+      }
+      if (!TimeExtractionFunction.isValidTimeFloor((RexCall) rexNode)) {
+        return Pair.of(null, null);
+      }
+      extractionFunction =
+          TimeExtractionFunction
+              .createFloorFromGranularity(granularity, druidQuery.getConnectionConfig().timeZone());
+      columnName =
+          extractColumnName(((RexCall) rexNode).getOperands().get(0), rowType, druidQuery);
+      break;
+    case CAST:
+      // Case: we have a cast over an input ref. Check that the cast is valid.
+      if (!isValidLeafCast(rexNode)) {
+        return Pair.of(null, null);
+      }
+      columnName =
+          extractColumnName(((RexCall) rexNode).getOperands().get(0), rowType, druidQuery);
+      // Case: CAST to TIME/DATE; need to make sure that we have a valid extraction fn
+      final SqlTypeName toTypeName = rexNode.getType().getSqlTypeName();
+      if (toTypeName.getFamily() == SqlTypeFamily.TIMESTAMP
+          || toTypeName.getFamily() == SqlTypeFamily.DATETIME) {
+        extractionFunction = TimeExtractionFunction.translateCastToTimeExtract(rexNode,
+            TimeZone.getTimeZone(druidQuery.getConnectionConfig().timeZone()));
+        if (extractionFunction == null) {
+          // No extraction function means the cast is not valid; bail out
+          return Pair.of(null, null);
+        }
+      } else {
+        extractionFunction = null;
+      }
+      break;
+    default:
+      return Pair.of(null, null);
+    }
+    return Pair.of(columnName, extractionFunction);
+  }
+
+  /**
+   * @param rexNode the CAST RexNode to check
+   *
+   * @return true if the cast operand is an input ref and the cast is a valid
+   * Druid cast operation
+   */
+  private static boolean isValidLeafCast(RexNode rexNode) {
+    assert rexNode.isA(SqlKind.CAST);
+    final RexNode input = ((RexCall) rexNode).getOperands().get(0);
+    if (!input.isA(SqlKind.INPUT_REF)) {
+      // it is not a leaf cast don't bother going further.
+      return false;
+    }
+    final SqlTypeName toTypeName = rexNode.getType().getSqlTypeName();
+    if (toTypeName.getFamily() == SqlTypeFamily.CHARACTER) {
+      // CAST of input to character type
+      return true;
+    }
+    if (toTypeName.getFamily() == SqlTypeFamily.NUMERIC) {
+      // CAST of input to numeric type, it is part of a bounded comparison
+      return true;
+    }
+    if (toTypeName.getFamily() == SqlTypeFamily.TIMESTAMP
+        || toTypeName.getFamily() == SqlTypeFamily.DATETIME) {
+      // CAST of literal to timestamp type
+      return true;
+    }
+    if (toTypeName.getFamily().contains(input.getType())) {
+      // Same type family; it is okay to push it
+      return true;
+    }
+    // Currently other CAST operations cannot be pushed to Druid
+    return false;
+  }
+
+  /**
+   * @param rexNode Druid input ref node
+   * @param rowType row type
+   * @param query   Druid query
+   *
+   * @return Druid column name or null when not possible to translate.
+   */
+  @Nullable
+  protected static String extractColumnName(RexNode rexNode, RelDataType rowType,
+      DruidQuery query) {
+    if (rexNode.getKind() == SqlKind.INPUT_REF) {
+      final RexInputRef ref = (RexInputRef) rexNode;
+      final String columnName = rowType.getFieldNames().get(ref.getIndex());
+      if (columnName == null) {
+        return null;
+      }
+      // Calcite indirectly renames timestampFieldName to Druid's native `__time` column
+      if (query.getDruidTable().timestampFieldName.equals(columnName)) {
+        return DruidTable.DEFAULT_TIMESTAMP_COLUMN;
+      }
+      return columnName;
+    }
+    return null;
+  }
+
+  /**
+   * Equivalent of String.format(Locale.ENGLISH, message, formatArgs).
+   */
+  public static String format(String message, Object... formatArgs) {
+    return String.format(Locale.ENGLISH, message, formatArgs);
+  }
+
   /** Returns a string describing the operations inside this query.
    *
-   * <p>For example, "sfpaol" means {@link TableScan} (s)
+   * <p>For example, "sfpahol" means {@link TableScan} (s)
    * followed by {@link Filter} (f)
    * followed by {@link Project} (p)
    * followed by {@link Aggregate} (a)
+   * followed by {@link Filter} (h, a "having" filter on the aggregate)
    * followed by {@link Project} (o)
    * followed by {@link Sort} (l).
    *
@@ -150,11 +418,12 @@ public class DruidQuery extends AbstractRelNode implements BindableRel {
     for (RelNode rel : rels) {
       b.append(rel instanceof TableScan ? 's'
           : (rel instanceof Project && flag) ? 'o'
-          : rel instanceof Filter ? 'f'
-          : rel instanceof Aggregate ? 'a'
-          : rel instanceof Sort ? 'l'
-          : rel instanceof Project ? 'p'
-          : '!');
+              : (rel instanceof Filter && flag) ? 'h'
+                  : rel instanceof Aggregate ? 'a'
+                      : rel instanceof Filter ? 'f'
+                          : rel instanceof Sort ? 'l'
+                              : rel instanceof Project ? 'p'
+                                  : '!');
       flag = flag || rel instanceof Aggregate;
     }
     return b.toString();
@@ -194,7 +463,9 @@ public class DruidQuery extends AbstractRelNode implements BindableRel {
         }
         if (r instanceof Filter) {
           final Filter filter = (Filter) r;
-          if (!isValidFilter(filter.getCondition())) {
+          final DruidJsonFilter druidJsonFilter = DruidJsonFilter
+              .toDruidFilters(filter.getCondition(), filter.getInput().getRowType(), this);
+          if (druidJsonFilter == null) {
             return litmus.fail("invalid filter [{}]", filter.getCondition());
           }
         }
@@ -209,109 +480,8 @@ public class DruidQuery extends AbstractRelNode implements BindableRel {
     return true;
   }
 
-  public boolean isValidFilter(RexNode e) {
-    return isValidFilter(e, false);
-  }
-
-  public boolean isValidFilter(RexNode e, boolean boundedComparator) {
-    switch (e.getKind()) {
-    case INPUT_REF:
-      return true;
-    case LITERAL:
-      return ((RexLiteral) e).getValue() != null;
-    case AND:
-    case OR:
-    case NOT:
-    case IN:
-    case IS_NULL:
-    case IS_NOT_NULL:
-      return areValidFilters(((RexCall) e).getOperands(), false);
-    case EQUALS:
-    case NOT_EQUALS:
-    case LESS_THAN:
-    case LESS_THAN_OR_EQUAL:
-    case GREATER_THAN:
-    case GREATER_THAN_OR_EQUAL:
-    case BETWEEN:
-      return areValidFilters(((RexCall) e).getOperands(), true);
-    case CAST:
-      return isValidCast((RexCall) e, boundedComparator);
-    case EXTRACT:
-      return TimeExtractionFunction.isValidTimeExtract((RexCall) e);
-    case FLOOR:
-      return TimeExtractionFunction.isValidTimeFloor((RexCall) e);
-    case IS_TRUE:
-      return isValidFilter(((RexCall) e).getOperands().get(0), boundedComparator);
-    default:
-      return false;
-    }
-  }
-
-  private boolean areValidFilters(List<RexNode> es, boolean boundedComparator) {
-    for (RexNode e : es) {
-      if (!isValidFilter(e, boundedComparator)) {
-        return false;
-      }
-    }
-    return true;
-  }
-
-  private static boolean isValidCast(RexCall e, boolean boundedComparator) {
-    assert e.isA(SqlKind.CAST);
-    if (e.getOperands().get(0).isA(INPUT_REF)
-        && e.getType().getFamily() == SqlTypeFamily.CHARACTER) {
-      // CAST of input to character type
-      return true;
-    }
-    if (e.getOperands().get(0).isA(INPUT_REF)
-        && e.getType().getFamily() == SqlTypeFamily.NUMERIC
-        && boundedComparator) {
-      // CAST of input to numeric type, it is part of a bounded comparison
-      return true;
-    }
-    if (e.getOperands().get(0).isA(SqlKind.LITERAL)
-        && (e.getType().getSqlTypeName() == SqlTypeName.DATE
-        || e.getType().getSqlTypeName() == SqlTypeName.TIMESTAMP
-        || e.getType().getSqlTypeName() == SqlTypeName.TIMESTAMP_WITH_LOCAL_TIME_ZONE)) {
-      // CAST of literal to timestamp type
-      return true;
-    }
-    // Currently other CAST operations cannot be pushed to Druid
-    return false;
-  }
-
-  /** Returns whether a signature represents an sequence of relational operators
-   * that can be translated into a valid Druid query. */
-  static boolean isValidSignature(String signature) {
-    return VALID_SIG.matcher(signature).matches();
-  }
-
-  /** Creates a DruidQuery. */
-  public static DruidQuery create(RelOptCluster cluster, RelTraitSet traitSet,
-      RelOptTable table, DruidTable druidTable, List<RelNode> rels) {
-    return new DruidQuery(cluster, traitSet, table, druidTable, druidTable.intervals, rels);
-  }
-
-  /** Creates a DruidQuery. */
-  private static DruidQuery create(RelOptCluster cluster, RelTraitSet traitSet,
-      RelOptTable table, DruidTable druidTable, List<Interval> intervals,
-      List<RelNode> rels) {
-    return new DruidQuery(cluster, traitSet, table, druidTable, intervals, rels);
-  }
-
-  /** Extends a DruidQuery. */
-  public static DruidQuery extendQuery(DruidQuery query, RelNode r) {
-    final ImmutableList.Builder<RelNode> builder = ImmutableList.builder();
-    return DruidQuery.create(query.getCluster(), r.getTraitSet().replace(query.getConvention()),
-        query.getTable(), query.druidTable, query.intervals,
-        builder.addAll(query.rels).add(r).build());
-  }
-
-  /** Extends a DruidQuery. */
-  public static DruidQuery extendQuery(DruidQuery query,
-      List<Interval> intervals) {
-    return DruidQuery.create(query.getCluster(), query.getTraitSet(), query.getTable(),
-        query.druidTable, intervals, query.rels);
+  protected Map<SqlOperator, DruidSqlOperatorConverter> getOperatorConversionMap() {
+    return converterOperatorMap;
   }
 
   @Override public RelNode copy(RelTraitSet traitSet, List<RelNode> inputs) {
@@ -389,6 +559,8 @@ public class DruidQuery extends AbstractRelNode implements BindableRel {
         .multiplyBy(
             RelMdUtil.linear(querySpec.fieldNames.size(), 2, 100, 1d, 2d))
         .multiplyBy(getQueryTypeCostMultiplier())
+        // A filter pushed into the scan leaf is better than a filter spec, when possible.
+        .multiplyBy(rels.size() > 1 && rels.get(1) instanceof Filter ? 0.5 : 1.0)
         // a plan with sort pushed to druid is better than doing sort outside of druid
         .multiplyBy(Util.last(rels) instanceof Sort ? 0.1 : 1.0)
         .multiplyBy(getIntervalCostMultiplier());
@@ -455,16 +627,14 @@ public class DruidQuery extends AbstractRelNode implements BindableRel {
     final RelDataType rowType = table.getRowType();
     int i = 1;
 
-    RexNode filter = null;
+    Filter filterRel = null;
     if (i < rels.size() && rels.get(i) instanceof Filter) {
-      final Filter filterRel = (Filter) rels.get(i++);
-      filter = filterRel.getCondition();
+      filterRel = (Filter) rels.get(i++);
     }
 
-    List<RexNode> projects = null;
+    Project project = null;
     if (i < rels.size() && rels.get(i) instanceof Project) {
-      final Project project = (Project) rels.get(i++);
-      projects = project.getProjects();
+      project = (Project) rels.get(i++);
     }
 
     ImmutableBitSet groupSet = null;
@@ -478,6 +648,11 @@ public class DruidQuery extends AbstractRelNode implements BindableRel {
           groupSet.cardinality());
     }
 
+    Filter havingFilter = null;
+    if (i < rels.size() && rels.get(i) instanceof Filter) {
+      havingFilter = (Filter) rels.get(i++);
+    }
+
     Project postProject = null;
     if (i < rels.size() && rels.get(i) instanceof Project) {
       postProject = (Project) rels.get(i++);
@@ -506,9 +681,9 @@ public class DruidQuery extends AbstractRelNode implements BindableRel {
       throw new AssertionError("could not implement all rels");
     }
 
-    return getQuery(rowType, filter, projects, groupSet, aggCalls, aggNames,
+    return getQuery(rowType, filterRel, project, groupSet, aggCalls, aggNames,
         collationIndexes, collationDirections, numericCollationBitSetBuilder.build(), fetch,
-        postProject);
+        postProject, havingFilter);
   }
 
   public QueryType getQueryType() {
@@ -523,355 +698,668 @@ public class DruidQuery extends AbstractRelNode implements BindableRel {
     return getCluster().getPlanner().getContext().unwrap(CalciteConnectionConfig.class);
   }
 
-  protected QuerySpec getQuery(RelDataType rowType, RexNode filter, List<RexNode> projects,
-      ImmutableBitSet groupSet, List<AggregateCall> aggCalls, List<String> aggNames,
-      List<Integer> collationIndexes, List<Direction> collationDirections,
-      ImmutableBitSet numericCollationIndexes, Integer fetch, Project postProject) {
-    final CalciteConnectionConfig config = getConnectionConfig();
-    QueryType queryType = QueryType.SCAN;
-    final Translator translator = new Translator(druidTable, rowType, config.timeZone());
-    List<String> fieldNames = rowType.getFieldNames();
-    Set<String> usedFieldNames = Sets.newHashSet(fieldNames);
-
-    // Handle filter
-    Json jsonFilter = null;
+  /**
+   * Translates a Filter rel to a Druid JSON filter object when possible.
+   * Currently the Filter rel's input has to be a Druid table scan.
+   *
+   * @param filterRel input filter rel
+   * @param druidQuery Druid query
+   *
+   * @return Druid JSON filter, or null if the filter cannot be translated
+   */
+  @Nullable
+  private static DruidJsonFilter computeFilter(@Nullable Filter filterRel,
+      DruidQuery druidQuery) {
+    if (filterRel == null) {
+      return null;
+    }
+    final RexNode filter = filterRel.getCondition();
+    final RelDataType inputRowType = filterRel.getInput().getRowType();
     if (filter != null) {
-      jsonFilter = translator.translateFilter(filter);
+      return DruidJsonFilter.toDruidFilters(filter, inputRowType, druidQuery);
     }
+    return null;
+  }
 
-    // Then we handle project
-    if (projects != null) {
-      translator.clearFieldNameLists();
-      final ImmutableList.Builder<String> builder = ImmutableList.builder();
-      for (RexNode project : projects) {
-        builder.add(translator.translate(project, true, false));
+  /**
+   * Translates a list of projects to Druid column names and virtual columns,
+   * if any. We cannot use {@link Pair#zip(Object[], Object[])}, since the
+   * sizes of the two lists can be different.
+   *
+   * @param projectRel   Project rel
+   * @param inputRowType input row type under the project
+   * @param druidQuery   Druid query
+   *
+   * @return Pair of the list of Druid columns and the list of expression
+   * virtual columns, or null when one of the projects cannot be translated
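+   *
+   * <p>For example (a sketch): a project of {@code UPPER("countryName")} cannot
+   * be expressed as a plain column, so it becomes a virtual column (named, say,
+   * {@code vc}) with expression {@code upper("countryName")}, and {@code vc} is
+   * what appears in the scan's column list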
+   */
+  @Nullable
+  protected static Pair<List<String>, List<VirtualColumn>> computeProjectAsScan(
+      @Nullable Project projectRel, RelDataType inputRowType, DruidQuery druidQuery) {
+    if (projectRel == null) {
+      return null;
+    }
+    final Set<String> usedFieldNames = new HashSet<>();
+    final ImmutableList.Builder<VirtualColumn> virtualColumnsBuilder = ImmutableList.builder();
+    final ImmutableList.Builder<String> projectedColumnsBuilder = ImmutableList.builder();
+    final List<RexNode> projects = projectRel.getProjects();
+    for (RexNode project : projects) {
+      Pair<String, ExtractionFunction> druidColumn =
+          toDruidColumn(project, inputRowType, druidQuery);
+      if (druidColumn.left == null || druidColumn.right != null) {
+        // It is a complex project, pushed as an expression
+        final String expression = DruidExpressions
+            .toDruidExpression(project, inputRowType, druidQuery);
+        if (expression == null) {
+          return null;
+        }
+        final String virColName = SqlValidatorUtil.uniquify("vc",
+            usedFieldNames, SqlValidatorUtil.EXPR_SUGGESTER);
+        virtualColumnsBuilder.add(VirtualColumn.builder()
+            .withName(virColName)
+            .withExpression(expression).withType(
+                DruidExpressions.EXPRESSION_TYPES.get(project.getType().getSqlTypeName()))
+            .build());
+        usedFieldNames.add(virColName);
+        projectedColumnsBuilder.add(virColName);
+      } else {
+        // simple inputRef or extractable function
+        if (usedFieldNames.contains(druidColumn.left)) {
+          final String virColName = SqlValidatorUtil.uniquify("vc",
+              usedFieldNames, SqlValidatorUtil.EXPR_SUGGESTER);
+          virtualColumnsBuilder.add(VirtualColumn.builder()
+              .withName(virColName)
+              .withExpression(DruidExpressions.fromColumn(druidColumn.left)).withType(
+                  DruidExpressions.EXPRESSION_TYPES.get(project.getType().getSqlTypeName()))
+              .build());
+          usedFieldNames.add(virColName);
+          projectedColumnsBuilder.add(virColName);
+        } else {
+          projectedColumnsBuilder.add(druidColumn.left);
+          usedFieldNames.add(druidColumn.left);
+        }
       }
-      fieldNames = builder.build();
     }
+    return Pair.<List<String>, List<VirtualColumn>>of(projectedColumnsBuilder.build(),
+        virtualColumnsBuilder.build());
+  }
 
-    // Finally we handle aggregate and sort. Handling of these
-    // operators is more complex, since we need to extract
-    // the conditions to know whether the query will be
-    // executed as a Timeseries, TopN, or GroupBy in Druid
-    final List<DimensionSpec> dimensions = new ArrayList<>();
-    final List<JsonAggregation> aggregations = new ArrayList<>();
-    final List<JsonPostAggregation> postAggs = new ArrayList<>();
-    Granularity finalGranularity = Granularities.all();
-    Direction timeSeriesDirection = null;
-    JsonLimit limit = null;
-    TimeExtractionDimensionSpec timeExtractionDimensionSpec = null;
-    if (groupSet != null) {
-      assert aggCalls != null;
-      assert aggNames != null;
-      assert aggCalls.size() == aggNames.size();
-
-      int timePositionIdx = -1;
-      ImmutableList.Builder<String> builder = ImmutableList.builder();
-      if (projects != null) {
-        for (int groupKey : groupSet) {
-          final String fieldName = fieldNames.get(groupKey);
-          final RexNode project = projects.get(groupKey);
-          if (project instanceof RexInputRef) {
-            // Reference could be to the timestamp or druid dimension but no druid metric
-            final RexInputRef ref = (RexInputRef) project;
-            final String originalFieldName = druidTable.getRowType(getCluster().getTypeFactory())
-                .getFieldList().get(ref.getIndex()).getName();
-            if (originalFieldName.equals(druidTable.timestampFieldName)) {
-              finalGranularity = Granularities.all();
-              String extractColumnName = SqlValidatorUtil.uniquify(EXTRACT_COLUMN_NAME_PREFIX,
-                  usedFieldNames, SqlValidatorUtil.EXPR_SUGGESTER);
-              timeExtractionDimensionSpec = TimeExtractionDimensionSpec.makeFullTimeExtract(
-                  extractColumnName, config.timeZone());
-              dimensions.add(timeExtractionDimensionSpec);
-              builder.add(extractColumnName);
-              assert timePositionIdx == -1;
-              timePositionIdx = groupKey;
-            } else {
-              dimensions.add(new DefaultDimensionSpec(fieldName));
-              builder.add(fieldName);
-            }
-          } else if (project instanceof RexCall) {
-            // Call, check if we should infer granularity
-            final RexCall call = (RexCall) project;
-            final Granularity funcGranularity =
-                DruidDateTimeUtils.extractGranularity(call, config.timeZone());
-            if (funcGranularity != null) {
-              final String extractColumnName;
-              switch (call.getKind()) {
-              case EXTRACT:
-                // case extract field from time column
-                finalGranularity = Granularities.all();
-                extractColumnName =
-                    SqlValidatorUtil.uniquify(EXTRACT_COLUMN_NAME_PREFIX + "_"
-                        + funcGranularity.getType().lowerName, usedFieldNames,
-                        SqlValidatorUtil.EXPR_SUGGESTER);
-                timeExtractionDimensionSpec = TimeExtractionDimensionSpec.makeTimeExtract(
-                    funcGranularity, extractColumnName, config.timeZone());
-                dimensions.add(timeExtractionDimensionSpec);
-                builder.add(extractColumnName);
-                break;
-              case FLOOR:
-                // case floor time column
-                if (groupSet.cardinality() > 1) {
-                  // case we have more than 1 group by key -> then will have druid group by
-                  extractColumnName =
-                      SqlValidatorUtil.uniquify(FLOOR_COLUMN_NAME_PREFIX
-                          + "_" + funcGranularity.getType().lowerName,
-                          usedFieldNames, SqlValidatorUtil.EXPR_SUGGESTER);
-                  dimensions.add(
-                      TimeExtractionDimensionSpec.makeTimeFloor(funcGranularity,
-                          extractColumnName, config.timeZone()));
-                  finalGranularity = Granularities.all();
-                  builder.add(extractColumnName);
-                } else {
-                  // case timeseries we can not use extraction function
-                  finalGranularity = funcGranularity;
-                  builder.add(fieldName);
-                }
-                assert timePositionIdx == -1;
-                timePositionIdx = groupKey;
-                break;
-              default:
-                throw new AssertionError();
-              }
+  /**
+   * Computes the group-by dimensions (and any virtual columns they need) for
+   * the given grouping keys.
+   *
+   * @param projectNode Project under the Aggregate, if any
+   * @param groupSet ordinals of the grouping keys, as listed in the projects list
+   * @param inputRowType input row type under the project
+   * @param druidQuery Druid query
+   *
+   * @return Pair of: an ordered {@code List<DimensionSpec>} containing the
+   * group-by dimensions, and a {@code List<VirtualColumn>} containing the
+   * Druid virtual column projections; or null if translation is not possible.
+   * Note that the sizes of the two lists can be different.
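+   *
+   * <p>For example (a sketch): grouping on {@code EXTRACT(YEAR FROM "__time")}
+   * produces an extraction dimension spec over {@code __time} with an output
+   * name such as {@code extract_year} and no virtual column, while grouping on
+   * a non-trivial expression produces a virtual column plus a default
+   * dimension spec over it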
+   */
+  @Nullable
+  protected static Pair<List<DimensionSpec>, List<VirtualColumn>> computeProjectGroupSet(
+      @Nullable Project projectNode, ImmutableBitSet groupSet,
+      RelDataType inputRowType,
+      DruidQuery druidQuery) {
+    final List<DimensionSpec> dimensionSpecList = new ArrayList<>();
+    final List<VirtualColumn> virtualColumnList = new ArrayList<>();
+    final Set<String> usedFieldNames = new HashSet<>();
+    for (int groupKey : groupSet) {
+      final DimensionSpec dimensionSpec;
+      final RexNode project;
+      if (projectNode == null) {
+        project = RexInputRef.of(groupKey, inputRowType);
+      } else {
+        project = projectNode.getProjects().get(groupKey);
+      }
 
-            } else {
-              dimensions.add(new DefaultDimensionSpec(fieldName));
-              builder.add(fieldName);
-            }
-          } else {
-            throw new AssertionError("incompatible project expression: " + project);
-          }
+      Pair<String, ExtractionFunction> druidColumn =
+          toDruidColumn(project, inputRowType, druidQuery);
+      if (druidColumn.left != null && druidColumn.right == null) {
+        // Case: simple input ref
+        dimensionSpec = new DefaultDimensionSpec(druidColumn.left, druidColumn.left,
+            DruidExpressions.EXPRESSION_TYPES.get(project.getType().getSqlTypeName()));
+        usedFieldNames.add(druidColumn.left);
+      } else if (druidColumn.left != null && druidColumn.right != null) {
+        // Case: it is an extraction dimension
+        final String columnPrefix;
+        //@TODO Remove this if-else statement! It is not really needed; it is here to make tests pass.
+        if (project.getKind() == SqlKind.EXTRACT) {
+          columnPrefix =
+              EXTRACT_COLUMN_NAME_PREFIX + "_" + Objects
+                  .requireNonNull(DruidDateTimeUtils
+                      .extractGranularity(project, druidQuery.getConnectionConfig().timeZone())
+                      .getType().lowerName);
+        } else if (project.getKind() == SqlKind.FLOOR) {
+          columnPrefix =
+              FLOOR_COLUMN_NAME_PREFIX + "_" + Objects
+                  .requireNonNull(DruidDateTimeUtils
+                      .extractGranularity(project, druidQuery.getConnectionConfig().timeZone())
+                      .getType().lowerName);
+        } else {
+          columnPrefix = "extract";
+        }
+        final String uniqueExtractColumnName = SqlValidatorUtil
+            .uniquify(columnPrefix, usedFieldNames,
+                SqlValidatorUtil.EXPR_SUGGESTER);
+        dimensionSpec = new ExtractionDimensionSpec(druidColumn.left,
+            druidColumn.right, uniqueExtractColumnName);
+        usedFieldNames.add(uniqueExtractColumnName);
+      } else {
+        // Case: it is an expression
+        final String expression = DruidExpressions
+            .toDruidExpression(project, inputRowType, druidQuery);
+        if (Strings.isNullOrEmpty(expression)) {
+          return null;
         }
+        final String name = SqlValidatorUtil
+            .uniquify("vc", usedFieldNames,
+                SqlValidatorUtil.EXPR_SUGGESTER);
+        VirtualColumn vc = new VirtualColumn(name, expression,
+            DruidExpressions.EXPRESSION_TYPES.get(project.getType().getSqlTypeName()));
+        virtualColumnList.add(vc);
+        dimensionSpec = new DefaultDimensionSpec(name, name,
+            DruidExpressions.EXPRESSION_TYPES.get(project.getType().getSqlTypeName()));
+        usedFieldNames.add(name);
+      }
+
+      dimensionSpecList.add(dimensionSpec);
+    }
+    return Pair.of(dimensionSpecList, virtualColumnList);
+  }
+
+  /**
+   * Translates aggregate calls to Druid JSON aggregations when possible.
+   *
+   * @param aggCalls list of aggregate calls to translate
+   * @param aggNames list of aggregate names
+   * @param project input project under the aggregate calls; null means TableScan -> Aggregate
+   * @param druidQuery Druid query rel
+   *
+   * @return list of valid Druid JSON aggregations, or null if any of the
+   * aggregates is not supported
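+   *
+   * <p>For example (a sketch): {@code SUM} over the projected expression
+   * {@code "store_sales" + 1} becomes a Druid sum aggregation that carries the
+   * Druid expression {@code ("store_sales" + 1)} instead of a plain
+   * {@code fieldName}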
+   */
+  @Nullable
+  protected static List<JsonAggregation> computeDruidJsonAgg(List<AggregateCall> aggCalls,
+      List<String> aggNames, @Nullable Project project,
+      DruidQuery druidQuery) {
+    final List<JsonAggregation> aggregations = new ArrayList<>();
+    for (Pair<AggregateCall, String> agg : Pair.zip(aggCalls, aggNames)) {
+      final String fieldName;
+      final String expression;
+      final AggregateCall aggCall = agg.left;
+      final RexNode filterNode;
+      // Type check first
+      final RelDataType type = aggCall.getType();
+      final SqlTypeName sqlTypeName = type.getSqlTypeName();
+      final boolean isNotAcceptedType;
+      if (SqlTypeFamily.APPROXIMATE_NUMERIC.getTypeNames().contains(sqlTypeName)
+          || SqlTypeFamily.INTEGER.getTypeNames().contains(sqlTypeName)) {
+        isNotAcceptedType = false;
+      } else if (SqlTypeFamily.EXACT_NUMERIC.getTypeNames().contains(sqlTypeName) && (
+          type.getScale() == 0 || druidQuery.getConnectionConfig().approximateDecimal())) {
+          // Decimal: if the scale is zero or we allow approximating decimals, we can proceed
+        isNotAcceptedType = false;
       } else {
-        for (int groupKey : groupSet) {
-          final String s = fieldNames.get(groupKey);
-          if (s.equals(druidTable.timestampFieldName)) {
-            finalGranularity = Granularities.all();
-            // Generate unique name as timestampFieldName is taken
-            String extractColumnName = SqlValidatorUtil.uniquify(EXTRACT_COLUMN_NAME_PREFIX,
-                usedFieldNames, SqlValidatorUtil.EXPR_SUGGESTER);
-            timeExtractionDimensionSpec = TimeExtractionDimensionSpec.makeFullTimeExtract(
-                extractColumnName, config.timeZone());
-            dimensions.add(timeExtractionDimensionSpec);
-            builder.add(extractColumnName);
-            assert timePositionIdx == -1;
-            timePositionIdx = groupKey;
+        isNotAcceptedType = true;
+      }
+      if (isNotAcceptedType) {
+        return null;
+      }
+
+      // Extract filters
+      if (project != null && aggCall.hasFilter()) {
+        filterNode = project.getProjects().get(aggCall.filterArg);
+      } else {
+        filterNode = null;
+      }
+      if (aggCall.getArgList().size() == 0) {
+        fieldName = null;
+        expression = null;
+      } else {
+        int index = Iterables.getOnlyElement(aggCall.getArgList());
+        if (project == null) {
+          fieldName = druidQuery.table.getRowType().getFieldNames().get(index);
+          expression = null;
+        } else {
+          final RexNode rexNode = project.getProjects().get(index);
+          final RelDataType inputRowType = project.getInput().getRowType();
+          if (rexNode.isA(SqlKind.INPUT_REF)) {
+            expression = null;
+            fieldName =
+                extractColumnName(rexNode, inputRowType, druidQuery);
           } else {
-            dimensions.add(new DefaultDimensionSpec(s));
-            builder.add(s);
+            expression = DruidExpressions
+                .toDruidExpression(rexNode, inputRowType, druidQuery);
+            if (Strings.isNullOrEmpty(expression)) {
+              return null;
+            }
+            fieldName = null;
           }
         }
+        // Exactly one of expression and fieldName must be null
+        assert expression == null ^ fieldName == null;
       }
+      final JsonAggregation jsonAggregation = getJsonAggregation(agg.right, agg.left, filterNode,
+          fieldName, expression,
+          druidQuery);
+      if (jsonAggregation == null) {
+        return null;
+      }
+      aggregations.add(jsonAggregation);
+    }
+    return aggregations;
+  }
+
+  protected QuerySpec getQuery(RelDataType rowType, Filter filter, Project project,
+      ImmutableBitSet groupSet, List<AggregateCall> aggCalls, List<String> aggNames,
+      List<Integer> collationIndexes, List<Direction> collationDirections,
+      ImmutableBitSet numericCollationIndexes, Integer fetch, Project postProject,
+      Filter havingFilter) {
+    // Handle filter
+    final DruidJsonFilter jsonFilter = computeFilter(filter, this);
 
-      for (Pair<AggregateCall, String> agg : Pair.zip(aggCalls, aggNames)) {
-        final JsonAggregation jsonAggregation =
-            getJsonAggregation(fieldNames, agg.right, agg.left, projects, translator);
-        aggregations.add(jsonAggregation);
-        builder.add(jsonAggregation.name);
+    if (groupSet == null) {
+      // It is a scan query, since there is no grouping
+      assert aggCalls == null;
+      assert aggNames == null;
+      assert collationIndexes == null || collationIndexes.isEmpty();
+      assert collationDirections == null || collationDirections.isEmpty();
+      final List<String> scanColumnNames;
+      final List<VirtualColumn> virtualColumnList = new ArrayList<>();
+      if (project != null) {
+        // Project some fields only
+        Pair<List<String>, List<VirtualColumn>> projectResult = computeProjectAsScan(
+            project, project.getInput().getRowType(), this);
+        scanColumnNames = projectResult.left;
+        virtualColumnList.addAll(projectResult.right);
+      } else {
+        // Scan all the fields
+        scanColumnNames = rowType.getFieldNames();
       }
+      final ScanQuery scanQuery = new ScanQuery(druidTable.dataSource, intervals, jsonFilter,
+          virtualColumnList, scanColumnNames, fetch);
+      return new QuerySpec(QueryType.SCAN,
+          Preconditions.checkNotNull(scanQuery.toQuery(), "Cannot plan Druid scan query"),
+          scanColumnNames);
+    }
 
-      fieldNames = builder.build();
-
-      if (postProject != null) {
-        builder = ImmutableList.builder();
-        for (Pair<RexNode, String> pair : postProject.getNamedProjects()) {
-          String fieldName = pair.right;
-          RexNode rex = pair.left;
-          builder.add(fieldName);
-          // Render Post JSON object when PostProject exists. In DruidPostAggregationProjectRule
-          // all check has been done to ensure all RexCall rexNode can be pushed in.
-          if (rex instanceof RexCall) {
-            DruidQuery.JsonPostAggregation jsonPost = getJsonPostAggregation(fieldName, rex,
-                postProject.getInput());
-            postAggs.add(jsonPost);
-          }
+    // At this stage we have a valid aggregate, so the query is one of Timeseries, TopN or
+    // GroupBy. Handling the aggregate and the sort together is more complex, since we need to
+    // examine their properties to determine which of the three query types Druid will run.
+    assert aggCalls != null;
+    assert aggNames != null;
+    assert aggCalls.size() == aggNames.size();
+
+    final List<JsonExpressionPostAgg> postAggs = new ArrayList<>();
+    final JsonLimit limit;
+    final RelDataType aggInputRowType = table.getRowType();
+    final List<String> aggregateStageFieldNames = new ArrayList<>();
+
+    Pair<List<DimensionSpec>, List<VirtualColumn>> projectGroupSet = computeProjectGroupSet(
+        project, groupSet, aggInputRowType, this);
+
+    final List<DimensionSpec> groupByKeyDims = projectGroupSet.left;
+    final List<VirtualColumn> virtualColumnList = projectGroupSet.right;
+    for (DimensionSpec dim : groupByKeyDims) {
+      aggregateStageFieldNames.add(dim.getOutputName());
+    }
+    final List<JsonAggregation> aggregations = computeDruidJsonAgg(aggCalls, aggNames, project,
+        this);
+    for (JsonAggregation jsonAgg : aggregations) {
+      aggregateStageFieldNames.add(jsonAgg.name);
+    }
+
+    final DruidJsonFilter havingJsonFilter;
+    if (havingFilter != null) {
+      havingJsonFilter = DruidJsonFilter
+          .toDruidFilters(havingFilter.getCondition(), havingFilter.getInput().getRowType(), this);
+    } else {
+      havingJsonFilter = null;
+    }
+
+    // Next, handle projects above the aggregate as Druid post-aggregations.
+    final List<String> postAggregateStageFieldNames;
+    if (postProject != null) {
+      final List<String> postProjectDimListBuilder = new ArrayList<>();
+      final RelDataType postAggInputRowType = getCluster().getTypeFactory()
+          .createStructType(Pair.right(postProject.getInput().getRowType().getFieldList()),
+              aggregateStageFieldNames);
+      // Index of the columns coming out of the aggregate layer. Used to filter out any
+      // downstream project that does not change values, e.g. an input ref or an identity cast.
+      Map<String, String> existingProjects = Maps
+          .uniqueIndex(aggregateStageFieldNames, new Function<String, String>() {
+            @Override public String apply(@Nullable String input) {
+              return DruidExpressions.fromColumn(input);
+            }
+          });
+      for (Pair<RexNode, String> pair : postProject.getNamedProjects()) {
+        final RexNode postProjectRexNode = pair.left;
+        final String postProjectFieldName = pair.right;
+        String expression = DruidExpressions
+            .toDruidExpression(postProjectRexNode, postAggInputRowType, this);
+        final String existingFieldName = existingProjects.get(expression);
+        if (existingFieldName != null) {
+          // A simple input ref or a Druid runtime identity cast; skip it, it is already there.
+          postProjectDimListBuilder.add(existingFieldName);
+        } else {
+          postAggs.add(new JsonExpressionPostAgg(postProjectFieldName, expression, null));
+          postProjectDimListBuilder.add(postProjectFieldName);
         }
-        fieldNames = builder.build();
       }
+      postAggregateStageFieldNames = postProjectDimListBuilder;
+    } else {
+      postAggregateStageFieldNames = null;
+    }
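+    // For instance, SELECT SUM("store_sales") + 1 pushes the "+ 1" as an expression
+    // post-aggregation, while a project that merely references an aggregate output is found
+    // in existingProjects and reused under its existing field name.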
+
+    // Final query output row field names.
+    final List<String> queryOutputFieldNames = postAggregateStageFieldNames == null
+        ? aggregateStageFieldNames
+        : postAggregateStageFieldNames;
+
+    // Handle all of the sorting in one place.
+    limit = computeSort(fetch, collationIndexes, collationDirections, numericCollationIndexes,
+        queryOutputFieldNames);
+
+    final String timeSeriesQueryString = planAsTimeSeries(groupByKeyDims, jsonFilter,
+        virtualColumnList, aggregations, postAggs, limit, havingJsonFilter);
+    if (timeSeriesQueryString != null) {
+      final String timeExtractColumn = groupByKeyDims.isEmpty()
+          ? null
+          : groupByKeyDims.get(0).getOutputName();
+      if (timeExtractColumn != null) {
+        // The GROUP BY on time has been transformed into a Druid Timeseries query with a
+        // granularity, so replace the column name with the Druid timestamp field name.
+        final List<String> timeseriesFieldNames = Lists
+            .transform(queryOutputFieldNames, new Function<String, String>() {
+              @Override public String apply(@Nullable String input) {
+                if (timeExtractColumn.equals(input)) {
+                  return "timestamp";
+                }
+                return input;
+              }
+            });
+        return new QuerySpec(QueryType.TIMESERIES, timeSeriesQueryString, timeseriesFieldNames);
+      }
+      return new QuerySpec(QueryType.TIMESERIES, timeSeriesQueryString, queryOutputFieldNames);
+    }
+    final String topNQuery = planAsTopN(groupByKeyDims, jsonFilter,
+        virtualColumnList, aggregations, postAggs, limit, havingJsonFilter);
+    if (topNQuery != null) {
+      return new QuerySpec(QueryType.TOP_N, topNQuery, queryOutputFieldNames);
+    }
+
+    final String groupByQuery = planAsGroupBy(groupByKeyDims, jsonFilter,
+        virtualColumnList, aggregations, postAggs, limit, havingJsonFilter);
+
+    if (groupByQuery == null) {
+      throw new IllegalStateException("Cannot plan Druid query");
+    }
+    return new QuerySpec(QueryType.GROUP_BY, groupByQuery, queryOutputFieldNames);
+  }
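+
+  // An informal sketch of the cascade above (actual plans depend on which rules fire):
+  //   SELECT FLOOR("__time" TO MONTH), COUNT(*) ... GROUP BY FLOOR("__time" TO MONTH)
+  //     -> Timeseries (no more than one dimension, which is time-based; no limit);
+  //   SELECT "brand_name", SUM("store_sales") ... GROUP BY "brand_name"
+  //       ORDER BY 2 DESC LIMIT 10
+  //     -> TopN (one dimension, descending metric sort, limit, approximateTopN enabled);
+  //   any other aggregate -> GroupBy.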
 
-      ImmutableList<JsonCollation> collations = null;
-      boolean sortsMetric = false;
-      if (collationIndexes != null) {
-        assert collationDirections != null;
-        ImmutableList.Builder<JsonCollation> colBuilder =
-            ImmutableList.builder();
-        for (Pair<Integer, Direction> p : Pair.zip(collationIndexes, collationDirections)) {
-          final String dimensionOrder = numericCollationIndexes.get(p.left) ? "numeric"
-              : "alphanumeric";
-          colBuilder.add(
-              new JsonCollation(fieldNames.get(p.left),
-                  p.right == Direction.DESCENDING ? "descending" : "ascending", dimensionOrder));
-          if (p.left >= groupSet.cardinality() && p.right == Direction.DESCENDING) {
-            // Currently only support for DESC in TopN
-            sortsMetric = true;
-          } else if (p.left == timePositionIdx) {
-            assert timeSeriesDirection == null;
-            timeSeriesDirection = p.right;
+  /**
+   * Computes the sort JSON (Druid limit spec) for the query.
+   *
+   * @param fetch fetch limit, or null
+   * @param collationIndexes indexes of the sort fields, as listed in the query output row
+   * @param collationDirections sort direction for each collation index
+   * @param numericCollationIndexes bit set marking the sort fields that use a numeric comparator
+   * @param queryOutputFieldNames names of the query output fields
+   *
+   * @return a non-null JSON limit object
+   */
+  private JsonLimit computeSort(@Nullable Integer fetch, List<Integer> collationIndexes,
+      List<Direction> collationDirections, ImmutableBitSet numericCollationIndexes,
+      List<String> queryOutputFieldNames) {
+    final List<JsonCollation> collations;
+    if (collationIndexes != null) {
+      assert collationDirections != null;
+      ImmutableList.Builder<JsonCollation> colBuilder = ImmutableList.builder();
+      for (Pair<Integer, Direction> p : Pair.zip(collationIndexes, collationDirections)) {
+        final String dimensionOrder = numericCollationIndexes.get(p.left)
+            ? "numeric"
+            : "lexicographic";
+        colBuilder.add(
+            new JsonCollation(queryOutputFieldNames.get(p.left),
+                p.right == Direction.DESCENDING ? "descending" : "ascending", dimensionOrder));
+      }
+      collations = colBuilder.build();
+    } else {
+      collations = null;
+    }
+    return new JsonLimit("default", fetch, collations);
+  }
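+
+  // With fetch = 10 and one descending numeric collation on "store_sales" (illustrative), the
+  // limit would serialize along the lines of:
+  //   {"type":"default","limit":10,"columns":[
+  //     {"dimension":"store_sales","direction":"descending","dimensionOrder":"numeric"}]}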
+
+  @Nullable
+  private String planAsTimeSeries(List<DimensionSpec> groupByKeyDims, DruidJsonFilter jsonFilter,
+      List<VirtualColumn> virtualColumnList, List<JsonAggregation> aggregations,
+      List<JsonExpressionPostAgg> postAggregations, JsonLimit limit, DruidJsonFilter havingFilter) {
+    if (havingFilter != null) {
+      return null;
+    }
+    if (groupByKeyDims.size() > 1) {
+      return null;
+    }
+    if (limit.limit != null) {
+      // Has a limit, which Timeseries does not support.
+      return null;
+    }
+    if (limit.collations != null && limit.collations.size() > 1) {
+      // Has more than one sort column.
+      return null;
+    }
+    final String sortDirection;
+    if (limit.collations != null && limit.collations.size() == 1) {
+      if (groupByKeyDims.isEmpty()
+          || !(limit.collations.get(0).dimension.equals(groupByKeyDims.get(0).getOutputName()))) {
+        // The sort column is not the time column.
+        return null;
+      }
+      sortDirection = limit.collations.get(0).direction;
+    } else {
+      sortDirection = null;
+    }
+
+    final Granularity timeseriesGranularity;
+    if (groupByKeyDims.size() == 1) {
+      DimensionSpec dimensionSpec = Iterables.getOnlyElement(groupByKeyDims);
+      Granularity granularity = ExtractionDimensionSpec.toQueryGranularity(dimensionSpec);
+      // If a project expression sits on top of the time extract, Timeseries cannot be used.
+      boolean hasExpressionOnTopOfTimeExtract = false;
+      for (JsonExpressionPostAgg postAgg : postAggregations) {
+        if (postAgg instanceof JsonExpressionPostAgg) {
+          if (postAgg.expression.contains(groupByKeyDims.get(0).getOutputName())) {
+            hasExpressionOnTopOfTimeExtract = true;
           }
         }
-        collations = colBuilder.build();
       }
-
-      limit = new JsonLimit("default", fetch, collations);
-
-      if (dimensions.isEmpty() && (collations == null || timeSeriesDirection != null)) {
-        queryType = QueryType.TIMESERIES;
-        assert fetch == null;
-      } else if (dimensions.size() == 1
-          && finalGranularity.equals(Granularities.all())
-          && sortsMetric
-          && collations.size() == 1
-          && fetch != null
-          && config.approximateTopN()) {
-        queryType = QueryType.TOP_N;
-      } else {
-        queryType = QueryType.GROUP_BY;
+      timeseriesGranularity = hasExpressionOnTopOfTimeExtract ? null : granularity;
+      if (timeseriesGranularity == null) {
+        // Cannot extract a usable granularity; bail out.
+        return null;
       }
     } else {
-      assert aggCalls == null;
-      assert aggNames == null;
-      assert collationIndexes == null || collationIndexes.isEmpty();
-      assert collationDirections == null || collationDirections.isEmpty();
+      timeseriesGranularity = Granularities.all();
     }
 
+    final boolean isCountStar = Granularities.all() == timeseriesGranularity
+        && aggregations.size() == 1
+        && aggregations.get(0).type.equals("count");
+
     final StringWriter sw = new StringWriter();
     final JsonFactory factory = new JsonFactory();
     try {
       final JsonGenerator generator = factory.createGenerator(sw);
+      generator.writeStartObject();
+      generator.writeStringField("queryType", "timeseries");
+      generator.writeStringField("dataSource", druidTable.dataSource);
+      generator.writeBooleanField("descending", sortDirection != null
+          && sortDirection.equals("descending"));
+      writeField(generator, "granularity", timeseriesGranularity);
+      writeFieldIf(generator, "filter", jsonFilter);
+      writeField(generator, "aggregations", aggregations);
+      writeFieldIf(generator, "virtualColumns",
+          virtualColumnList.size() > 0 ? virtualColumnList : null);
+      writeFieldIf(generator, "postAggregations",
+          postAggregations.size() > 0 ? postAggregations : null);
+      writeField(generator, "intervals", intervals);
+      generator.writeFieldName("context");
+      // The following field is necessary to conform with SQL semantics (CALCITE-1589)
+      generator.writeStartObject();
+      // COUNT(*) returns 0 on an empty result set, so skipEmptyBuckets must be false.
+      generator.writeBooleanField("skipEmptyBuckets", !isCountStar);
+      generator.writeEndObject();
+      generator.close();
+    } catch (IOException e) {
+      throw Throwables.propagate(e);
+    }
+    return sw.toString();
+  }
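+
+  // A sketch of the JSON emitted above for a daily COUNT(*) (granularity object abbreviated):
+  //   {"queryType":"timeseries","dataSource":"foodmart","descending":false,"granularity":...,
+  //    "aggregations":[{"type":"count","name":"EXPR$1"}],"intervals":[...],
+  //    "context":{"skipEmptyBuckets":true}}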
 
-      switch (queryType) {
-      case TIMESERIES:
-        generator.writeStartObject();
-
-        generator.writeStringField("queryType", "timeseries");
-        generator.writeStringField("dataSource", druidTable.dataSource);
-        generator.writeBooleanField("descending", timeSeriesDirection != null
-            && timeSeriesDirection == Direction.DESCENDING);
-        writeField(generator, "granularity", finalGranularity);
-        writeFieldIf(generator, "filter", jsonFilter);
-        writeField(generator, "aggregations", aggregations);
-        writeFieldIf(generator, "postAggregations", postAggs.size() > 0 ? postAggs : null);
-        writeField(generator, "intervals", intervals);
+  @Nullable
+  private String planAsTopN(List<DimensionSpec> groupByKeyDims, DruidJsonFilter jsonFilter,
+      List<VirtualColumn> virtualColumnList, List<JsonAggregation> aggregations,
+      List<JsonExpressionPostAgg> postAggregations, JsonLimit limit, DruidJsonFilter havingFilter) {
+    if (havingFilter != null) {
+      return null;
+    }
+    if (!getConnectionConfig().approximateTopN() || groupByKeyDims.size() != 1
+        || limit.limit == null || limit.collations == null || limit.collations.size() != 1) {
+      return null;
+    }
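+    // TopN sorts on a metric; if the sort column is the grouping dimension itself, bail out.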
+    if (limit.collations.get(0).dimension.equals(groupByKeyDims.get(0).getOutputName())) {
+      return null;
+    }
+    if (limit.collations.get(0).direction.equals("ascending")) {
+      // Only DESC is allowed in TopN.
+      return null;
+    }
 
-        generator.writeFieldName("context");
-        // The following field is necessary to conform with SQL semantics (CALCITE-1589)
-        generator.writeStartObject();
-        final boolean isCountStar = finalGranularity.equals(Granularities.all())
-            && aggregations.size() == 1
-            && aggregations.get(0).type.equals("count");
-        //Count(*) returns 0 if result set is empty thus need to set skipEmptyBuckets to false
-        generator.writeBooleanField("skipEmptyBuckets", !isCountStar);
-        generator.writeEndObject();
+    final String topNMetricColumnName = limit.collations.get(0).dimension;
+    final StringWriter sw = new StringWriter();
+    final JsonFactory factory = new JsonFactory();
+    try {
+      final JsonGenerator generator = factory.createGenerator(sw);
+      generator.writeStartObject();
 
-        generator.writeEndObject();
-        break;
+      generator.writeStringField("queryType", "topN");
+      generator.writeStringField("dataSource", druidTable.dataSource);
+      writeField(generator, "granularity", Granularities.all());
+      writeField(generator, "dimension", groupByKeyDims.get(0));
+      writeFieldIf(generator, "virtualColumns",
+          virtualColumnList.size() > 0 ? virtualColumnList : null);
+      generator.writeStringField("metric", topNMetricColumnName);
+      writeFieldIf(generator, "filter", jsonFilter);
+      writeField(generator, "aggregations", aggregations);
+      writeFieldIf(generator, "postAggregations",
+          postAggregations.size() > 0 ? postAggregations : null);
+      writeField(generator, "intervals", intervals);
+      generator.writeNumberField("threshold", limit.limit);
+      generator.writeEndObject();
+      generator.close();
+    } catch (IOException e) {
+      throw Throwables.propagate(e);
+    }
+    return sw.toString();
+  }
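+
+  // A sketch of the resulting TopN JSON (field values illustrative):
+  //   {"queryType":"topN","dataSource":"foodmart","granularity":"all","dimension":{...},
+  //    "metric":"S","aggregations":[...],"intervals":[...],"threshold":10}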
 
-      case TOP_N:
-        generator.writeStartObject();
+  @Nullable
+  private String planAsGroupBy(List<DimensionSpec> groupByKeyDims, DruidJsonFilter jsonFilter,
+      List<VirtualColumn> virtualColumnList, List<JsonAggregation> aggregations,
+      List<JsonExpressionPostAgg> postAggregations, JsonLimit limit, DruidJsonFilter havingFilter) {
+    final StringWriter sw = new StringWriter();
+    final JsonFactory factory = new JsonFactory();
+    try {
+      final JsonGenerator generator = factory.createGenerator(sw);
 
-        generator.writeStringField("queryType", "topN");
-        generator.writeStringField("dataSource", druidTable.dataSource);
-        writeField(generator, "granularity", finalGranularity);
-        writeField(generator, "dimension", dimensions.get(0));
-        generator.writeStringField("metric", fieldNames.get(collationIndexes.get(0)));
-        writeFieldIf(generator, "filter", jsonFilter);
-        writeField(generator, "aggregations", aggregations);
-        writeFieldIf(generator, "postAggregations", postAggs.size() > 0 ? postAggs : null);
-        writeField(generator, "intervals", intervals);
-        generator.writeNumberField("threshold", fetch);
+      generator.writeStartObject();
+      generator.writeStringField("queryType", "groupBy");
+      generator.writeStringField("dataSource", druidTable.dataSource);
+      writeField(generator, "granularity", Granularities.all());
+      writeField(generator, "dimensions", groupByKeyDims);
+      writeFieldIf(generator, "virtualColumns",
+          virtualColumnList.size() > 0 ? virtualColumnList : null);
+      writeFieldIf(generator, "limitSpec", limit);
+      writeFieldIf(generator, "filter", jsonFilter);
+      writeField(generator, "aggregations", aggregations);
+      writeFieldIf(generator, "postAggregations",
+          postAggregations.size() > 0 ? postAggregations : null);
+      writeField(generator, "intervals", intervals);
+      writeFieldIf(generator, "having",
+          havingFilter == null ? null : new DruidJsonFilter.JsonDimHavingFilter(havingFilter));
+      generator.writeEndObject();
+      generator.close();
+    } catch (IOException e) {
+      throw Throwables.propagate(e);
+    }
+    return sw.toString();
+  }
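+
+  // The GroupBy fallback serializes roughly as:
+  //   {"queryType":"groupBy","dataSource":"foodmart","granularity":"all","dimensions":[...],
+  //    "limitSpec":{"type":"default"},"aggregations":[...],"intervals":[...]}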
 
-        generator.writeEndObject();
-        break;
+  /** Druid Scan query body. */
+  private static class ScanQuery {
 
-      case GROUP_BY:
-        generator.writeStartObject();
-        generator.writeStringField("queryType", "groupBy");
-        generator.writeStringField("dataSource", druidTable.dataSource);
-        writeField(generator, "granularity", finalGranularity);
-        writeField(generator, "dimensions", dimensions);
-        writeFieldIf(generator, "limitSpec", limit);
-        writeFieldIf(generator, "filter", jsonFilter);
-        writeField(generator, "aggregations", aggregations);
-        writeFieldIf(generator, "postAggregations", postAggs.size() > 0 ? postAggs : null);
-        writeField(generator, "intervals", intervals);
-        writeFieldIf(generator, "having", null);
+    private String dataSource;
 
-        generator.writeEndObject();
-        break;
+    private List<Interval> intervals;
 
-      case SELECT:
-        generator.writeStartObject();
+    private DruidJsonFilter jsonFilter;
 
-        generator.writeStringField("queryType", "select");
-        generator.writeStringField("dataSource", druidTable.dataSource);
-        generator.writeBooleanField("descending", false);
-        writeField(generator, "intervals", intervals);
-        writeFieldIf(generator, "filter", jsonFilter);
-        writeField(generator, "dimensions", translator.dimensions);
-        writeField(generator, "metrics", translator.metrics);
-        writeField(generator, "granularity", finalGranularity);
+    private List<VirtualColumn> virtualColumnList;
 
-        generator.writeFieldName("pagingSpec");
-        generator.writeStartObject();
-        generator.writeNumberField("threshold", fetch != null ? fetch
-            : CalciteConnectionProperty.DRUID_FETCH.wrap(new Properties()).getInt());
-        generator.writeBooleanField("fromNext", true);
-        generator.writeEndObject();
+    private List<String> columns;
 
-        generator.writeFieldName("context");
-        generator.writeStartObject();
-        generator.writeBooleanField(DRUID_QUERY_FETCH, fetch != null);
-        generator.writeEndObject();
+    private Integer fetchLimit;
 
-        generator.writeEndObject();
-        break;
+    ScanQuery(String dataSource, List<Interval> intervals,
+        DruidJsonFilter jsonFilter,
+        List<VirtualColumn> virtualColumnList,
+        List<String> columns,
+        Integer fetchLimit) {
+      this.dataSource = dataSource;
+      this.intervals = intervals;
+      this.jsonFilter = jsonFilter;
+      this.virtualColumnList = virtualColumnList;
+      this.columns = columns;
+      this.fetchLimit = fetchLimit;
+    }
 
-      case SCAN:
+    public String toQuery() {
+      final StringWriter sw = new StringWriter();
+      try {
+        final JsonFactory factory = new JsonFactory();
+        final JsonGenerator generator = factory.createGenerator(sw);
         generator.writeStartObject();
-
         generator.writeStringField("queryType", "scan");
-        generator.writeStringField("dataSource", druidTable.dataSource);
+        generator.writeStringField("dataSource", dataSource);
         writeField(generator, "intervals", intervals);
         writeFieldIf(generator, "filter", jsonFilter);
-        writeField(generator, "columns",
-            Lists.transform(fieldNames, new Function<String, String>() {
-              @Override public String apply(String s) {
-                return s.equals(druidTable.timestampFieldName)
-                    ? DruidTable.DEFAULT_TIMESTAMP_COLUMN : s;
-              }
-            }));
-        writeField(generator, "granularity", finalGranularity);
+        writeFieldIf(generator, "virtualColumns",
+            virtualColumnList.size() > 0 ? virtualColumnList : null);
+        writeField(generator, "columns", columns);
         generator.writeStringField("resultFormat", "compactedList");
-        if (fetch != null) {
-          generator.writeNumberField("limit", fetch);
+        if (fetchLimit != null) {
+          generator.writeNumberField("limit", fetchLimit);
         }
-
         generator.writeEndObject();
-        break;
-
-      default:
-        throw new AssertionError("unknown query type " + queryType);
+        generator.close();
+      } catch (IOException e) {
+        throw Throwables.propagate(e);
       }
-
-      generator.close();
-    } catch (IOException e) {
-      e.printStackTrace();
+      return sw.toString();
     }
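+
+    // A minimal example of the emitted Scan JSON, assuming no filter or virtual columns:
+    //   {"queryType":"scan","dataSource":"foodmart","intervals":[...],
+    //    "columns":["brand_name","store_sales"],"resultFormat":"compactedList","limit":10}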
-
-    return new QuerySpec(queryType, sw.toString(), fieldNames);
   }
 
-  protected JsonAggregation getJsonAggregation(List<String> fieldNames,
-      String name, AggregateCall aggCall, List<RexNode> projects, Translator translator) {
-    final List<String> list = new ArrayList<>();
-    for (Integer arg : aggCall.getArgList()) {
-      list.add(fieldNames.get(arg));
-    }
-    final String only = Iterables.getFirst(list, null);
+  @Nullable
+  private static JsonAggregation getJsonAggregation(String name, AggregateCall aggCall,
+      RexNode filterNode, String fieldName, String aggExpression, DruidQuery druidQuery) {
     final boolean fractional;
     final RelDataType type = aggCall.getType();
     final SqlTypeName sqlTypeName = type.getSqlTypeName();
+    final JsonAggregation aggregation;
+    final CalciteConnectionConfig config = druidQuery.getConnectionConfig();
+
     if (SqlTypeFamily.APPROXIMATE_NUMERIC.getTypeNames().contains(sqlTypeName)) {
       fractional = true;
     } else if (SqlTypeFamily.INTEGER.getTypeNames().contains(sqlTypeName)) {
@@ -886,138 +1374,78 @@ public class DruidQuery extends AbstractRelNode implements BindableRel {
       }
     } else {
       // Cannot handle this aggregate function type
-      throw new AssertionError("unknown aggregate type " + type);
+      return null;
     }
 
-    JsonAggregation aggregation;
-
-    CalciteConnectionConfig config = getConnectionConfig();
-
     // Convert from a complex metric
-    ComplexMetric complexMetric = druidTable.resolveComplexMetric(only, aggCall);
+    ComplexMetric complexMetric = druidQuery.druidTable.resolveComplexMetric(fieldName, aggCall);
 
     switch (aggCall.getAggregation().getKind()) {
     case COUNT:
       if (aggCall.isDistinct()) {
         if (aggCall.isApproximate() || config.approximateDistinctCount()) {
           if (complexMetric == null) {
-            aggregation = new JsonCardinalityAggregation("cardinality", name, list);
+            aggregation = new JsonCardinalityAggregation("cardinality", name,
+                ImmutableList.of(fieldName));
           } else {
             aggregation = new JsonAggregation(complexMetric.getMetricType(), name,
-                    complexMetric.getMetricName());
+                    complexMetric.getMetricName(), null);
           }
           break;
         } else {
-          // Gets thrown if one of the rules allows a count(distinct ...) through
-          // when approximate results were not told be acceptable.
-          throw new UnsupportedOperationException("Cannot push " + aggCall
-              + " because an approximate count distinct is not acceptable.");
+          // Approximate results were not declared acceptable, so this
+          // count(distinct ...) cannot be pushed to Druid.
+          return null;
         }
       }
-      if (aggCall.getArgList().size() == 1) {
+      if (aggCall.getArgList().size() == 1 && !aggCall.isDistinct()) {
         // case we have count(column) push it as count(*) where column is not null
-        final JsonFilter matchNulls = new JsonSelector(only, null, null);
-        final JsonFilter filterOutNulls = new JsonCompositeFilter(JsonFilter.Type.NOT, matchNulls);
-        aggregation = new JsonFilteredAggregation(filterOutNulls,
-            new JsonAggregation("count", name, only));
+        final DruidJsonFilter matchNulls;
+        if (fieldName == null) {
+          matchNulls = new DruidJsonFilter.JsonExpressionFilter(aggExpression + " == null");
+        } else {
+          matchNulls = DruidJsonFilter.getSelectorFilter(fieldName, null, null);
+        }
+        aggregation = new JsonFilteredAggregation(DruidJsonFilter.toNotDruidFilter(matchNulls),
+            new JsonAggregation("count", name, fieldName, aggExpression));
+      } else if (!aggCall.isDistinct()) {
+        aggregation = new JsonAggregation("count", name, fieldName, aggExpression);
       } else {
-        aggregation = new JsonAggregation("count", name, only);
+        aggregation = null;
       }
 
       break;
     case SUM:
     case SUM0:
-      aggregation = new JsonAggregation(fractional ? "doubleSum" : "longSum", name, only);
+      aggregation = new JsonAggregation(fractional ? "doubleSum" : "longSum", name, fieldName,
+          aggExpression);
       break;
     case MIN:
-      aggregation = new JsonAggregation(fractional ? "doubleMin" : "longMin", name, only);
+      aggregation = new JsonAggregation(fractional ? "doubleMin" : "longMin", name, fieldName,
+          aggExpression);
       break;
     case MAX:
-      aggregation = new JsonAggregation(fractional ? "doubleMax" : "longMax", name, only);
+      aggregation = new JsonAggregation(fractional ? "doubleMax" : "longMax", name, fieldName,
+          aggExpression);
       break;
     default:
-      throw new AssertionError("unknown aggregate " + aggCall);
+      return null;
     }
 
-    // Check for filters
-    if (aggCall.hasFilter()) {
-      RexCall filterNode = (RexCall) projects.get(aggCall.filterArg);
-      JsonFilter filter = translator.translateFilter(filterNode.getOperands().get(0));
-      aggregation = new JsonFilteredAggregation(filter, aggregation);
+    if (aggregation == null) {
+      return null;
     }
-
-    return aggregation;
-  }
-
-  public JsonPostAggregation getJsonPostAggregation(String name, RexNode rexNode, RelNode rel) {
-    if (rexNode instanceof RexCall) {
-      List<JsonPostAggregation> fields = new ArrayList<>();
-      for (RexNode ele : ((RexCall) rexNode).getOperands()) {
-        JsonPostAggregation field = getJsonPostAggregation("", ele, rel);
-        if (field == null) {
-          throw new RuntimeException("Unchecked types that cannot be parsed as Post Aggregator");
-        }
-        fields.add(field);
-      }
-      switch (rexNode.getKind()) {
-      case PLUS:
-        return new JsonArithmetic(name, "+", fields, null);
-      case MINUS:
-        return new JsonArithmetic(name, "-", fields, null);
-      case DIVIDE:
-        return new JsonArithmetic(name, "quotient", fields, null);
-      case TIMES:
-        return new JsonArithmetic(name, "*", fields, null);
-      case CAST:
-        return getJsonPostAggregation(name, ((RexCall) rexNode).getOperands().get(0),
-            rel);
-      default:
-      }
-    } else if (rexNode instanceof RexInputRef) {
-      // Subtract only number of grouping columns as offset because for now only Aggregates
-      // without grouping sets (i.e. indicator columns size is zero) are allowed to pushed
-      // in Druid Query.
-      Integer indexSkipGroup = ((RexInputRef) rexNode).getIndex()
-          - ((Aggregate) rel).getGroupCount();
-      AggregateCall aggCall = ((Aggregate) rel).getAggCallList().get(indexSkipGroup);
-      // Use either the hyper unique estimator, or the theta sketch one.
-      // Hyper unique is used by default.
-      if (aggCall.isDistinct()
-          && aggCall.getAggregation().getKind() == SqlKind.COUNT) {
-        final String fieldName = rel.getRowType().getFieldNames()
-                .get(((RexInputRef) rexNode).getIndex());
-
-        List<String> fieldNames = ((Aggregate) rel).getInput().getRowType().getFieldNames();
-        String complexName = fieldNames.get(aggCall.getArgList().get(0));
-        ComplexMetric metric = druidTable.resolveComplexMetric(complexName, aggCall);
-
-        if (metric != null) {
-          switch (metric.getDruidType()) {
-          case THETA_SKETCH:
-            return new JsonThetaSketchEstimate("", fieldName);
-          case HYPER_UNIQUE:
-            return new JsonHyperUniqueCardinality("", fieldName);
-          default:
-            throw new AssertionError("Can not translate complex metric type: "
-                    + metric.getDruidType());
-          }
-        }
-        // Count distinct on a non-complex column.
-        return new JsonHyperUniqueCardinality("", fieldName);
-      }
-      return new JsonFieldAccessor("",
-          rel.getRowType().getFieldNames().get(((RexInputRef) rexNode).getIndex()));
-    } else if (rexNode instanceof RexLiteral) {
-      // Druid constant post aggregator only supports numeric value for now.
-      // (http://druid.io/docs/0.10.0/querying/post-aggregations.html) Accordingly, all
-      // numeric type of RexLiteral can only have BigDecimal value, so filter out unsupported
-      // constant by checking the type of RexLiteral value.
-      if (((RexLiteral) rexNode).getValue3() instanceof BigDecimal) {
-        return new JsonConstant("",
-            ((BigDecimal) ((RexLiteral) rexNode).getValue3()).doubleValue());
+    // Translate the filter attached to this aggregate call, if any.
+    if (filterNode != null) {
+      DruidJsonFilter druidFilter = DruidJsonFilter
+          .toDruidFilters(filterNode, druidQuery.table.getRowType(), druidQuery);
+      if (druidFilter == null) {
+        // Cannot translate the filter.
+        return null;
       }
+      return new JsonFilteredAggregation(druidFilter, aggregation);
     }
-    throw new RuntimeException("Unchecked types that cannot be parsed as Post Aggregator");
+
+    return aggregation;
   }
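+
+  // For instance, COUNT("store_sales") becomes a filtered count that excludes nulls,
+  // along the lines of:
+  //   {"type":"filtered","filter":{"type":"not","field":{"type":"selector",
+  //    "dimension":"store_sales"}},"aggregator":{"type":"count","name":"C"}}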
 
   protected static void writeField(JsonGenerator generator, String fieldName,
@@ -1054,8 +1482,8 @@ public class DruidQuery extends AbstractRelNode implements BindableRel {
       generator.writeNumber(i);
     } else if (o instanceof List) {
       writeArray(generator, (List<?>) o);
-    } else if (o instanceof Json) {
-      ((Json) o).write(generator);
+    } else if (o instanceof DruidJson) {
+      ((DruidJson) o).write(generator);
     } else {
       throw new AssertionError("not a json object: " + o);
     }
@@ -1126,249 +1554,6 @@ public class DruidQuery extends AbstractRelNode implements BindableRel {
     }
   }
 
-  /** Translates scalar expressions to Druid field references. */
-  @VisibleForTesting
-  protected static class Translator {
-    final List<String> dimensions = new ArrayList<>();
-    final List<String> metrics = new ArrayList<>();
-    final DruidTable druidTable;
-    final RelDataType rowType;
-    final String timeZone;
-    final SimpleDateFormat dateFormatter;
-
-    Translator(DruidTable druidTable, RelDataType rowType, String timeZone) {
-      this.druidTable = druidTable;
-      this.rowType = rowType;
-      for (RelDataTypeField f : rowType.getFieldList()) {
-        final String fieldName = f.getName();
-        if (druidTable.isMetric(fieldName)) {
-          metrics.add(fieldName);
-        } else if (!druidTable.timestampFieldName.equals(fieldName)
-            && !DruidTable.DEFAULT_TIMESTAMP_COLUMN.equals(fieldName)) {
-          dimensions.add(fieldName);
-        }
-      }
-      this.timeZone = timeZone;
-      this.dateFormatter = new SimpleDateFormat(TimeExtractionFunction.ISO_TIME_FORMAT,
-          Locale.ROOT);
-      if (timeZone != null) {
-        this.dateFormatter.setTimeZone(TimeZone.getTimeZone(timeZone));
-      }
-    }
-
-    protected void clearFieldNameLists() {
-      dimensions.clear();
-      metrics.clear();
-    }
-
-    /** Formats timestamp values to druid format using
-     * {@link DruidQuery.Translator#dateFormatter}. This is needed when pushing
-     * timestamp comparisons to druid using a TimeFormatExtractionFunction that
-     * returns a string value. */
-    @SuppressWarnings("incomplete-switch")
-    String translate(RexNode e, boolean set, boolean formatDateString) {
-      int index = -1;
-      switch (e.getKind()) {
-      case INPUT_REF:
-        final RexInputRef ref = (RexInputRef) e;
-        index = ref.getIndex();
-        break;
-      case CAST:
-        return tr(e, 0, set, formatDateString);
-      case LITERAL:
-        final RexLiteral rexLiteral = (RexLiteral) e;
-        if (!formatDateString) {
-          return Objects.toString(rexLiteral.getValue3());
-        } else {
-          // Case when we are passing to druid as an extractionFunction
-          // Need to format the timestamp String in druid format.
-          TimestampString timestampString = DruidDateTimeUtils
-              .literalValue(e, TimeZone.getTimeZone(timeZone));
-          if (timestampString == null) {
-            throw new AssertionError(
-                "Cannot translate Literal" + e + " of type "
-                    + rexLiteral.getTypeName() + " to TimestampString");
-          }
-          return dateFormatter.format(timestampString.getMillisSinceEpoch());
-        }
-      case FLOOR:
-      case EXTRACT:
-        final RexCall call = (RexCall) e;
-        assert DruidDateTimeUtils.extractGranularity(call, timeZone) != null;
-        index = RelOptUtil.InputFinder.bits(e).asList().get(0);
-        break;
-      case IS_TRUE:
-        return ""; // the fieldName for which this is the filter will be added separately
-      }
-      if (index == -1) {
-        throw new AssertionError("invalid expression " + e);
-      }
-      final String fieldName = rowType.getFieldList().get(index).getName();
-      if (set) {
-        if (druidTable.metricFieldNames.contains(fieldName)) {
-          metrics.add(fieldName);
-        } else if (!druidTable.timestampFieldName.equals(fieldName)
-            && !DruidTable.DEFAULT_TIMESTAMP_COLUMN.equals(fieldName)) {
-          dimensions.add(fieldName);
-        }
-      }
-      return fieldName;
-    }
-
-    private JsonFilter translateFilter(RexNode e) {
-      final RexCall call;
-      if (e.isAlwaysTrue()) {
-        return JsonExpressionFilter.alwaysTrue();
-      }
-      if (e.isAlwaysFalse()) {
-        return JsonExpressionFilter.alwaysFalse();
-      }
-      switch (e.getKind()) {
-      case EQUALS:
-      case NOT_EQUALS:
-      case GREATER_THAN:
-      case GREATER_THAN_OR_EQUAL:
-      case LESS_THAN:
-      case LESS_THAN_OR_EQUAL:
-      case IN:
-      case BETWEEN:
-      case IS_NULL:
-      case IS_NOT_NULL:
-        call = (RexCall) e;
-        int posRef;
-        int posConstant;
-        if (call.getOperands().size() == 1) { // IS NULL and IS NOT NULL
-          posRef = 0;
-          posConstant = -1;
-        } else if (RexUtil.isConstant(call.getOperands().get(1))) {
-          posRef = 0;
-          posConstant = 1;
-        } else if (RexUtil.isConstant(call.getOperands().get(0))) {
-          posRef = 1;
-          posConstant = 0;
-        } else {
-          throw new AssertionError("it is not a valid comparison: " + e);
-        }
-        RexNode posRefNode = call.getOperands().get(posRef);
-        final boolean numeric =
-            call.getOperands().get(posRef).getType().getFamily()
-                == SqlTypeFamily.NUMERIC;
-        boolean formatDateString = false;
-        final Granularity granularity =
-            DruidDateTimeUtils.extractGranularity(posRefNode, timeZone);
-        // in case no extraction the field will be omitted from the serialization
-        final ExtractionFunction extractionFunction;
-        if (granularity != null) {
-          switch (posRefNode.getKind()) {
-          case EXTRACT:
-            extractionFunction =
-                TimeExtractionFunction.createExtractFromGranularity(granularity,
-                    timeZone);
-            break;
-          case FLOOR:
-            extractionFunction =
-                TimeExtractionFunction.createFloorFromGranularity(granularity,
-                    timeZone);
-            formatDateString = true;
-            break;
-          default:
-            extractionFunction = null;
-          }
-        } else {
-          extractionFunction = null;
-        }
-        String dimName = tr(e, posRef, formatDateString);
-        if (dimName.equals(DruidConnectionImpl.DEFAULT_RESPONSE_TIMESTAMP_COLUMN)) {
-          // We need to use Druid default column name to refer to the time dimension in a filter
-          dimName = DruidTable.DEFAULT_TIMESTAMP_COLUMN;
-        }
-
-        switch (e.getKind()) {
-        case EQUALS:
-          // extractionFunction should be null because if we are using an extraction function
-          // we have guarantees about the format of the output and thus we can apply the
-          // normal selector
-          if (numeric && extractionFunction == null) {
-            String constantValue = tr(e, posConstant, formatDateString);
-            return new JsonBound(dimName, constantValue, false, constantValue, false,
-                numeric, extractionFunction);
-          }
-          return new JsonSelector(dimName, tr(e, posConstant, formatDateString),
-              extractionFunction);
-        case NOT_EQUALS:
-          // extractionFunction should be null because if we are using an extraction function
-          // we have guarantees about the format of the output and thus we can apply the
-          // normal selector
-          if (numeric && extractionFunction == null) {
-            String constantValue = tr(e, posConstant, formatDateString);
-            return new JsonCompositeFilter(JsonFilter.Type.OR,
-                new JsonBound(dimName, constantValue, true, null, false,
-                    numeric, extractionFunction),
-                new JsonBound(dimName, null, false, constantValue, true,
-                    numeric, extractionFunction));
-          }
-          return new JsonCompositeFilter(JsonFilter.Type.NOT,
-              new JsonSelector(dimName, tr(e, posConstant, formatDateString), extractionFunction));
-        case GREATER_THAN:
-          return new JsonBound(dimName, tr(e, posConstant, formatDateString),
-              true, null, false, numeric, extractionFunction);
-        case GREATER_THAN_OR_EQUAL:
-          return new JsonBound(dimName, tr(e, posConstant, formatDateString),
-              false, null, false, numeric, extractionFunction);
-        case LESS_THAN:
-          return new JsonBound(dimName, null, false,
-              tr(e, posConstant, formatDateString), true, numeric, extractionFunction);
-        case LESS_THAN_OR_EQUAL:
-          return new JsonBound(dimName, null, false,
-              tr(e, posConstant, formatDateString), false, numeric, extractionFunction);
-        case IN:
-          ImmutableList.Builder<String> listBuilder = ImmutableList.builder();
-          for (RexNode rexNode: call.getOperands()) {
-            if (rexNode.getKind() == SqlKind.LITERAL) {
-              listBuilder.add(Objects.toString(((RexLiteral) rexNode).getValue3()));
-            }
-          }
-          return new JsonInFilter(dimName, listBuilder.build(), extractionFunction);
-        case BETWEEN:
-          return new JsonBound(dimName, tr(e, 2, formatDateString), false,
-              tr(e, 3, formatDateString), false, numeric, extractionFunction);
-        case IS_NULL:
-          return new JsonSelector(dimName, null, extractionFunction);
-        case IS_NOT_NULL:
-          return new JsonCompositeFilter(JsonFilter.Type.NOT,
-              new JsonSelector(dimName, null, extractionFunction));
-        default:
-          throw new AssertionError();
-        }
-      case AND:
-      case OR:
-      case NOT:
-        call = (RexCall) e;
-        return new JsonCompositeFilter(JsonFilter.Type.valueOf(e.getKind().name()),
-            translateFilters(call.getOperands()));
-      default:
-        throw new AssertionError("cannot translate filter: " + e);
-      }
-    }
-
-    private String tr(RexNode call, int index, boolean formatDateString) {
-      return tr(call, index, false, formatDateString);
-    }
-
-    private String tr(RexNode call, int index, boolean set, boolean formatDateString) {
-      return translate(((RexCall) call).getOperands().get(index), set, formatDateString);
-    }
-
-    private List<JsonFilter> translateFilters(List<RexNode> operands) {
-      final ImmutableList.Builder<JsonFilter> builder =
-          ImmutableList.builder();
-      for (RexNode operand : operands) {
-        builder.add(translateFilter(operand));
-      }
-      return builder.build();
-    }
-  }
-
   /** Interpreter node that executes a Druid query and sends the results to a
    * {@link Sink}. */
   private static class DruidQueryNode implements Node {
@@ -1411,6 +1596,7 @@ public class DruidQuery extends AbstractRelNode implements BindableRel {
     private ColumnMetaData.Rep getPrimitive(RelDataTypeField field) {
       switch (field.getType().getSqlTypeName()) {
       case TIMESTAMP_WITH_LOCAL_TIME_ZONE:
+      case TIMESTAMP:
         return ColumnMetaData.Rep.JAVA_SQL_TIMESTAMP;
       case BIGINT:
         return ColumnMetaData.Rep.LONG;
@@ -1431,22 +1617,18 @@ public class DruidQuery extends AbstractRelNode implements BindableRel {
     }
   }
 
-  /** Object that knows how to write itself to a
-   * {@link com.fasterxml.jackson.core.JsonGenerator}. */
-  public in

<TRUNCATED>