Posted to commits@drill.apache.org by ja...@apache.org on 2014/06/20 22:24:37 UTC

[01/32] git commit: DRILL-993: Remove redundant comparison in comparison operator generated by FreeMarker template.

Repository: incubator-drill
Updated Branches:
  refs/heads/master 894037ab6 -> efa3274bd


DRILL-993: Remove redundant comparison in comparison operator generated by FreeMarker template.


Project: http://git-wip-us.apache.org/repos/asf/incubator-drill/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-drill/commit/27d3e714
Tree: http://git-wip-us.apache.org/repos/asf/incubator-drill/tree/27d3e714
Diff: http://git-wip-us.apache.org/repos/asf/incubator-drill/diff/27d3e714

Branch: refs/heads/master
Commit: 27d3e7145a7b1a43015fd4cb745e425443411b8c
Parents: 894037a
Author: Jinfeng Ni <jn...@maprtech.com>
Authored: Tue Jun 17 15:02:32 2014 -0700
Committer: Jinfeng Ni <jn...@maprtech.com>
Committed: Wed Jun 18 07:13:16 2014 -0700

----------------------------------------------------------------------
 .../codegen/templates/ComparisonFunctions.java  | 56 ++++++++++--
 .../templates/DateIntervalFunctions.java        | 91 +++++++++-----------
 2 files changed, 94 insertions(+), 53 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/27d3e714/exec/java-exec/src/main/codegen/templates/ComparisonFunctions.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/codegen/templates/ComparisonFunctions.java b/exec/java-exec/src/main/codegen/templates/ComparisonFunctions.java
index 9b0028c..b57a37f 100644
--- a/exec/java-exec/src/main/codegen/templates/ComparisonFunctions.java
+++ b/exec/java-exec/src/main/codegen/templates/ComparisonFunctions.java
@@ -141,10 +141,15 @@ public class GCompare${left}${right}{
           break sout;
         }
         </#if>
-        
+
+        <#if type.mode == "var" >
         int cmp;
         <@compareBlock mode=type.mode left=left right=right output="cmp" nullCompare=false/>
         out.value = cmp == -1 ? 1 : 0;
+        <#else>
+        out.value = left.value < right.value ? 1 : 0;
+        </#if>
+
         }
       }
   }
@@ -173,9 +178,14 @@ public class GCompare${left}${right}{
         }
         </#if>
         
+        <#if type.mode == "var" >
         int cmp;
         <@compareBlock mode=type.mode left=left right=right output="cmp" nullCompare=false/>
         out.value = cmp < 1 ? 1 : 0;
+        <#else>
+        out.value = left.value <= right.value ? 1 : 0;
+        </#if>
+
         }
     }
   }
@@ -204,9 +214,14 @@ public class GCompare${left}${right}{
         }
         </#if>
         
+        <#if type.mode == "var" >
         int cmp;
         <@compareBlock mode=type.mode left=left right=right output="cmp" nullCompare=false/>
         out.value = cmp == 1 ? 1 : 0;
+        <#else>
+        out.value = left.value > right.value ? 1 : 0;
+        </#if>
+
         }
     }
   }
@@ -235,9 +250,14 @@ public class GCompare${left}${right}{
         }
         </#if>
         
+        <#if type.mode == "var" >            
         int cmp;
         <@compareBlock mode=type.mode left=left right=right output="cmp" nullCompare=false/>
         out.value = cmp > -1 ? 1 : 0;
+        <#else>
+        out.value = left.value >= right.value ? 1 : 0;
+        </#if>
+
         }
       }
   }
@@ -265,10 +285,31 @@ public class GCompare${left}${right}{
             break sout;
           }
           </#if>
-          
-          int cmp;
-          <@compareBlock mode=type.mode left=left right=right output="cmp" nullCompare=false/>
-          out.value = cmp == 0 ? 1 : 0;
+        
+          <#if type.mode == "var" >
+outside: 
+        {          
+          if (left.end - left.start == right.end - right.start) {
+            int n = left.end - left.start;
+            int l = left.start;
+            int r = right.start;
+            while (n-- !=0) {
+              byte leftByte = left.buffer.getByte(l++);
+              byte rightByte = right.buffer.getByte(r++);
+              if (leftByte != rightByte) {
+                out.value = 0;
+                break outside;
+              }
+            }
+            out.value = 1;
+          } else {
+            out.value = 0;
+          }
+        } 
+          <#else>
+          out.value = left.value == right.value ? 1 : 0;
+          </#if>
+
         }
       }
   }
@@ -297,9 +338,14 @@ public class GCompare${left}${right}{
         }
         </#if>
         
+        <#if type.mode == "var" >            
         int cmp;
         <@compareBlock mode=type.mode left=left right=right output="cmp" nullCompare=false/>
         out.value = cmp == 0 ? 0 : 1;
+        <#else>
+        out.value = left.value != right.value ? 1 : 0;
+        </#if>
+        
         }
         
       }
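
The template change above reduces to two patterns. For fixed-width types, the old expansion computed a three-way comparison and then tested its result; the new expansion emits the relational operator directly. For equality on variable-length types, it now short-circuits on length before scanning bytes. A minimal standalone Java sketch of both shapes (the parameters are stand-ins for illustration, not Drill's generated holder classes):

public final class ComparisonSketch {

  // Before: three-way compare, then a test of its result (redundant branch).
  static int lessThanOld(long left, long right) {
    int cmp = (left > right) ? 1 : ((left < right) ? -1 : 0);
    return (cmp == -1) ? 1 : 0;
  }

  // After: the relational operator emitted directly; same result, one branch.
  static int lessThanNew(long left, long right) {
    return left < right ? 1 : 0;
  }

  // Equality fast path for variable-length values, mirroring the byte loop
  // above: values of unequal length can never be equal, so compare lengths
  // first and only then scan bytes, bailing out at the first mismatch.
  static int bytesEqual(byte[] left, int lStart, int lEnd,
                        byte[] right, int rStart, int rEnd) {
    if (lEnd - lStart != rEnd - rStart) {
      return 0;
    }
    for (int i = lStart, j = rStart; i < lEnd; i++, j++) {
      if (left[i] != right[j]) {
        return 0;
      }
    }
    return 1;
  }
}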

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/27d3e714/exec/java-exec/src/main/codegen/templates/DateIntervalFunctions.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/codegen/templates/DateIntervalFunctions.java b/exec/java-exec/src/main/codegen/templates/DateIntervalFunctions.java
index 7afbcfc..a4bea61 100644
--- a/exec/java-exec/src/main/codegen/templates/DateIntervalFunctions.java
+++ b/exec/java-exec/src/main/codegen/templates/DateIntervalFunctions.java
@@ -27,6 +27,8 @@
 
 package org.apache.drill.exec.expr.fn.impl;
 
+import javax.xml.ws.Holder;
+
 import org.apache.drill.exec.expr.DrillSimpleFunc;
 import org.apache.drill.exec.expr.annotations.FunctionTemplate;
 import org.apache.drill.exec.expr.annotations.FunctionTemplate.NullHandling;
@@ -63,9 +65,7 @@ public class GCompare${type.name}Functions {
       public void setup(RecordBatch b) {}
 
       public void eval() {
-
-          int cmp = (left.value > right.value) ? 1 : ((left.value < right.value) ? -1 : 0);
-          out.value = (cmp == -1) ? 1 : 0;
+        out.value = left.value < right.value ? 1 : 0;
       }
   }
 
@@ -79,9 +79,7 @@ public class GCompare${type.name}Functions {
       public void setup(RecordBatch b) {}
 
       public void eval() {
-
-          int cmp = (left.value > right.value) ? 1 : ((left.value < right.value) ? -1 : 0);
-          out.value = (cmp < 1) ? 1 : 0;
+        out.value = left.value <= right.value ? 1 : 0;
     }
   }
 
@@ -95,9 +93,7 @@ public class GCompare${type.name}Functions {
       public void setup(RecordBatch b) {}
 
       public void eval() {
-
-          int cmp = (left.value > right.value) ? 1 : ((left.value < right.value) ? -1 : 0);
-          out.value = (cmp == 1) ? 1 : 0;
+        out.value = left.value > right.value ? 1 : 0;
     }
   }
 
@@ -111,8 +107,7 @@ public class GCompare${type.name}Functions {
       public void setup(RecordBatch b) {}
 
       public void eval() {
-         int cmp = (left.value > right.value) ? 1 : ((left.value < right.value) ? -1 : 0);
-          out.value = (cmp > -1) ? 1 : 0;
+        out.value = left.value >= right.value ? 1 : 0;
       }
   }
 
@@ -126,9 +121,7 @@ public class GCompare${type.name}Functions {
       public void setup(RecordBatch b) {}
 
       public void eval() {
-
-          int cmp = (left.value > right.value) ? 1 : ((left.value < right.value) ? -1 : 0);
-          out.value = (cmp == 0) ? 1 : 0;
+        out.value = left.value == right.value ? 1 : 0;
       }
   }
 
@@ -142,8 +135,7 @@ public class GCompare${type.name}Functions {
       public void setup(RecordBatch b) {}
 
       public void eval() {
-          int cmp = (left.value > right.value) ? 1 : ((left.value < right.value) ? -1 : 0);
-          out.value = (cmp != 0) ? 1 : 0;
+        out.value = left.value != right.value ? 1 : 0;
       }
   }
 
@@ -174,6 +166,23 @@ outside: {
     }
 </#macro>
 
+<#macro intervalConvertBlock left right leftMonths leftDays leftMillis rightMonths rightDays rightMillis>
+        org.joda.time.MutableDateTime leftDate  = new org.joda.time.MutableDateTime(1970, 1, 1, 0, 0, 0, 0, org.joda.time.DateTimeZone.UTC);
+        org.joda.time.MutableDateTime rightDate = new org.joda.time.MutableDateTime(1970, 1, 1, 0, 0, 0, 0, org.joda.time.DateTimeZone.UTC);
+
+        // Left and right date have the same starting point (epoch), add the interval period and compare the two
+        leftDate.addMonths(${leftMonths});
+        leftDate.addDays(${leftDays});
+        leftDate.add(${leftMillis});
+
+        rightDate.addMonths(${rightMonths});
+        rightDate.addDays(${rightDays});
+        rightDate.add(${rightMillis});
+
+        long leftMS  = leftDate.getMillis();
+        long rightMS = rightDate.getMillis();
+</#macro>
+
 <@pp.changeOutputFile name="/org/apache/drill/exec/expr/fn/impl/GCompare${type.name}Functions.java" />
 
 <#include "/@includes/license.ftl" />
@@ -222,15 +231,13 @@ public class GCompare${type.name}Functions {
 
       public void eval() {
 
-          int cmp;
-
           <#if type.name == "Interval">
-          <@intervalCompareBlock left="left" right="right" leftMonths="left.months" leftDays="left.days" leftMillis="left.milliSeconds" rightMonths="right.months" rightDays="right.days" rightMillis="right.milliSeconds" output="cmp"/>
+          <@intervalConvertBlock left="left" right="right" leftMonths="left.months" leftDays="left.days" leftMillis="left.milliSeconds" rightMonths="right.months" rightDays="right.days" rightMillis="right.milliSeconds"/>
           <#else>
-          <@intervalCompareBlock left="left" right="right" leftMonths="0" leftDays="left.days" leftMillis="left.milliSeconds" rightMonths="0" rightDays="right.days" rightMillis="right.milliSeconds" output="cmp"/>
+          <@intervalConvertBlock left="left" right="right" leftMonths="0" leftDays="left.days" leftMillis="left.milliSeconds" rightMonths="0" rightDays="right.days" rightMillis="right.milliSeconds"/>
           </#if>
 
-          out.value = (cmp == -1) ? 1 : 0;
+          out.value = leftMS < rightMS ? 1 : 0;
       }
   }
 
@@ -245,15 +252,13 @@ public class GCompare${type.name}Functions {
 
       public void eval() {
 
-          int cmp;
-
           <#if type.name == "Interval">
-          <@intervalCompareBlock left="left" right="right" leftMonths="left.months" leftDays="left.days" leftMillis="left.milliSeconds" rightMonths="right.months" rightDays="right.days" rightMillis="right.milliSeconds" output="cmp"/>
+          <@intervalConvertBlock left="left" right="right" leftMonths="left.months" leftDays="left.days" leftMillis="left.milliSeconds" rightMonths="right.months" rightDays="right.days" rightMillis="right.milliSeconds"/>
           <#else>
-          <@intervalCompareBlock left="left" right="right" leftMonths="0" leftDays="left.days" leftMillis="left.milliSeconds" rightMonths="0" rightDays="right.days" rightMillis="right.milliSeconds" output="cmp"/>
+          <@intervalConvertBlock left="left" right="right" leftMonths="0" leftDays="left.days" leftMillis="left.milliSeconds" rightMonths="0" rightDays="right.days" rightMillis="right.milliSeconds"/>
           </#if>
 
-          out.value = (cmp < 1) ? 1 : 0;
+          out.value = leftMS <= rightMS ? 1 : 0;
     }
   }
 
@@ -268,15 +273,13 @@ public class GCompare${type.name}Functions {
 
       public void eval() {
 
-          int cmp;
-
           <#if type.name == "Interval">
-          <@intervalCompareBlock left="left" right="right" leftMonths="left.months" leftDays="left.days" leftMillis="left.milliSeconds" rightMonths="right.months" rightDays="right.days" rightMillis="right.milliSeconds" output="cmp"/>
+          <@intervalConvertBlock left="left" right="right" leftMonths="left.months" leftDays="left.days" leftMillis="left.milliSeconds" rightMonths="right.months" rightDays="right.days" rightMillis="right.milliSeconds"/>
           <#else>
-          <@intervalCompareBlock left="left" right="right" leftMonths="0" leftDays="left.days" leftMillis="left.milliSeconds" rightMonths="0" rightDays="right.days" rightMillis="right.milliSeconds" output="cmp"/>
+          <@intervalConvertBlock left="left" right="right" leftMonths="0" leftDays="left.days" leftMillis="left.milliSeconds" rightMonths="0" rightDays="right.days" rightMillis="right.milliSeconds"/>
           </#if>
 
-          out.value = (cmp == 1) ? 1 : 0;
+          out.value = leftMS > rightMS ? 1 : 0;
     }
   }
 
@@ -291,15 +294,13 @@ public class GCompare${type.name}Functions {
 
       public void eval() {
 
-          int cmp;
-
           <#if type.name == "Interval">
-          <@intervalCompareBlock left="left" right="right" leftMonths="left.months" leftDays="left.days" leftMillis="left.milliSeconds" rightMonths="right.months" rightDays="right.days" rightMillis="right.milliSeconds" output="cmp"/>
+          <@intervalConvertBlock left="left" right="right" leftMonths="left.months" leftDays="left.days" leftMillis="left.milliSeconds" rightMonths="right.months" rightDays="right.days" rightMillis="right.milliSeconds"/>
           <#else>
-          <@intervalCompareBlock left="left" right="right" leftMonths="0" leftDays="left.days" leftMillis="left.milliSeconds" rightMonths="0" rightDays="right.days" rightMillis="right.milliSeconds" output="cmp"/>
+          <@intervalConvertBlock left="left" right="right" leftMonths="0" leftDays="left.days" leftMillis="left.milliSeconds" rightMonths="0" rightDays="right.days" rightMillis="right.milliSeconds"/>
           </#if>
 
-          out.value = (cmp > -1) ? 1 : 0;
+          out.value = leftMS >= rightMS ? 1 : 0;
       }
   }
 
@@ -313,16 +314,13 @@ public class GCompare${type.name}Functions {
       public void setup(RecordBatch b) {}
 
       public void eval() {
-
-          int cmp;
-
           <#if type.name == "Interval">
-          <@intervalCompareBlock left="left" right="right" leftMonths="left.months" leftDays="left.days" leftMillis="left.milliSeconds" rightMonths="right.months" rightDays="right.days" rightMillis="right.milliSeconds" output="cmp"/>
+          <@intervalConvertBlock left="left" right="right" leftMonths="left.months" leftDays="left.days" leftMillis="left.milliSeconds" rightMonths="right.months" rightDays="right.days" rightMillis="right.milliSeconds"/>
           <#else>
-          <@intervalCompareBlock left="left" right="right" leftMonths="0" leftDays="left.days" leftMillis="left.milliSeconds" rightMonths="0" rightDays="right.days" rightMillis="right.milliSeconds" output="cmp"/>
+          <@intervalConvertBlock left="left" right="right" leftMonths="0" leftDays="left.days" leftMillis="left.milliSeconds" rightMonths="0" rightDays="right.days" rightMillis="right.milliSeconds"/>
           </#if>
 
-          out.value = (cmp == 0) ? 1 : 0;
+          out.value = leftMS == rightMS ? 1 : 0;
       }
   }
 
@@ -336,16 +334,13 @@ public class GCompare${type.name}Functions {
       public void setup(RecordBatch b) {}
 
       public void eval() {
-
-          int cmp;
-
           <#if type.name == "Interval">
-          <@intervalCompareBlock left="left" right="right" leftMonths="left.months" leftDays="left.days" leftMillis="left.milliSeconds" rightMonths="right.months" rightDays="right.days" rightMillis="right.milliSeconds" output="cmp"/>
+          <@intervalConvertBlock left="left" right="right" leftMonths="left.months" leftDays="left.days" leftMillis="left.milliSeconds" rightMonths="right.months" rightDays="right.days" rightMillis="right.milliSeconds"/>
           <#else>
-          <@intervalCompareBlock left="left" right="right" leftMonths="0" leftDays="left.days" leftMillis="left.milliSeconds" rightMonths="0" rightDays="right.days" rightMillis="right.milliSeconds" output="cmp"/>
+          <@intervalConvertBlock left="left" right="right" leftMonths="0" leftDays="left.days" leftMillis="left.milliSeconds" rightMonths="0" rightDays="right.days" rightMillis="right.milliSeconds"/>
           </#if>
 
-          out.value = (cmp != 0) ? 1 : 0;
+          out.value = leftMS != rightMS ? 1 : 0;
       }
   }
 }
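The intervalConvertBlock macro replaces the old three-way intervalCompareBlock: because months have no fixed millisecond length, both intervals are applied to the same starting instant (the epoch, in UTC) and the resulting instants are compared. A hedged standalone Java sketch of what the macro expands to, using the same Joda-Time calls the template emits:

import org.joda.time.DateTimeZone;
import org.joda.time.MutableDateTime;

public final class IntervalCompareSketch {

  // Anchor the interval at 1970-01-01T00:00:00Z and return the resulting
  // instant in milliseconds, exactly as the macro body does.
  static long toEpochMillis(int months, int days, int millis) {
    MutableDateTime d =
        new MutableDateTime(1970, 1, 1, 0, 0, 0, 0, DateTimeZone.UTC);
    d.addMonths(months);
    d.addDays(days);
    d.add(millis);
    return d.getMillis();
  }

  public static void main(String[] args) {
    // "1 month" vs "30 days": January 1970 has 31 days, so the month wins.
    long leftMS  = toEpochMillis(1, 0, 0);
    long rightMS = toEpochMillis(0, 30, 0);
    System.out.println(leftMS < rightMS ? 1 : 0);   // less than        -> 0
    System.out.println(leftMS >= rightMS ? 1 : 0);  // greater or equal -> 1
  }
}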


[17/32] git commit: DRILL-870: C++ Client. Support negative interval types.

Posted by ja...@apache.org.
DRILL-870: C++ Client. Support negative interval types.


Project: http://git-wip-us.apache.org/repos/asf/incubator-drill/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-drill/commit/632f5ca9
Tree: http://git-wip-us.apache.org/repos/asf/incubator-drill/tree/632f5ca9
Diff: http://git-wip-us.apache.org/repos/asf/incubator-drill/diff/632f5ca9

Branch: refs/heads/master
Commit: 632f5ca9cb8f096c1f70265ecd37d97aed17089e
Parents: b90956e
Author: Xiao Meng <xi...@gmail.com>
Authored: Fri May 30 13:12:46 2014 -0700
Committer: Jacques Nadeau <ja...@apache.org>
Committed: Thu Jun 19 20:30:41 2014 -0700

----------------------------------------------------------------------
 .../native/client/src/clientlib/recordBatch.cpp | 60 ++++++++++++++------
 .../client/src/include/drill/recordBatch.hpp    | 26 ++++-----
 2 files changed, 55 insertions(+), 31 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/632f5ca9/contrib/native/client/src/clientlib/recordBatch.cpp
----------------------------------------------------------------------
diff --git a/contrib/native/client/src/clientlib/recordBatch.cpp b/contrib/native/client/src/clientlib/recordBatch.cpp
index 90fe11a..81b9dbe 100644
--- a/contrib/native/client/src/clientlib/recordBatch.cpp
+++ b/contrib/native/client/src/clientlib/recordBatch.cpp
@@ -20,10 +20,11 @@
 #include "drill/recordBatch.hpp"
 #include "utils.hpp"
 
-const uint32_t YEARS_TO_MONTHS=12;
-const uint32_t HOURS_TO_MILLIS=60*60*1000;
-const uint32_t MINUTES_TO_MILLIS=60*1000;
-const uint32_t SECONDS_TO_MILLIS=1000;
+const int32_t YEARS_TO_MONTHS=12;
+const int32_t DAYS_TO_MILLIS=24*60*60*1000;
+const int32_t HOURS_TO_MILLIS=60*60*1000;
+const int32_t MINUTES_TO_MILLIS=60*1000;
+const int32_t SECONDS_TO_MILLIS=1000;
 extern "C"
 {
     #include "y2038/time64.h"
@@ -476,45 +477,68 @@ std::string DateTimeTZHolder::toString(){
 std::string IntervalYearHolder::toString(){
     std::stringstream sstr;
 
-    uint32_t years  = (m_month / YEARS_TO_MONTHS);
-    uint32_t months = (m_month % YEARS_TO_MONTHS);
+    bool isNegative = (m_month < 0);
+    int32_t m = (isNegative ? - m_month : m_month);
 
+    int32_t years  = (m / YEARS_TO_MONTHS);
+    int32_t months = (m % YEARS_TO_MONTHS);
+
+    if (isNegative) sstr << "-"; // put negative sign here if negative
     sstr << years << "-" << months;
     return sstr.str();
 };
 
+// Drill may populate data like 25 hours ("0 25:0:0.0"), we should normalize it to
+// 1 day 1 hour "1 1:0:0.0"
 std::string IntervalDayHolder::toString(){
     std::stringstream sstr;
 
-    uint32_t hours  = m_ms / (HOURS_TO_MILLIS);
-    uint32_t millis     = m_ms % (HOURS_TO_MILLIS);
+    bool isNegative = (m_day < 0) || ( m_day == 0 && m_ms < 0);
+    int32_t days = (m_day < 0 ? - m_day : m_day);
+    int32_t ms = (m_ms < 0 ? - m_ms : m_ms);
+
+    days += ms / (DAYS_TO_MILLIS);
+    int32_t millis = ms % (DAYS_TO_MILLIS);
+    int32_t hours  = millis / (HOURS_TO_MILLIS);
+    millis = millis % (HOURS_TO_MILLIS);
 
-    uint32_t minutes = millis / (MINUTES_TO_MILLIS);
+    int32_t minutes = millis / (MINUTES_TO_MILLIS);
     millis      = millis % (MINUTES_TO_MILLIS);
 
-    uint32_t seconds = millis / (SECONDS_TO_MILLIS);
+    int32_t seconds = millis / (SECONDS_TO_MILLIS);
     millis      = millis % (SECONDS_TO_MILLIS);
 
-    sstr << m_day<< " " << hours << ":"<<minutes<<":"<<seconds<<"."<<millis;
+    assert(hours >=0 && hours <= 23);
+    if(isNegative) sstr << "-";
+    sstr << days << " " << hours << ":"<<minutes<<":"<<seconds<<"."<<millis;
     return sstr.str();
 };
 
 std::string IntervalHolder::toString(){
     std::stringstream sstr;
 
-    uint32_t years  = (m_month / YEARS_TO_MONTHS);
-    uint32_t months = (m_month % YEARS_TO_MONTHS);
+    bool isNegative = (m_month < 0) || (m_month == 0 && m_day < 0 ) || (m_month == 0 && m_day == 0 && m_ms < 0);
+    int32_t m = (m_month < 0 ? - m_month : m_month);
+    int32_t days = (m_day < 0 ? - m_day : m_day);
+    int32_t ms = (m_ms < 0 ? - m_ms : m_ms);
+
+    int32_t years  = (m / YEARS_TO_MONTHS);
+    int32_t months = (m % YEARS_TO_MONTHS);
 
-    uint32_t hours  = m_ms / (HOURS_TO_MILLIS);
-    uint32_t millis     = m_ms % (HOURS_TO_MILLIS);
+    days   += ms / (DAYS_TO_MILLIS);
+    int32_t millis = ms % (DAYS_TO_MILLIS);
+    int32_t hours  = millis / (HOURS_TO_MILLIS);
+    millis      = millis % (HOURS_TO_MILLIS);
 
-    uint32_t minutes = millis / (MINUTES_TO_MILLIS);
+    int32_t minutes = millis / (MINUTES_TO_MILLIS);
     millis      = millis % (MINUTES_TO_MILLIS);
 
-    uint32_t seconds = millis / (SECONDS_TO_MILLIS);
+    int32_t seconds = millis / (SECONDS_TO_MILLIS);
     millis      = millis % (SECONDS_TO_MILLIS);
 
-    sstr << years << "-" << months<< "-" << m_day<< " " << hours << ":"<<minutes<<":"<<seconds<<"."<<millis;
+    assert(hours >=0 && hours <= 23);
+    if (isNegative) sstr << "-";
+    sstr << years << "-" << months<< "-" << days << " " << hours << ":"<<minutes<<":"<<seconds<<"."<<millis;
     return sstr.str();
 };
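The same normalization, restated as a hedged standalone Java sketch (constant names mirror the C++ above; nothing here is Drill API): take the sign from the most significant nonzero field, work on magnitudes, carry whole days out of the millisecond component, and emit a single leading minus.

public final class IntervalDayFormatSketch {

  static final int DAYS_TO_MILLIS    = 24 * 60 * 60 * 1000;
  static final int HOURS_TO_MILLIS   = 60 * 60 * 1000;
  static final int MINUTES_TO_MILLIS = 60 * 1000;
  static final int SECONDS_TO_MILLIS = 1000;

  static String format(int day, int msIn) {
    boolean isNegative = day < 0 || (day == 0 && msIn < 0);
    int days = Math.abs(day);
    int ms   = Math.abs(msIn);

    days += ms / DAYS_TO_MILLIS;   // e.g. 25 hours carries into 1 day 1 hour
    int millis  = ms % DAYS_TO_MILLIS;
    int hours   = millis / HOURS_TO_MILLIS;
    millis     %= HOURS_TO_MILLIS;
    int minutes = millis / MINUTES_TO_MILLIS;
    millis     %= MINUTES_TO_MILLIS;
    int seconds = millis / SECONDS_TO_MILLIS;
    millis     %= SECONDS_TO_MILLIS;

    return (isNegative ? "-" : "")
        + days + " " + hours + ":" + minutes + ":" + seconds + "." + millis;
  }

  public static void main(String[] args) {
    System.out.println(format(0, 25 * HOURS_TO_MILLIS));  // "1 1:0:0.0"
    System.out.println(format(-1, -HOURS_TO_MILLIS));     // "-1 1:0:0.0"
  }
}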
 

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/632f5ca9/contrib/native/client/src/include/drill/recordBatch.hpp
----------------------------------------------------------------------
diff --git a/contrib/native/client/src/include/drill/recordBatch.hpp b/contrib/native/client/src/include/drill/recordBatch.hpp
index dab8b9b..984588f 100644
--- a/contrib/native/client/src/include/drill/recordBatch.hpp
+++ b/contrib/native/client/src/include/drill/recordBatch.hpp
@@ -454,41 +454,41 @@ struct DateTimeTZHolder: public DateTimeHolder{
 
 struct IntervalYearHolder{
     IntervalYearHolder(ByteBuf_t b){
-        m_month=*(uint32_t*)b;
+        m_month=*(int32_t*)b;
         load();
     }
     void load(){};
     std::string toString();
-    uint32_t m_month;
+    int32_t m_month;
     static uint32_t size(){ return sizeof(uint32_t); }
 };
 
 struct IntervalDayHolder{
     IntervalDayHolder(ByteBuf_t b){
-        m_day=*(uint32_t*)(b);
-        m_ms=*(uint32_t*)(b+sizeof(uint32_t));
+        m_day=*(int32_t*)(b);
+        m_ms=*(int32_t*)(b+sizeof(int32_t));
         load();
     }
     void load(){};
     std::string toString();
-    uint32_t m_day;
-    uint32_t m_ms;
+    int32_t m_day;
+    int32_t m_ms;
     static uint32_t size(){ return 2*sizeof(uint32_t)+4; }
 };
 
 struct IntervalHolder{
     IntervalHolder(ByteBuf_t b){
-        m_month=*(uint32_t*)b;
-        m_day=*(uint32_t*)(b+sizeof(uint32_t));
-        m_ms=*(uint32_t*)(b+2*sizeof(uint32_t));
+        m_month=*(int32_t*)b;
+        m_day=*(int32_t*)(b+sizeof(int32_t));
+        m_ms=*(int32_t*)(b+2*sizeof(int32_t));
         load();
     }
     void load(){};
     std::string toString();
-    uint32_t m_month;
-    uint32_t m_day;
-    uint32_t m_ms;
-    static uint32_t size(){ return 3*sizeof(uint32_t)+4; }
+    int32_t m_month;
+    int32_t m_day;
+    int32_t m_ms;
+    static uint32_t size(){ return 3*sizeof(int32_t)+4; }
 };
 
 /*
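
For reference, the holders above read consecutive signed 32-bit fields out of the value buffer, and size() reports four trailing bytes beyond the fields themselves (presumably padding in Drill's interval encoding). A hedged Java sketch of that layout for IntervalHolder; little-endian byte order is an assumption here, matching the raw pointer casts on a little-endian host:

import java.nio.ByteBuffer;
import java.nio.ByteOrder;

public final class IntervalLayoutSketch {

  final int months;  // offset 0
  final int days;    // offset 4
  final int millis;  // offset 8

  IntervalLayoutSketch(ByteBuffer b) {
    b.order(ByteOrder.LITTLE_ENDIAN);
    months = b.getInt(0);
    days   = b.getInt(4);
    millis = b.getInt(8);
  }

  // Mirrors IntervalHolder::size(): three 4-byte fields plus 4 unused bytes.
  static int size() {
    return 3 * Integer.BYTES + 4;
  }
}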


[12/32] DRILL-875: Fixes for DRILL-707, DRILL-780, DRILL-835 (Schema change), DRILL-852, DRILL-876, DRILL_877, DRILL-878, DRILL-890

Posted by ja...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/aaa4db74/contrib/native/client/src/protobuf/UserBitShared.pb.cc
----------------------------------------------------------------------
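
The hunks below are protoc-generated C++ for the new query-profile messages (QueryProfile, MajorFragmentProfile, MinorFragmentProfile, OperatorProfile, StreamProfile, MetricValue) and enums (QueryType, FragmentState, CoreOperatorType) added to UserBitShared.proto. From the field comments embedded in the generated parser, QueryProfile carries id, type, start, end, query, plan, and a repeated fragment_profile. As orientation only, a hypothetical Java usage sketch; the class and method names follow protobuf-java's standard generated naming for those fields and the java_package/java_outer_classname options visible in the descriptor, and are not verified against Drill's build:

import org.apache.drill.exec.proto.UserBitShared.MajorFragmentProfile;
import org.apache.drill.exec.proto.UserBitShared.QueryProfile;
import org.apache.drill.exec.proto.UserBitShared.QueryType;

public final class QueryProfileSketch {
  public static void main(String[] args) {
    // Build a profile the way a server-side component might populate one.
    QueryProfile profile = QueryProfile.newBuilder()
        .setType(QueryType.SQL)             // enum values: SQL, LOGICAL, PHYSICAL
        .setStart(System.currentTimeMillis())
        .setEnd(System.currentTimeMillis())
        .setQuery("SELECT 1")
        .addFragmentProfile(MajorFragmentProfile.newBuilder()
            .setMajorFragmentId(0)
            .build())
        .build();
    System.out.println(profile.getFragmentProfileCount());  // 1
  }
}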
diff --git a/contrib/native/client/src/protobuf/UserBitShared.pb.cc b/contrib/native/client/src/protobuf/UserBitShared.pb.cc
index 1a0b634..154de54 100644
--- a/contrib/native/client/src/protobuf/UserBitShared.pb.cc
+++ b/contrib/native/client/src/protobuf/UserBitShared.pb.cc
@@ -43,7 +43,28 @@ const ::google::protobuf::EnumDescriptor* NamePart_Type_descriptor_ = NULL;
 const ::google::protobuf::Descriptor* SerializedField_descriptor_ = NULL;
 const ::google::protobuf::internal::GeneratedMessageReflection*
   SerializedField_reflection_ = NULL;
+const ::google::protobuf::Descriptor* QueryProfile_descriptor_ = NULL;
+const ::google::protobuf::internal::GeneratedMessageReflection*
+  QueryProfile_reflection_ = NULL;
+const ::google::protobuf::Descriptor* MajorFragmentProfile_descriptor_ = NULL;
+const ::google::protobuf::internal::GeneratedMessageReflection*
+  MajorFragmentProfile_reflection_ = NULL;
+const ::google::protobuf::Descriptor* MinorFragmentProfile_descriptor_ = NULL;
+const ::google::protobuf::internal::GeneratedMessageReflection*
+  MinorFragmentProfile_reflection_ = NULL;
+const ::google::protobuf::Descriptor* OperatorProfile_descriptor_ = NULL;
+const ::google::protobuf::internal::GeneratedMessageReflection*
+  OperatorProfile_reflection_ = NULL;
+const ::google::protobuf::Descriptor* StreamProfile_descriptor_ = NULL;
+const ::google::protobuf::internal::GeneratedMessageReflection*
+  StreamProfile_reflection_ = NULL;
+const ::google::protobuf::Descriptor* MetricValue_descriptor_ = NULL;
+const ::google::protobuf::internal::GeneratedMessageReflection*
+  MetricValue_reflection_ = NULL;
 const ::google::protobuf::EnumDescriptor* RpcChannel_descriptor_ = NULL;
+const ::google::protobuf::EnumDescriptor* QueryType_descriptor_ = NULL;
+const ::google::protobuf::EnumDescriptor* FragmentState_descriptor_ = NULL;
+const ::google::protobuf::EnumDescriptor* CoreOperatorType_descriptor_ = NULL;
 
 }  // namespace
 
@@ -178,7 +199,125 @@ void protobuf_AssignDesc_UserBitShared_2eproto() {
       ::google::protobuf::DescriptorPool::generated_pool(),
       ::google::protobuf::MessageFactory::generated_factory(),
       sizeof(SerializedField));
+  QueryProfile_descriptor_ = file->message_type(7);
+  static const int QueryProfile_offsets_[7] = {
+    GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(QueryProfile, id_),
+    GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(QueryProfile, type_),
+    GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(QueryProfile, start_),
+    GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(QueryProfile, end_),
+    GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(QueryProfile, query_),
+    GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(QueryProfile, plan_),
+    GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(QueryProfile, fragment_profile_),
+  };
+  QueryProfile_reflection_ =
+    new ::google::protobuf::internal::GeneratedMessageReflection(
+      QueryProfile_descriptor_,
+      QueryProfile::default_instance_,
+      QueryProfile_offsets_,
+      GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(QueryProfile, _has_bits_[0]),
+      GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(QueryProfile, _unknown_fields_),
+      -1,
+      ::google::protobuf::DescriptorPool::generated_pool(),
+      ::google::protobuf::MessageFactory::generated_factory(),
+      sizeof(QueryProfile));
+  MajorFragmentProfile_descriptor_ = file->message_type(8);
+  static const int MajorFragmentProfile_offsets_[2] = {
+    GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(MajorFragmentProfile, major_fragment_id_),
+    GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(MajorFragmentProfile, minor_fragment_profile_),
+  };
+  MajorFragmentProfile_reflection_ =
+    new ::google::protobuf::internal::GeneratedMessageReflection(
+      MajorFragmentProfile_descriptor_,
+      MajorFragmentProfile::default_instance_,
+      MajorFragmentProfile_offsets_,
+      GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(MajorFragmentProfile, _has_bits_[0]),
+      GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(MajorFragmentProfile, _unknown_fields_),
+      -1,
+      ::google::protobuf::DescriptorPool::generated_pool(),
+      ::google::protobuf::MessageFactory::generated_factory(),
+      sizeof(MajorFragmentProfile));
+  MinorFragmentProfile_descriptor_ = file->message_type(9);
+  static const int MinorFragmentProfile_offsets_[9] = {
+    GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(MinorFragmentProfile, state_),
+    GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(MinorFragmentProfile, error_),
+    GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(MinorFragmentProfile, minor_fragment_id_),
+    GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(MinorFragmentProfile, operator_profile_),
+    GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(MinorFragmentProfile, start_time_),
+    GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(MinorFragmentProfile, end_time_),
+    GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(MinorFragmentProfile, memory_used_),
+    GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(MinorFragmentProfile, max_memory_used_),
+    GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(MinorFragmentProfile, endpoint_),
+  };
+  MinorFragmentProfile_reflection_ =
+    new ::google::protobuf::internal::GeneratedMessageReflection(
+      MinorFragmentProfile_descriptor_,
+      MinorFragmentProfile::default_instance_,
+      MinorFragmentProfile_offsets_,
+      GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(MinorFragmentProfile, _has_bits_[0]),
+      GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(MinorFragmentProfile, _unknown_fields_),
+      -1,
+      ::google::protobuf::DescriptorPool::generated_pool(),
+      ::google::protobuf::MessageFactory::generated_factory(),
+      sizeof(MinorFragmentProfile));
+  OperatorProfile_descriptor_ = file->message_type(10);
+  static const int OperatorProfile_offsets_[7] = {
+    GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(OperatorProfile, input_profile_),
+    GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(OperatorProfile, operator_id_),
+    GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(OperatorProfile, operator_type_),
+    GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(OperatorProfile, setup_nanos_),
+    GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(OperatorProfile, process_nanos_),
+    GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(OperatorProfile, local_memory_allocated_),
+    GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(OperatorProfile, metric_),
+  };
+  OperatorProfile_reflection_ =
+    new ::google::protobuf::internal::GeneratedMessageReflection(
+      OperatorProfile_descriptor_,
+      OperatorProfile::default_instance_,
+      OperatorProfile_offsets_,
+      GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(OperatorProfile, _has_bits_[0]),
+      GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(OperatorProfile, _unknown_fields_),
+      -1,
+      ::google::protobuf::DescriptorPool::generated_pool(),
+      ::google::protobuf::MessageFactory::generated_factory(),
+      sizeof(OperatorProfile));
+  StreamProfile_descriptor_ = file->message_type(11);
+  static const int StreamProfile_offsets_[3] = {
+    GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(StreamProfile, records_),
+    GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(StreamProfile, batches_),
+    GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(StreamProfile, schemas_),
+  };
+  StreamProfile_reflection_ =
+    new ::google::protobuf::internal::GeneratedMessageReflection(
+      StreamProfile_descriptor_,
+      StreamProfile::default_instance_,
+      StreamProfile_offsets_,
+      GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(StreamProfile, _has_bits_[0]),
+      GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(StreamProfile, _unknown_fields_),
+      -1,
+      ::google::protobuf::DescriptorPool::generated_pool(),
+      ::google::protobuf::MessageFactory::generated_factory(),
+      sizeof(StreamProfile));
+  MetricValue_descriptor_ = file->message_type(12);
+  static const int MetricValue_offsets_[3] = {
+    GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(MetricValue, metric_id_),
+    GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(MetricValue, long_value_),
+    GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(MetricValue, double_value_),
+  };
+  MetricValue_reflection_ =
+    new ::google::protobuf::internal::GeneratedMessageReflection(
+      MetricValue_descriptor_,
+      MetricValue::default_instance_,
+      MetricValue_offsets_,
+      GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(MetricValue, _has_bits_[0]),
+      GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(MetricValue, _unknown_fields_),
+      -1,
+      ::google::protobuf::DescriptorPool::generated_pool(),
+      ::google::protobuf::MessageFactory::generated_factory(),
+      sizeof(MetricValue));
   RpcChannel_descriptor_ = file->enum_type(0);
+  QueryType_descriptor_ = file->enum_type(1);
+  FragmentState_descriptor_ = file->enum_type(2);
+  CoreOperatorType_descriptor_ = file->enum_type(3);
 }
 
 namespace {
@@ -205,6 +344,18 @@ void protobuf_RegisterTypes(const ::std::string&) {
     NamePart_descriptor_, &NamePart::default_instance());
   ::google::protobuf::MessageFactory::InternalRegisterGeneratedMessage(
     SerializedField_descriptor_, &SerializedField::default_instance());
+  ::google::protobuf::MessageFactory::InternalRegisterGeneratedMessage(
+    QueryProfile_descriptor_, &QueryProfile::default_instance());
+  ::google::protobuf::MessageFactory::InternalRegisterGeneratedMessage(
+    MajorFragmentProfile_descriptor_, &MajorFragmentProfile::default_instance());
+  ::google::protobuf::MessageFactory::InternalRegisterGeneratedMessage(
+    MinorFragmentProfile_descriptor_, &MinorFragmentProfile::default_instance());
+  ::google::protobuf::MessageFactory::InternalRegisterGeneratedMessage(
+    OperatorProfile_descriptor_, &OperatorProfile::default_instance());
+  ::google::protobuf::MessageFactory::InternalRegisterGeneratedMessage(
+    StreamProfile_descriptor_, &StreamProfile::default_instance());
+  ::google::protobuf::MessageFactory::InternalRegisterGeneratedMessage(
+    MetricValue_descriptor_, &MetricValue::default_instance());
 }
 
 }  // namespace
@@ -224,6 +375,18 @@ void protobuf_ShutdownFile_UserBitShared_2eproto() {
   delete NamePart_reflection_;
   delete SerializedField::default_instance_;
   delete SerializedField_reflection_;
+  delete QueryProfile::default_instance_;
+  delete QueryProfile_reflection_;
+  delete MajorFragmentProfile::default_instance_;
+  delete MajorFragmentProfile_reflection_;
+  delete MinorFragmentProfile::default_instance_;
+  delete MinorFragmentProfile_reflection_;
+  delete OperatorProfile::default_instance_;
+  delete OperatorProfile_reflection_;
+  delete StreamProfile::default_instance_;
+  delete StreamProfile_reflection_;
+  delete MetricValue::default_instance_;
+  delete MetricValue_reflection_;
 }
 
 void protobuf_AddDesc_UserBitShared_2eproto() {
@@ -258,10 +421,55 @@ void protobuf_AddDesc_UserBitShared_2eproto() {
     "hared.NamePart\022+\n\005child\030\003 \003(\0132\034.exec.sha"
     "red.SerializedField\022\023\n\013value_count\030\004 \001(\005"
     "\022\027\n\017var_byte_length\030\005 \001(\005\022\023\n\013group_count"
-    "\030\006 \001(\005\022\025\n\rbuffer_length\030\007 \001(\005*5\n\nRpcChan"
-    "nel\022\017\n\013BIT_CONTROL\020\000\022\014\n\010BIT_DATA\020\001\022\010\n\004US"
-    "ER\020\002B.\n\033org.apache.drill.exec.protoB\rUse"
-    "rBitSharedH\001", 1012);
+    "\030\006 \001(\005\022\025\n\rbuffer_length\030\007 \001(\005\"\314\001\n\014QueryP"
+    "rofile\022 \n\002id\030\001 \001(\0132\024.exec.shared.QueryId"
+    "\022$\n\004type\030\002 \001(\0162\026.exec.shared.QueryType\022\r"
+    "\n\005start\030\003 \001(\003\022\013\n\003end\030\004 \001(\003\022\r\n\005query\030\005 \001("
+    "\t\022\014\n\004plan\030\006 \001(\t\022;\n\020fragment_profile\030\007 \003("
+    "\0132!.exec.shared.MajorFragmentProfile\"t\n\024"
+    "MajorFragmentProfile\022\031\n\021major_fragment_i"
+    "d\030\001 \001(\005\022A\n\026minor_fragment_profile\030\002 \003(\0132"
+    "!.exec.shared.MinorFragmentProfile\"\274\002\n\024M"
+    "inorFragmentProfile\022)\n\005state\030\001 \001(\0162\032.exe"
+    "c.shared.FragmentState\022(\n\005error\030\002 \001(\0132\031."
+    "exec.shared.DrillPBError\022\031\n\021minor_fragme"
+    "nt_id\030\003 \001(\005\0226\n\020operator_profile\030\004 \003(\0132\034."
+    "exec.shared.OperatorProfile\022\022\n\nstart_tim"
+    "e\030\005 \001(\003\022\020\n\010end_time\030\006 \001(\003\022\023\n\013memory_used"
+    "\030\007 \001(\003\022\027\n\017max_memory_used\030\010 \001(\003\022(\n\010endpo"
+    "int\030\t \001(\0132\026.exec.DrillbitEndpoint\"\346\001\n\017Op"
+    "eratorProfile\0221\n\rinput_profile\030\001 \003(\0132\032.e"
+    "xec.shared.StreamProfile\022\023\n\013operator_id\030"
+    "\003 \001(\005\022\025\n\roperator_type\030\004 \001(\005\022\023\n\013setup_na"
+    "nos\030\005 \001(\003\022\025\n\rprocess_nanos\030\006 \001(\003\022\036\n\026loca"
+    "l_memory_allocated\030\007 \001(\003\022(\n\006metric\030\010 \003(\013"
+    "2\030.exec.shared.MetricValue\"B\n\rStreamProf"
+    "ile\022\017\n\007records\030\001 \001(\003\022\017\n\007batches\030\002 \001(\003\022\017\n"
+    "\007schemas\030\003 \001(\003\"J\n\013MetricValue\022\021\n\tmetric_"
+    "id\030\001 \001(\005\022\022\n\nlong_value\030\002 \001(\003\022\024\n\014double_v"
+    "alue\030\003 \001(\001*5\n\nRpcChannel\022\017\n\013BIT_CONTROL\020"
+    "\000\022\014\n\010BIT_DATA\020\001\022\010\n\004USER\020\002*/\n\tQueryType\022\007"
+    "\n\003SQL\020\001\022\013\n\007LOGICAL\020\002\022\014\n\010PHYSICAL\020\003*k\n\rFr"
+    "agmentState\022\013\n\007SENDING\020\000\022\027\n\023AWAITING_ALL"
+    "OCATION\020\001\022\013\n\007RUNNING\020\002\022\014\n\010FINISHED\020\003\022\r\n\t"
+    "CANCELLED\020\004\022\n\n\006FAILED\020\005*\345\004\n\020CoreOperator"
+    "Type\022\021\n\rSINGLE_SENDER\020\000\022\024\n\020BROADCAST_SEN"
+    "DER\020\001\022\n\n\006FILTER\020\002\022\022\n\016HASH_AGGREGATE\020\003\022\r\n"
+    "\tHASH_JOIN\020\004\022\016\n\nMERGE_JOIN\020\005\022\031\n\025HASH_PAR"
+    "TITION_SENDER\020\006\022\t\n\005LIMIT\020\007\022\024\n\020MERGING_RE"
+    "CEIVER\020\010\022\034\n\030ORDERED_PARTITION_SENDER\020\t\022\013"
+    "\n\007PROJECT\020\n\022\023\n\017RANDOM_RECEIVER\020\013\022\020\n\014RANG"
+    "E_SENDER\020\014\022\n\n\006SCREEN\020\r\022\034\n\030SELECTION_VECT"
+    "OR_REMOVER\020\016\022\027\n\023STREAMING_AGGREGATE\020\017\022\016\n"
+    "\nTOP_N_SORT\020\020\022\021\n\rEXTERNAL_SORT\020\021\022\t\n\005TRAC"
+    "E\020\022\022\t\n\005UNION\020\023\022\014\n\010OLD_SORT\020\024\022\032\n\026PARQUET_"
+    "ROW_GROUP_SCAN\020\025\022\021\n\rHIVE_SUB_SCAN\020\026\022\025\n\021S"
+    "YSTEM_TABLE_SCAN\020\027\022\021\n\rMOCK_SUB_SCAN\020\030\022\022\n"
+    "\016PARQUET_WRITER\020\031\022\023\n\017DIRECT_SUB_SCAN\020\032\022\017"
+    "\n\013TEXT_WRITER\020\033\022\021\n\rTEXT_SUB_SCAN\020\034\022\021\n\rJS"
+    "ON_SUB_SCAN\020\035\022\030\n\024INFO_SCHEMA_SUB_SCAN\020\036B"
+    ".\n\033org.apache.drill.exec.protoB\rUserBitS"
+    "haredH\001", 2807);
   ::google::protobuf::MessageFactory::InternalRegisterGeneratedFile(
     "UserBitShared.proto", &protobuf_RegisterTypes);
   UserCredentials::default_instance_ = new UserCredentials();
@@ -271,6 +479,12 @@ void protobuf_AddDesc_UserBitShared_2eproto() {
   RecordBatchDef::default_instance_ = new RecordBatchDef();
   NamePart::default_instance_ = new NamePart();
   SerializedField::default_instance_ = new SerializedField();
+  QueryProfile::default_instance_ = new QueryProfile();
+  MajorFragmentProfile::default_instance_ = new MajorFragmentProfile();
+  MinorFragmentProfile::default_instance_ = new MinorFragmentProfile();
+  OperatorProfile::default_instance_ = new OperatorProfile();
+  StreamProfile::default_instance_ = new StreamProfile();
+  MetricValue::default_instance_ = new MetricValue();
   UserCredentials::default_instance_->InitAsDefaultInstance();
   QueryId::default_instance_->InitAsDefaultInstance();
   DrillPBError::default_instance_->InitAsDefaultInstance();
@@ -278,6 +492,12 @@ void protobuf_AddDesc_UserBitShared_2eproto() {
   RecordBatchDef::default_instance_->InitAsDefaultInstance();
   NamePart::default_instance_->InitAsDefaultInstance();
   SerializedField::default_instance_->InitAsDefaultInstance();
+  QueryProfile::default_instance_->InitAsDefaultInstance();
+  MajorFragmentProfile::default_instance_->InitAsDefaultInstance();
+  MinorFragmentProfile::default_instance_->InitAsDefaultInstance();
+  OperatorProfile::default_instance_->InitAsDefaultInstance();
+  StreamProfile::default_instance_->InitAsDefaultInstance();
+  MetricValue::default_instance_->InitAsDefaultInstance();
   ::google::protobuf::internal::OnShutdown(&protobuf_ShutdownFile_UserBitShared_2eproto);
 }
 
@@ -302,6 +522,82 @@ bool RpcChannel_IsValid(int value) {
   }
 }
 
+const ::google::protobuf::EnumDescriptor* QueryType_descriptor() {
+  protobuf_AssignDescriptorsOnce();
+  return QueryType_descriptor_;
+}
+bool QueryType_IsValid(int value) {
+  switch(value) {
+    case 1:
+    case 2:
+    case 3:
+      return true;
+    default:
+      return false;
+  }
+}
+
+const ::google::protobuf::EnumDescriptor* FragmentState_descriptor() {
+  protobuf_AssignDescriptorsOnce();
+  return FragmentState_descriptor_;
+}
+bool FragmentState_IsValid(int value) {
+  switch(value) {
+    case 0:
+    case 1:
+    case 2:
+    case 3:
+    case 4:
+    case 5:
+      return true;
+    default:
+      return false;
+  }
+}
+
+const ::google::protobuf::EnumDescriptor* CoreOperatorType_descriptor() {
+  protobuf_AssignDescriptorsOnce();
+  return CoreOperatorType_descriptor_;
+}
+bool CoreOperatorType_IsValid(int value) {
+  switch(value) {
+    case 0:
+    case 1:
+    case 2:
+    case 3:
+    case 4:
+    case 5:
+    case 6:
+    case 7:
+    case 8:
+    case 9:
+    case 10:
+    case 11:
+    case 12:
+    case 13:
+    case 14:
+    case 15:
+    case 16:
+    case 17:
+    case 18:
+    case 19:
+    case 20:
+    case 21:
+    case 22:
+    case 23:
+    case 24:
+    case 25:
+    case 26:
+    case 27:
+    case 28:
+    case 29:
+    case 30:
+      return true;
+    default:
+      return false;
+  }
+}
+
 
 // ===================================================================
 
@@ -2588,6 +2884,2320 @@ void SerializedField::Swap(SerializedField* other) {
 }
 
 
+// ===================================================================
+
+#ifndef _MSC_VER
+const int QueryProfile::kIdFieldNumber;
+const int QueryProfile::kTypeFieldNumber;
+const int QueryProfile::kStartFieldNumber;
+const int QueryProfile::kEndFieldNumber;
+const int QueryProfile::kQueryFieldNumber;
+const int QueryProfile::kPlanFieldNumber;
+const int QueryProfile::kFragmentProfileFieldNumber;
+#endif  // !_MSC_VER
+
+QueryProfile::QueryProfile()
+  : ::google::protobuf::Message() {
+  SharedCtor();
+}
+
+void QueryProfile::InitAsDefaultInstance() {
+  id_ = const_cast< ::exec::shared::QueryId*>(&::exec::shared::QueryId::default_instance());
+}
+
+QueryProfile::QueryProfile(const QueryProfile& from)
+  : ::google::protobuf::Message() {
+  SharedCtor();
+  MergeFrom(from);
+}
+
+void QueryProfile::SharedCtor() {
+  _cached_size_ = 0;
+  id_ = NULL;
+  type_ = 1;
+  start_ = GOOGLE_LONGLONG(0);
+  end_ = GOOGLE_LONGLONG(0);
+  query_ = const_cast< ::std::string*>(&::google::protobuf::internal::kEmptyString);
+  plan_ = const_cast< ::std::string*>(&::google::protobuf::internal::kEmptyString);
+  ::memset(_has_bits_, 0, sizeof(_has_bits_));
+}
+
+QueryProfile::~QueryProfile() {
+  SharedDtor();
+}
+
+void QueryProfile::SharedDtor() {
+  if (query_ != &::google::protobuf::internal::kEmptyString) {
+    delete query_;
+  }
+  if (plan_ != &::google::protobuf::internal::kEmptyString) {
+    delete plan_;
+  }
+  if (this != default_instance_) {
+    delete id_;
+  }
+}
+
+void QueryProfile::SetCachedSize(int size) const {
+  GOOGLE_SAFE_CONCURRENT_WRITES_BEGIN();
+  _cached_size_ = size;
+  GOOGLE_SAFE_CONCURRENT_WRITES_END();
+}
+const ::google::protobuf::Descriptor* QueryProfile::descriptor() {
+  protobuf_AssignDescriptorsOnce();
+  return QueryProfile_descriptor_;
+}
+
+const QueryProfile& QueryProfile::default_instance() {
+  if (default_instance_ == NULL) protobuf_AddDesc_UserBitShared_2eproto();
+  return *default_instance_;
+}
+
+QueryProfile* QueryProfile::default_instance_ = NULL;
+
+QueryProfile* QueryProfile::New() const {
+  return new QueryProfile;
+}
+
+void QueryProfile::Clear() {
+  if (_has_bits_[0 / 32] & (0xffu << (0 % 32))) {
+    if (has_id()) {
+      if (id_ != NULL) id_->::exec::shared::QueryId::Clear();
+    }
+    type_ = 1;
+    start_ = GOOGLE_LONGLONG(0);
+    end_ = GOOGLE_LONGLONG(0);
+    if (has_query()) {
+      if (query_ != &::google::protobuf::internal::kEmptyString) {
+        query_->clear();
+      }
+    }
+    if (has_plan()) {
+      if (plan_ != &::google::protobuf::internal::kEmptyString) {
+        plan_->clear();
+      }
+    }
+  }
+  fragment_profile_.Clear();
+  ::memset(_has_bits_, 0, sizeof(_has_bits_));
+  mutable_unknown_fields()->Clear();
+}
+
+bool QueryProfile::MergePartialFromCodedStream(
+    ::google::protobuf::io::CodedInputStream* input) {
+#define DO_(EXPRESSION) if (!(EXPRESSION)) return false
+  ::google::protobuf::uint32 tag;
+  while ((tag = input->ReadTag()) != 0) {
+    switch (::google::protobuf::internal::WireFormatLite::GetTagFieldNumber(tag)) {
+      // optional .exec.shared.QueryId id = 1;
+      case 1: {
+        if (::google::protobuf::internal::WireFormatLite::GetTagWireType(tag) ==
+            ::google::protobuf::internal::WireFormatLite::WIRETYPE_LENGTH_DELIMITED) {
+          DO_(::google::protobuf::internal::WireFormatLite::ReadMessageNoVirtual(
+               input, mutable_id()));
+        } else {
+          goto handle_uninterpreted;
+        }
+        if (input->ExpectTag(16)) goto parse_type;
+        break;
+      }
+
+      // optional .exec.shared.QueryType type = 2;
+      case 2: {
+        if (::google::protobuf::internal::WireFormatLite::GetTagWireType(tag) ==
+            ::google::protobuf::internal::WireFormatLite::WIRETYPE_VARINT) {
+         parse_type:
+          int value;
+          DO_((::google::protobuf::internal::WireFormatLite::ReadPrimitive<
+                   int, ::google::protobuf::internal::WireFormatLite::TYPE_ENUM>(
+                 input, &value)));
+          if (::exec::shared::QueryType_IsValid(value)) {
+            set_type(static_cast< ::exec::shared::QueryType >(value));
+          } else {
+            mutable_unknown_fields()->AddVarint(2, value);
+          }
+        } else {
+          goto handle_uninterpreted;
+        }
+        if (input->ExpectTag(24)) goto parse_start;
+        break;
+      }
+
+      // optional int64 start = 3;
+      case 3: {
+        if (::google::protobuf::internal::WireFormatLite::GetTagWireType(tag) ==
+            ::google::protobuf::internal::WireFormatLite::WIRETYPE_VARINT) {
+         parse_start:
+          DO_((::google::protobuf::internal::WireFormatLite::ReadPrimitive<
+                   ::google::protobuf::int64, ::google::protobuf::internal::WireFormatLite::TYPE_INT64>(
+                 input, &start_)));
+          set_has_start();
+        } else {
+          goto handle_uninterpreted;
+        }
+        if (input->ExpectTag(32)) goto parse_end;
+        break;
+      }
+
+      // optional int64 end = 4;
+      case 4: {
+        if (::google::protobuf::internal::WireFormatLite::GetTagWireType(tag) ==
+            ::google::protobuf::internal::WireFormatLite::WIRETYPE_VARINT) {
+         parse_end:
+          DO_((::google::protobuf::internal::WireFormatLite::ReadPrimitive<
+                   ::google::protobuf::int64, ::google::protobuf::internal::WireFormatLite::TYPE_INT64>(
+                 input, &end_)));
+          set_has_end();
+        } else {
+          goto handle_uninterpreted;
+        }
+        if (input->ExpectTag(42)) goto parse_query;
+        break;
+      }
+
+      // optional string query = 5;
+      case 5: {
+        if (::google::protobuf::internal::WireFormatLite::GetTagWireType(tag) ==
+            ::google::protobuf::internal::WireFormatLite::WIRETYPE_LENGTH_DELIMITED) {
+         parse_query:
+          DO_(::google::protobuf::internal::WireFormatLite::ReadString(
+                input, this->mutable_query()));
+          ::google::protobuf::internal::WireFormat::VerifyUTF8String(
+            this->query().data(), this->query().length(),
+            ::google::protobuf::internal::WireFormat::PARSE);
+        } else {
+          goto handle_uninterpreted;
+        }
+        if (input->ExpectTag(50)) goto parse_plan;
+        break;
+      }
+
+      // optional string plan = 6;
+      case 6: {
+        if (::google::protobuf::internal::WireFormatLite::GetTagWireType(tag) ==
+            ::google::protobuf::internal::WireFormatLite::WIRETYPE_LENGTH_DELIMITED) {
+         parse_plan:
+          DO_(::google::protobuf::internal::WireFormatLite::ReadString(
+                input, this->mutable_plan()));
+          ::google::protobuf::internal::WireFormat::VerifyUTF8String(
+            this->plan().data(), this->plan().length(),
+            ::google::protobuf::internal::WireFormat::PARSE);
+        } else {
+          goto handle_uninterpreted;
+        }
+        if (input->ExpectTag(58)) goto parse_fragment_profile;
+        break;
+      }
+
+      // repeated .exec.shared.MajorFragmentProfile fragment_profile = 7;
+      case 7: {
+        if (::google::protobuf::internal::WireFormatLite::GetTagWireType(tag) ==
+            ::google::protobuf::internal::WireFormatLite::WIRETYPE_LENGTH_DELIMITED) {
+         parse_fragment_profile:
+          DO_(::google::protobuf::internal::WireFormatLite::ReadMessageNoVirtual(
+                input, add_fragment_profile()));
+        } else {
+          goto handle_uninterpreted;
+        }
+        if (input->ExpectTag(58)) goto parse_fragment_profile;
+        if (input->ExpectAtEnd()) return true;
+        break;
+      }
+
+      default: {
+      handle_uninterpreted:
+        if (::google::protobuf::internal::WireFormatLite::GetTagWireType(tag) ==
+            ::google::protobuf::internal::WireFormatLite::WIRETYPE_END_GROUP) {
+          return true;
+        }
+        DO_(::google::protobuf::internal::WireFormat::SkipField(
+              input, tag, mutable_unknown_fields()));
+        break;
+      }
+    }
+  }
+  return true;
+#undef DO_
+}
+
+void QueryProfile::SerializeWithCachedSizes(
+    ::google::protobuf::io::CodedOutputStream* output) const {
+  // optional .exec.shared.QueryId id = 1;
+  if (has_id()) {
+    ::google::protobuf::internal::WireFormatLite::WriteMessageMaybeToArray(
+      1, this->id(), output);
+  }
+
+  // optional .exec.shared.QueryType type = 2;
+  if (has_type()) {
+    ::google::protobuf::internal::WireFormatLite::WriteEnum(
+      2, this->type(), output);
+  }
+
+  // optional int64 start = 3;
+  if (has_start()) {
+    ::google::protobuf::internal::WireFormatLite::WriteInt64(3, this->start(), output);
+  }
+
+  // optional int64 end = 4;
+  if (has_end()) {
+    ::google::protobuf::internal::WireFormatLite::WriteInt64(4, this->end(), output);
+  }
+
+  // optional string query = 5;
+  if (has_query()) {
+    ::google::protobuf::internal::WireFormat::VerifyUTF8String(
+      this->query().data(), this->query().length(),
+      ::google::protobuf::internal::WireFormat::SERIALIZE);
+    ::google::protobuf::internal::WireFormatLite::WriteString(
+      5, this->query(), output);
+  }
+
+  // optional string plan = 6;
+  if (has_plan()) {
+    ::google::protobuf::internal::WireFormat::VerifyUTF8String(
+      this->plan().data(), this->plan().length(),
+      ::google::protobuf::internal::WireFormat::SERIALIZE);
+    ::google::protobuf::internal::WireFormatLite::WriteString(
+      6, this->plan(), output);
+  }
+
+  // repeated .exec.shared.MajorFragmentProfile fragment_profile = 7;
+  for (int i = 0; i < this->fragment_profile_size(); i++) {
+    ::google::protobuf::internal::WireFormatLite::WriteMessageMaybeToArray(
+      7, this->fragment_profile(i), output);
+  }
+
+  if (!unknown_fields().empty()) {
+    ::google::protobuf::internal::WireFormat::SerializeUnknownFields(
+        unknown_fields(), output);
+  }
+}
+
+::google::protobuf::uint8* QueryProfile::SerializeWithCachedSizesToArray(
+    ::google::protobuf::uint8* target) const {
+  // optional .exec.shared.QueryId id = 1;
+  if (has_id()) {
+    target = ::google::protobuf::internal::WireFormatLite::
+      WriteMessageNoVirtualToArray(
+        1, this->id(), target);
+  }
+
+  // optional .exec.shared.QueryType type = 2;
+  if (has_type()) {
+    target = ::google::protobuf::internal::WireFormatLite::WriteEnumToArray(
+      2, this->type(), target);
+  }
+
+  // optional int64 start = 3;
+  if (has_start()) {
+    target = ::google::protobuf::internal::WireFormatLite::WriteInt64ToArray(3, this->start(), target);
+  }
+
+  // optional int64 end = 4;
+  if (has_end()) {
+    target = ::google::protobuf::internal::WireFormatLite::WriteInt64ToArray(4, this->end(), target);
+  }
+
+  // optional string query = 5;
+  if (has_query()) {
+    ::google::protobuf::internal::WireFormat::VerifyUTF8String(
+      this->query().data(), this->query().length(),
+      ::google::protobuf::internal::WireFormat::SERIALIZE);
+    target =
+      ::google::protobuf::internal::WireFormatLite::WriteStringToArray(
+        5, this->query(), target);
+  }
+
+  // optional string plan = 6;
+  if (has_plan()) {
+    ::google::protobuf::internal::WireFormat::VerifyUTF8String(
+      this->plan().data(), this->plan().length(),
+      ::google::protobuf::internal::WireFormat::SERIALIZE);
+    target =
+      ::google::protobuf::internal::WireFormatLite::WriteStringToArray(
+        6, this->plan(), target);
+  }
+
+  // repeated .exec.shared.MajorFragmentProfile fragment_profile = 7;
+  for (int i = 0; i < this->fragment_profile_size(); i++) {
+    target = ::google::protobuf::internal::WireFormatLite::
+      WriteMessageNoVirtualToArray(
+        7, this->fragment_profile(i), target);
+  }
+
+  if (!unknown_fields().empty()) {
+    target = ::google::protobuf::internal::WireFormat::SerializeUnknownFieldsToArray(
+        unknown_fields(), target);
+  }
+  return target;
+}
+
+int QueryProfile::ByteSize() const {
+  int total_size = 0;
+
+  if (_has_bits_[0 / 32] & (0xffu << (0 % 32))) {
+    // optional .exec.shared.QueryId id = 1;
+    if (has_id()) {
+      total_size += 1 +
+        ::google::protobuf::internal::WireFormatLite::MessageSizeNoVirtual(
+          this->id());
+    }
+
+    // optional .exec.shared.QueryType type = 2;
+    if (has_type()) {
+      total_size += 1 +
+        ::google::protobuf::internal::WireFormatLite::EnumSize(this->type());
+    }
+
+    // optional int64 start = 3;
+    if (has_start()) {
+      total_size += 1 +
+        ::google::protobuf::internal::WireFormatLite::Int64Size(
+          this->start());
+    }
+
+    // optional int64 end = 4;
+    if (has_end()) {
+      total_size += 1 +
+        ::google::protobuf::internal::WireFormatLite::Int64Size(
+          this->end());
+    }
+
+    // optional string query = 5;
+    if (has_query()) {
+      total_size += 1 +
+        ::google::protobuf::internal::WireFormatLite::StringSize(
+          this->query());
+    }
+
+    // optional string plan = 6;
+    if (has_plan()) {
+      total_size += 1 +
+        ::google::protobuf::internal::WireFormatLite::StringSize(
+          this->plan());
+    }
+
+  }
+  // repeated .exec.shared.MajorFragmentProfile fragment_profile = 7;
+  total_size += 1 * this->fragment_profile_size();
+  for (int i = 0; i < this->fragment_profile_size(); i++) {
+    total_size +=
+      ::google::protobuf::internal::WireFormatLite::MessageSizeNoVirtual(
+        this->fragment_profile(i));
+  }
+
+  if (!unknown_fields().empty()) {
+    total_size +=
+      ::google::protobuf::internal::WireFormat::ComputeUnknownFieldsSize(
+        unknown_fields());
+  }
+  GOOGLE_SAFE_CONCURRENT_WRITES_BEGIN();
+  _cached_size_ = total_size;
+  GOOGLE_SAFE_CONCURRENT_WRITES_END();
+  return total_size;
+}
+
+void QueryProfile::MergeFrom(const ::google::protobuf::Message& from) {
+  GOOGLE_CHECK_NE(&from, this);
+  const QueryProfile* source =
+    ::google::protobuf::internal::dynamic_cast_if_available<const QueryProfile*>(
+      &from);
+  if (source == NULL) {
+    ::google::protobuf::internal::ReflectionOps::Merge(from, this);
+  } else {
+    MergeFrom(*source);
+  }
+}
+
+void QueryProfile::MergeFrom(const QueryProfile& from) {
+  GOOGLE_CHECK_NE(&from, this);
+  fragment_profile_.MergeFrom(from.fragment_profile_);
+  if (from._has_bits_[0 / 32] & (0xffu << (0 % 32))) {
+    if (from.has_id()) {
+      mutable_id()->::exec::shared::QueryId::MergeFrom(from.id());
+    }
+    if (from.has_type()) {
+      set_type(from.type());
+    }
+    if (from.has_start()) {
+      set_start(from.start());
+    }
+    if (from.has_end()) {
+      set_end(from.end());
+    }
+    if (from.has_query()) {
+      set_query(from.query());
+    }
+    if (from.has_plan()) {
+      set_plan(from.plan());
+    }
+  }
+  mutable_unknown_fields()->MergeFrom(from.unknown_fields());
+}
+
+void QueryProfile::CopyFrom(const ::google::protobuf::Message& from) {
+  if (&from == this) return;
+  Clear();
+  MergeFrom(from);
+}
+
+void QueryProfile::CopyFrom(const QueryProfile& from) {
+  if (&from == this) return;
+  Clear();
+  MergeFrom(from);
+}
+
+bool QueryProfile::IsInitialized() const {
+
+  return true;
+}
+
+void QueryProfile::Swap(QueryProfile* other) {
+  if (other != this) {
+    std::swap(id_, other->id_);
+    std::swap(type_, other->type_);
+    std::swap(start_, other->start_);
+    std::swap(end_, other->end_);
+    std::swap(query_, other->query_);
+    std::swap(plan_, other->plan_);
+    fragment_profile_.Swap(&other->fragment_profile_);
+    std::swap(_has_bits_[0], other->_has_bits_[0]);
+    _unknown_fields_.Swap(&other->_unknown_fields_);
+    std::swap(_cached_size_, other->_cached_size_);
+  }
+}
+
+::google::protobuf::Metadata QueryProfile::GetMetadata() const {
+  protobuf_AssignDescriptorsOnce();
+  ::google::protobuf::Metadata metadata;
+  metadata.descriptor = QueryProfile_descriptor_;
+  metadata.reflection = QueryProfile_reflection_;
+  return metadata;
+}
+
+
+// ===================================================================
+
+#ifndef _MSC_VER
+const int MajorFragmentProfile::kMajorFragmentIdFieldNumber;
+const int MajorFragmentProfile::kMinorFragmentProfileFieldNumber;
+#endif  // !_MSC_VER
+
+MajorFragmentProfile::MajorFragmentProfile()
+  : ::google::protobuf::Message() {
+  SharedCtor();
+}
+
+void MajorFragmentProfile::InitAsDefaultInstance() {
+}
+
+MajorFragmentProfile::MajorFragmentProfile(const MajorFragmentProfile& from)
+  : ::google::protobuf::Message() {
+  SharedCtor();
+  MergeFrom(from);
+}
+
+void MajorFragmentProfile::SharedCtor() {
+  _cached_size_ = 0;
+  major_fragment_id_ = 0;
+  ::memset(_has_bits_, 0, sizeof(_has_bits_));
+}
+
+MajorFragmentProfile::~MajorFragmentProfile() {
+  SharedDtor();
+}
+
+void MajorFragmentProfile::SharedDtor() {
+  if (this != default_instance_) {
+  }
+}
+
+void MajorFragmentProfile::SetCachedSize(int size) const {
+  GOOGLE_SAFE_CONCURRENT_WRITES_BEGIN();
+  _cached_size_ = size;
+  GOOGLE_SAFE_CONCURRENT_WRITES_END();
+}
+const ::google::protobuf::Descriptor* MajorFragmentProfile::descriptor() {
+  protobuf_AssignDescriptorsOnce();
+  return MajorFragmentProfile_descriptor_;
+}
+
+const MajorFragmentProfile& MajorFragmentProfile::default_instance() {
+  if (default_instance_ == NULL) protobuf_AddDesc_UserBitShared_2eproto();
+  return *default_instance_;
+}
+
+MajorFragmentProfile* MajorFragmentProfile::default_instance_ = NULL;
+
+MajorFragmentProfile* MajorFragmentProfile::New() const {
+  return new MajorFragmentProfile;
+}
+
+void MajorFragmentProfile::Clear() {
+  if (_has_bits_[0 / 32] & (0xffu << (0 % 32))) {
+    major_fragment_id_ = 0;
+  }
+  minor_fragment_profile_.Clear();
+  ::memset(_has_bits_, 0, sizeof(_has_bits_));
+  mutable_unknown_fields()->Clear();
+}
+
+bool MajorFragmentProfile::MergePartialFromCodedStream(
+    ::google::protobuf::io::CodedInputStream* input) {
+#define DO_(EXPRESSION) if (!(EXPRESSION)) return false
+  ::google::protobuf::uint32 tag;
+  while ((tag = input->ReadTag()) != 0) {
+    switch (::google::protobuf::internal::WireFormatLite::GetTagFieldNumber(tag)) {
+      // optional int32 major_fragment_id = 1;
+      case 1: {
+        if (::google::protobuf::internal::WireFormatLite::GetTagWireType(tag) ==
+            ::google::protobuf::internal::WireFormatLite::WIRETYPE_VARINT) {
+          DO_((::google::protobuf::internal::WireFormatLite::ReadPrimitive<
+                   ::google::protobuf::int32, ::google::protobuf::internal::WireFormatLite::TYPE_INT32>(
+                 input, &major_fragment_id_)));
+          set_has_major_fragment_id();
+        } else {
+          goto handle_uninterpreted;
+        }
+        if (input->ExpectTag(18)) goto parse_minor_fragment_profile;
+        break;
+      }
+
+      // repeated .exec.shared.MinorFragmentProfile minor_fragment_profile = 2;
+      case 2: {
+        if (::google::protobuf::internal::WireFormatLite::GetTagWireType(tag) ==
+            ::google::protobuf::internal::WireFormatLite::WIRETYPE_LENGTH_DELIMITED) {
+         parse_minor_fragment_profile:
+          DO_(::google::protobuf::internal::WireFormatLite::ReadMessageNoVirtual(
+                input, add_minor_fragment_profile()));
+        } else {
+          goto handle_uninterpreted;
+        }
+        if (input->ExpectTag(18)) goto parse_minor_fragment_profile;
+        if (input->ExpectAtEnd()) return true;
+        break;
+      }
+
+      default: {
+      handle_uninterpreted:
+        if (::google::protobuf::internal::WireFormatLite::GetTagWireType(tag) ==
+            ::google::protobuf::internal::WireFormatLite::WIRETYPE_END_GROUP) {
+          return true;
+        }
+        DO_(::google::protobuf::internal::WireFormat::SkipField(
+              input, tag, mutable_unknown_fields()));
+        break;
+      }
+    }
+  }
+  return true;
+#undef DO_
+}
+
+void MajorFragmentProfile::SerializeWithCachedSizes(
+    ::google::protobuf::io::CodedOutputStream* output) const {
+  // optional int32 major_fragment_id = 1;
+  if (has_major_fragment_id()) {
+    ::google::protobuf::internal::WireFormatLite::WriteInt32(1, this->major_fragment_id(), output);
+  }
+
+  // repeated .exec.shared.MinorFragmentProfile minor_fragment_profile = 2;
+  for (int i = 0; i < this->minor_fragment_profile_size(); i++) {
+    ::google::protobuf::internal::WireFormatLite::WriteMessageMaybeToArray(
+      2, this->minor_fragment_profile(i), output);
+  }
+
+  if (!unknown_fields().empty()) {
+    ::google::protobuf::internal::WireFormat::SerializeUnknownFields(
+        unknown_fields(), output);
+  }
+}
+
+::google::protobuf::uint8* MajorFragmentProfile::SerializeWithCachedSizesToArray(
+    ::google::protobuf::uint8* target) const {
+  // optional int32 major_fragment_id = 1;
+  if (has_major_fragment_id()) {
+    target = ::google::protobuf::internal::WireFormatLite::WriteInt32ToArray(1, this->major_fragment_id(), target);
+  }
+
+  // repeated .exec.shared.MinorFragmentProfile minor_fragment_profile = 2;
+  for (int i = 0; i < this->minor_fragment_profile_size(); i++) {
+    target = ::google::protobuf::internal::WireFormatLite::
+      WriteMessageNoVirtualToArray(
+        2, this->minor_fragment_profile(i), target);
+  }
+
+  if (!unknown_fields().empty()) {
+    target = ::google::protobuf::internal::WireFormat::SerializeUnknownFieldsToArray(
+        unknown_fields(), target);
+  }
+  return target;
+}
+
+int MajorFragmentProfile::ByteSize() const {
+  int total_size = 0;
+
+  if (_has_bits_[0 / 32] & (0xffu << (0 % 32))) {
+    // optional int32 major_fragment_id = 1;
+    if (has_major_fragment_id()) {
+      total_size += 1 +
+        ::google::protobuf::internal::WireFormatLite::Int32Size(
+          this->major_fragment_id());
+    }
+
+  }
+  // repeated .exec.shared.MinorFragmentProfile minor_fragment_profile = 2;
+  total_size += 1 * this->minor_fragment_profile_size();
+  for (int i = 0; i < this->minor_fragment_profile_size(); i++) {
+    total_size +=
+      ::google::protobuf::internal::WireFormatLite::MessageSizeNoVirtual(
+        this->minor_fragment_profile(i));
+  }
+
+  if (!unknown_fields().empty()) {
+    total_size +=
+      ::google::protobuf::internal::WireFormat::ComputeUnknownFieldsSize(
+        unknown_fields());
+  }
+  GOOGLE_SAFE_CONCURRENT_WRITES_BEGIN();
+  _cached_size_ = total_size;
+  GOOGLE_SAFE_CONCURRENT_WRITES_END();
+  return total_size;
+}
+
+void MajorFragmentProfile::MergeFrom(const ::google::protobuf::Message& from) {
+  GOOGLE_CHECK_NE(&from, this);
+  const MajorFragmentProfile* source =
+    ::google::protobuf::internal::dynamic_cast_if_available<const MajorFragmentProfile*>(
+      &from);
+  if (source == NULL) {
+    ::google::protobuf::internal::ReflectionOps::Merge(from, this);
+  } else {
+    MergeFrom(*source);
+  }
+}
+
+void MajorFragmentProfile::MergeFrom(const MajorFragmentProfile& from) {
+  GOOGLE_CHECK_NE(&from, this);
+  minor_fragment_profile_.MergeFrom(from.minor_fragment_profile_);
+  if (from._has_bits_[0 / 32] & (0xffu << (0 % 32))) {
+    if (from.has_major_fragment_id()) {
+      set_major_fragment_id(from.major_fragment_id());
+    }
+  }
+  mutable_unknown_fields()->MergeFrom(from.unknown_fields());
+}
+
+void MajorFragmentProfile::CopyFrom(const ::google::protobuf::Message& from) {
+  if (&from == this) return;
+  Clear();
+  MergeFrom(from);
+}
+
+void MajorFragmentProfile::CopyFrom(const MajorFragmentProfile& from) {
+  if (&from == this) return;
+  Clear();
+  MergeFrom(from);
+}
+
+bool MajorFragmentProfile::IsInitialized() const {
+
+  return true;
+}
+
+void MajorFragmentProfile::Swap(MajorFragmentProfile* other) {
+  if (other != this) {
+    std::swap(major_fragment_id_, other->major_fragment_id_);
+    minor_fragment_profile_.Swap(&other->minor_fragment_profile_);
+    std::swap(_has_bits_[0], other->_has_bits_[0]);
+    _unknown_fields_.Swap(&other->_unknown_fields_);
+    std::swap(_cached_size_, other->_cached_size_);
+  }
+}
+
+::google::protobuf::Metadata MajorFragmentProfile::GetMetadata() const {
+  protobuf_AssignDescriptorsOnce();
+  ::google::protobuf::Metadata metadata;
+  metadata.descriptor = MajorFragmentProfile_descriptor_;
+  metadata.reflection = MajorFragmentProfile_reflection_;
+  return metadata;
+}
+
+
+// ===================================================================
+
+#ifndef _MSC_VER
+const int MinorFragmentProfile::kStateFieldNumber;
+const int MinorFragmentProfile::kErrorFieldNumber;
+const int MinorFragmentProfile::kMinorFragmentIdFieldNumber;
+const int MinorFragmentProfile::kOperatorProfileFieldNumber;
+const int MinorFragmentProfile::kStartTimeFieldNumber;
+const int MinorFragmentProfile::kEndTimeFieldNumber;
+const int MinorFragmentProfile::kMemoryUsedFieldNumber;
+const int MinorFragmentProfile::kMaxMemoryUsedFieldNumber;
+const int MinorFragmentProfile::kEndpointFieldNumber;
+#endif  // !_MSC_VER
+
+MinorFragmentProfile::MinorFragmentProfile()
+  : ::google::protobuf::Message() {
+  SharedCtor();
+}
+
+void MinorFragmentProfile::InitAsDefaultInstance() {
+  error_ = const_cast< ::exec::shared::DrillPBError*>(&::exec::shared::DrillPBError::default_instance());
+  endpoint_ = const_cast< ::exec::DrillbitEndpoint*>(&::exec::DrillbitEndpoint::default_instance());
+}
+
+MinorFragmentProfile::MinorFragmentProfile(const MinorFragmentProfile& from)
+  : ::google::protobuf::Message() {
+  SharedCtor();
+  MergeFrom(from);
+}
+
+void MinorFragmentProfile::SharedCtor() {
+  _cached_size_ = 0;
+  state_ = 0;
+  error_ = NULL;
+  minor_fragment_id_ = 0;
+  start_time_ = GOOGLE_LONGLONG(0);
+  end_time_ = GOOGLE_LONGLONG(0);
+  memory_used_ = GOOGLE_LONGLONG(0);
+  max_memory_used_ = GOOGLE_LONGLONG(0);
+  endpoint_ = NULL;
+  ::memset(_has_bits_, 0, sizeof(_has_bits_));
+}
+
+MinorFragmentProfile::~MinorFragmentProfile() {
+  SharedDtor();
+}
+
+void MinorFragmentProfile::SharedDtor() {
+  if (this != default_instance_) {
+    delete error_;
+    delete endpoint_;
+  }
+}
+
+void MinorFragmentProfile::SetCachedSize(int size) const {
+  GOOGLE_SAFE_CONCURRENT_WRITES_BEGIN();
+  _cached_size_ = size;
+  GOOGLE_SAFE_CONCURRENT_WRITES_END();
+}
+const ::google::protobuf::Descriptor* MinorFragmentProfile::descriptor() {
+  protobuf_AssignDescriptorsOnce();
+  return MinorFragmentProfile_descriptor_;
+}
+
+const MinorFragmentProfile& MinorFragmentProfile::default_instance() {
+  if (default_instance_ == NULL) protobuf_AddDesc_UserBitShared_2eproto();
+  return *default_instance_;
+}
+
+MinorFragmentProfile* MinorFragmentProfile::default_instance_ = NULL;
+
+MinorFragmentProfile* MinorFragmentProfile::New() const {
+  return new MinorFragmentProfile;
+}
+
+void MinorFragmentProfile::Clear() {
+  if (_has_bits_[0 / 32] & (0xffu << (0 % 32))) {
+    state_ = 0;
+    if (has_error()) {
+      if (error_ != NULL) error_->::exec::shared::DrillPBError::Clear();
+    }
+    minor_fragment_id_ = 0;
+    start_time_ = GOOGLE_LONGLONG(0);
+    end_time_ = GOOGLE_LONGLONG(0);
+    memory_used_ = GOOGLE_LONGLONG(0);
+    max_memory_used_ = GOOGLE_LONGLONG(0);
+  }
+  if (_has_bits_[8 / 32] & (0xffu << (8 % 32))) {
+    if (has_endpoint()) {
+      if (endpoint_ != NULL) endpoint_->::exec::DrillbitEndpoint::Clear();
+    }
+  }
+  operator_profile_.Clear();
+  ::memset(_has_bits_, 0, sizeof(_has_bits_));
+  mutable_unknown_fields()->Clear();
+}
+
+bool MinorFragmentProfile::MergePartialFromCodedStream(
+    ::google::protobuf::io::CodedInputStream* input) {
+#define DO_(EXPRESSION) if (!(EXPRESSION)) return false
+  ::google::protobuf::uint32 tag;
+  while ((tag = input->ReadTag()) != 0) {
+    switch (::google::protobuf::internal::WireFormatLite::GetTagFieldNumber(tag)) {
+      // optional .exec.shared.FragmentState state = 1;
+      case 1: {
+        if (::google::protobuf::internal::WireFormatLite::GetTagWireType(tag) ==
+            ::google::protobuf::internal::WireFormatLite::WIRETYPE_VARINT) {
+          int value;
+          DO_((::google::protobuf::internal::WireFormatLite::ReadPrimitive<
+                   int, ::google::protobuf::internal::WireFormatLite::TYPE_ENUM>(
+                 input, &value)));
+          if (::exec::shared::FragmentState_IsValid(value)) {
+            set_state(static_cast< ::exec::shared::FragmentState >(value));
+          } else {
+            mutable_unknown_fields()->AddVarint(1, value);
+          }
+        } else {
+          goto handle_uninterpreted;
+        }
+        if (input->ExpectTag(18)) goto parse_error;
+        break;
+      }
+
+      // optional .exec.shared.DrillPBError error = 2;
+      case 2: {
+        if (::google::protobuf::internal::WireFormatLite::GetTagWireType(tag) ==
+            ::google::protobuf::internal::WireFormatLite::WIRETYPE_LENGTH_DELIMITED) {
+         parse_error:
+          DO_(::google::protobuf::internal::WireFormatLite::ReadMessageNoVirtual(
+               input, mutable_error()));
+        } else {
+          goto handle_uninterpreted;
+        }
+        if (input->ExpectTag(24)) goto parse_minor_fragment_id;
+        break;
+      }
+
+      // optional int32 minor_fragment_id = 3;
+      case 3: {
+        if (::google::protobuf::internal::WireFormatLite::GetTagWireType(tag) ==
+            ::google::protobuf::internal::WireFormatLite::WIRETYPE_VARINT) {
+         parse_minor_fragment_id:
+          DO_((::google::protobuf::internal::WireFormatLite::ReadPrimitive<
+                   ::google::protobuf::int32, ::google::protobuf::internal::WireFormatLite::TYPE_INT32>(
+                 input, &minor_fragment_id_)));
+          set_has_minor_fragment_id();
+        } else {
+          goto handle_uninterpreted;
+        }
+        if (input->ExpectTag(34)) goto parse_operator_profile;
+        break;
+      }
+
+      // repeated .exec.shared.OperatorProfile operator_profile = 4;
+      case 4: {
+        if (::google::protobuf::internal::WireFormatLite::GetTagWireType(tag) ==
+            ::google::protobuf::internal::WireFormatLite::WIRETYPE_LENGTH_DELIMITED) {
+         parse_operator_profile:
+          DO_(::google::protobuf::internal::WireFormatLite::ReadMessageNoVirtual(
+                input, add_operator_profile()));
+        } else {
+          goto handle_uninterpreted;
+        }
+        if (input->ExpectTag(34)) goto parse_operator_profile;
+        if (input->ExpectTag(40)) goto parse_start_time;
+        break;
+      }
+
+      // optional int64 start_time = 5;
+      case 5: {
+        if (::google::protobuf::internal::WireFormatLite::GetTagWireType(tag) ==
+            ::google::protobuf::internal::WireFormatLite::WIRETYPE_VARINT) {
+         parse_start_time:
+          DO_((::google::protobuf::internal::WireFormatLite::ReadPrimitive<
+                   ::google::protobuf::int64, ::google::protobuf::internal::WireFormatLite::TYPE_INT64>(
+                 input, &start_time_)));
+          set_has_start_time();
+        } else {
+          goto handle_uninterpreted;
+        }
+        if (input->ExpectTag(48)) goto parse_end_time;
+        break;
+      }
+
+      // optional int64 end_time = 6;
+      case 6: {
+        if (::google::protobuf::internal::WireFormatLite::GetTagWireType(tag) ==
+            ::google::protobuf::internal::WireFormatLite::WIRETYPE_VARINT) {
+         parse_end_time:
+          DO_((::google::protobuf::internal::WireFormatLite::ReadPrimitive<
+                   ::google::protobuf::int64, ::google::protobuf::internal::WireFormatLite::TYPE_INT64>(
+                 input, &end_time_)));
+          set_has_end_time();
+        } else {
+          goto handle_uninterpreted;
+        }
+        if (input->ExpectTag(56)) goto parse_memory_used;
+        break;
+      }
+
+      // optional int64 memory_used = 7;
+      case 7: {
+        if (::google::protobuf::internal::WireFormatLite::GetTagWireType(tag) ==
+            ::google::protobuf::internal::WireFormatLite::WIRETYPE_VARINT) {
+         parse_memory_used:
+          DO_((::google::protobuf::internal::WireFormatLite::ReadPrimitive<
+                   ::google::protobuf::int64, ::google::protobuf::internal::WireFormatLite::TYPE_INT64>(
+                 input, &memory_used_)));
+          set_has_memory_used();
+        } else {
+          goto handle_uninterpreted;
+        }
+        if (input->ExpectTag(64)) goto parse_max_memory_used;
+        break;
+      }
+
+      // optional int64 max_memory_used = 8;
+      case 8: {
+        if (::google::protobuf::internal::WireFormatLite::GetTagWireType(tag) ==
+            ::google::protobuf::internal::WireFormatLite::WIRETYPE_VARINT) {
+         parse_max_memory_used:
+          DO_((::google::protobuf::internal::WireFormatLite::ReadPrimitive<
+                   ::google::protobuf::int64, ::google::protobuf::internal::WireFormatLite::TYPE_INT64>(
+                 input, &max_memory_used_)));
+          set_has_max_memory_used();
+        } else {
+          goto handle_uninterpreted;
+        }
+        if (input->ExpectTag(74)) goto parse_endpoint;
+        break;
+      }
+
+      // optional .exec.DrillbitEndpoint endpoint = 9;
+      case 9: {
+        if (::google::protobuf::internal::WireFormatLite::GetTagWireType(tag) ==
+            ::google::protobuf::internal::WireFormatLite::WIRETYPE_LENGTH_DELIMITED) {
+         parse_endpoint:
+          DO_(::google::protobuf::internal::WireFormatLite::ReadMessageNoVirtual(
+               input, mutable_endpoint()));
+        } else {
+          goto handle_uninterpreted;
+        }
+        if (input->ExpectAtEnd()) return true;
+        break;
+      }
+
+      default: {
+      handle_uninterpreted:
+        if (::google::protobuf::internal::WireFormatLite::GetTagWireType(tag) ==
+            ::google::protobuf::internal::WireFormatLite::WIRETYPE_END_GROUP) {
+          return true;
+        }
+        DO_(::google::protobuf::internal::WireFormat::SkipField(
+              input, tag, mutable_unknown_fields()));
+        break;
+      }
+    }
+  }
+  return true;
+#undef DO_
+}
+
+void MinorFragmentProfile::SerializeWithCachedSizes(
+    ::google::protobuf::io::CodedOutputStream* output) const {
+  // optional .exec.shared.FragmentState state = 1;
+  if (has_state()) {
+    ::google::protobuf::internal::WireFormatLite::WriteEnum(
+      1, this->state(), output);
+  }
+
+  // optional .exec.shared.DrillPBError error = 2;
+  if (has_error()) {
+    ::google::protobuf::internal::WireFormatLite::WriteMessageMaybeToArray(
+      2, this->error(), output);
+  }
+
+  // optional int32 minor_fragment_id = 3;
+  if (has_minor_fragment_id()) {
+    ::google::protobuf::internal::WireFormatLite::WriteInt32(3, this->minor_fragment_id(), output);
+  }
+
+  // repeated .exec.shared.OperatorProfile operator_profile = 4;
+  for (int i = 0; i < this->operator_profile_size(); i++) {
+    ::google::protobuf::internal::WireFormatLite::WriteMessageMaybeToArray(
+      4, this->operator_profile(i), output);
+  }
+
+  // optional int64 start_time = 5;
+  if (has_start_time()) {
+    ::google::protobuf::internal::WireFormatLite::WriteInt64(5, this->start_time(), output);
+  }
+
+  // optional int64 end_time = 6;
+  if (has_end_time()) {
+    ::google::protobuf::internal::WireFormatLite::WriteInt64(6, this->end_time(), output);
+  }
+
+  // optional int64 memory_used = 7;
+  if (has_memory_used()) {
+    ::google::protobuf::internal::WireFormatLite::WriteInt64(7, this->memory_used(), output);
+  }
+
+  // optional int64 max_memory_used = 8;
+  if (has_max_memory_used()) {
+    ::google::protobuf::internal::WireFormatLite::WriteInt64(8, this->max_memory_used(), output);
+  }
+
+  // optional .exec.DrillbitEndpoint endpoint = 9;
+  if (has_endpoint()) {
+    ::google::protobuf::internal::WireFormatLite::WriteMessageMaybeToArray(
+      9, this->endpoint(), output);
+  }
+
+  if (!unknown_fields().empty()) {
+    ::google::protobuf::internal::WireFormat::SerializeUnknownFields(
+        unknown_fields(), output);
+  }
+}
+
+::google::protobuf::uint8* MinorFragmentProfile::SerializeWithCachedSizesToArray(
+    ::google::protobuf::uint8* target) const {
+  // optional .exec.shared.FragmentState state = 1;
+  if (has_state()) {
+    target = ::google::protobuf::internal::WireFormatLite::WriteEnumToArray(
+      1, this->state(), target);
+  }
+
+  // optional .exec.shared.DrillPBError error = 2;
+  if (has_error()) {
+    target = ::google::protobuf::internal::WireFormatLite::
+      WriteMessageNoVirtualToArray(
+        2, this->error(), target);
+  }
+
+  // optional int32 minor_fragment_id = 3;
+  if (has_minor_fragment_id()) {
+    target = ::google::protobuf::internal::WireFormatLite::WriteInt32ToArray(3, this->minor_fragment_id(), target);
+  }
+
+  // repeated .exec.shared.OperatorProfile operator_profile = 4;
+  for (int i = 0; i < this->operator_profile_size(); i++) {
+    target = ::google::protobuf::internal::WireFormatLite::
+      WriteMessageNoVirtualToArray(
+        4, this->operator_profile(i), target);
+  }
+
+  // optional int64 start_time = 5;
+  if (has_start_time()) {
+    target = ::google::protobuf::internal::WireFormatLite::WriteInt64ToArray(5, this->start_time(), target);
+  }
+
+  // optional int64 end_time = 6;
+  if (has_end_time()) {
+    target = ::google::protobuf::internal::WireFormatLite::WriteInt64ToArray(6, this->end_time(), target);
+  }
+
+  // optional int64 memory_used = 7;
+  if (has_memory_used()) {
+    target = ::google::protobuf::internal::WireFormatLite::WriteInt64ToArray(7, this->memory_used(), target);
+  }
+
+  // optional int64 max_memory_used = 8;
+  if (has_max_memory_used()) {
+    target = ::google::protobuf::internal::WireFormatLite::WriteInt64ToArray(8, this->max_memory_used(), target);
+  }
+
+  // optional .exec.DrillbitEndpoint endpoint = 9;
+  if (has_endpoint()) {
+    target = ::google::protobuf::internal::WireFormatLite::
+      WriteMessageNoVirtualToArray(
+        9, this->endpoint(), target);
+  }
+
+  if (!unknown_fields().empty()) {
+    target = ::google::protobuf::internal::WireFormat::SerializeUnknownFieldsToArray(
+        unknown_fields(), target);
+  }
+  return target;
+}
+
+int MinorFragmentProfile::ByteSize() const {
+  int total_size = 0;
+
+  if (_has_bits_[0 / 32] & (0xffu << (0 % 32))) {
+    // optional .exec.shared.FragmentState state = 1;
+    if (has_state()) {
+      total_size += 1 +
+        ::google::protobuf::internal::WireFormatLite::EnumSize(this->state());
+    }
+
+    // optional .exec.shared.DrillPBError error = 2;
+    if (has_error()) {
+      total_size += 1 +
+        ::google::protobuf::internal::WireFormatLite::MessageSizeNoVirtual(
+          this->error());
+    }
+
+    // optional int32 minor_fragment_id = 3;
+    if (has_minor_fragment_id()) {
+      total_size += 1 +
+        ::google::protobuf::internal::WireFormatLite::Int32Size(
+          this->minor_fragment_id());
+    }
+
+    // optional int64 start_time = 5;
+    if (has_start_time()) {
+      total_size += 1 +
+        ::google::protobuf::internal::WireFormatLite::Int64Size(
+          this->start_time());
+    }
+
+    // optional int64 end_time = 6;
+    if (has_end_time()) {
+      total_size += 1 +
+        ::google::protobuf::internal::WireFormatLite::Int64Size(
+          this->end_time());
+    }
+
+    // optional int64 memory_used = 7;
+    if (has_memory_used()) {
+      total_size += 1 +
+        ::google::protobuf::internal::WireFormatLite::Int64Size(
+          this->memory_used());
+    }
+
+    // optional int64 max_memory_used = 8;
+    if (has_max_memory_used()) {
+      total_size += 1 +
+        ::google::protobuf::internal::WireFormatLite::Int64Size(
+          this->max_memory_used());
+    }
+
+  }
+  if (_has_bits_[8 / 32] & (0xffu << (8 % 32))) {
+    // optional .exec.DrillbitEndpoint endpoint = 9;
+    if (has_endpoint()) {
+      total_size += 1 +
+        ::google::protobuf::internal::WireFormatLite::MessageSizeNoVirtual(
+          this->endpoint());
+    }
+
+  }
+  // repeated .exec.shared.OperatorProfile operator_profile = 4;
+  total_size += 1 * this->operator_profile_size();
+  for (int i = 0; i < this->operator_profile_size(); i++) {
+    total_size +=
+      ::google::protobuf::internal::WireFormatLite::MessageSizeNoVirtual(
+        this->operator_profile(i));
+  }
+
+  if (!unknown_fields().empty()) {
+    total_size +=
+      ::google::protobuf::internal::WireFormat::ComputeUnknownFieldsSize(
+        unknown_fields());
+  }
+  GOOGLE_SAFE_CONCURRENT_WRITES_BEGIN();
+  _cached_size_ = total_size;
+  GOOGLE_SAFE_CONCURRENT_WRITES_END();
+  return total_size;
+}
+
+void MinorFragmentProfile::MergeFrom(const ::google::protobuf::Message& from) {
+  GOOGLE_CHECK_NE(&from, this);
+  const MinorFragmentProfile* source =
+    ::google::protobuf::internal::dynamic_cast_if_available<const MinorFragmentProfile*>(
+      &from);
+  if (source == NULL) {
+    ::google::protobuf::internal::ReflectionOps::Merge(from, this);
+  } else {
+    MergeFrom(*source);
+  }
+}
+
+void MinorFragmentProfile::MergeFrom(const MinorFragmentProfile& from) {
+  GOOGLE_CHECK_NE(&from, this);
+  operator_profile_.MergeFrom(from.operator_profile_);
+  if (from._has_bits_[0 / 32] & (0xffu << (0 % 32))) {
+    if (from.has_state()) {
+      set_state(from.state());
+    }
+    if (from.has_error()) {
+      mutable_error()->::exec::shared::DrillPBError::MergeFrom(from.error());
+    }
+    if (from.has_minor_fragment_id()) {
+      set_minor_fragment_id(from.minor_fragment_id());
+    }
+    if (from.has_start_time()) {
+      set_start_time(from.start_time());
+    }
+    if (from.has_end_time()) {
+      set_end_time(from.end_time());
+    }
+    if (from.has_memory_used()) {
+      set_memory_used(from.memory_used());
+    }
+    if (from.has_max_memory_used()) {
+      set_max_memory_used(from.max_memory_used());
+    }
+  }
+  if (from._has_bits_[8 / 32] & (0xffu << (8 % 32))) {
+    if (from.has_endpoint()) {
+      mutable_endpoint()->::exec::DrillbitEndpoint::MergeFrom(from.endpoint());
+    }
+  }
+  mutable_unknown_fields()->MergeFrom(from.unknown_fields());
+}
+
+void MinorFragmentProfile::CopyFrom(const ::google::protobuf::Message& from) {
+  if (&from == this) return;
+  Clear();
+  MergeFrom(from);
+}
+
+void MinorFragmentProfile::CopyFrom(const MinorFragmentProfile& from) {
+  if (&from == this) return;
+  Clear();
+  MergeFrom(from);
+}
+
+bool MinorFragmentProfile::IsInitialized() const {
+
+  return true;
+}
+
+void MinorFragmentProfile::Swap(MinorFragmentProfile* other) {
+  if (other != this) {
+    std::swap(state_, other->state_);
+    std::swap(error_, other->error_);
+    std::swap(minor_fragment_id_, other->minor_fragment_id_);
+    operator_profile_.Swap(&other->operator_profile_);
+    std::swap(start_time_, other->start_time_);
+    std::swap(end_time_, other->end_time_);
+    std::swap(memory_used_, other->memory_used_);
+    std::swap(max_memory_used_, other->max_memory_used_);
+    std::swap(endpoint_, other->endpoint_);
+    std::swap(_has_bits_[0], other->_has_bits_[0]);
+    _unknown_fields_.Swap(&other->_unknown_fields_);
+    std::swap(_cached_size_, other->_cached_size_);
+  }
+}
+
+::google::protobuf::Metadata MinorFragmentProfile::GetMetadata() const {
+  protobuf_AssignDescriptorsOnce();
+  ::google::protobuf::Metadata metadata;
+  metadata.descriptor = MinorFragmentProfile_descriptor_;
+  metadata.reflection = MinorFragmentProfile_reflection_;
+  return metadata;
+}
+
+
+// ===================================================================
+
+#ifndef _MSC_VER
+const int OperatorProfile::kInputProfileFieldNumber;
+const int OperatorProfile::kOperatorIdFieldNumber;
+const int OperatorProfile::kOperatorTypeFieldNumber;
+const int OperatorProfile::kSetupNanosFieldNumber;
+const int OperatorProfile::kProcessNanosFieldNumber;
+const int OperatorProfile::kLocalMemoryAllocatedFieldNumber;
+const int OperatorProfile::kMetricFieldNumber;
+#endif  // !_MSC_VER
+
+OperatorProfile::OperatorProfile()
+  : ::google::protobuf::Message() {
+  SharedCtor();
+}
+
+void OperatorProfile::InitAsDefaultInstance() {
+}
+
+OperatorProfile::OperatorProfile(const OperatorProfile& from)
+  : ::google::protobuf::Message() {
+  SharedCtor();
+  MergeFrom(from);
+}
+
+void OperatorProfile::SharedCtor() {
+  _cached_size_ = 0;
+  operator_id_ = 0;
+  operator_type_ = 0;
+  setup_nanos_ = GOOGLE_LONGLONG(0);
+  process_nanos_ = GOOGLE_LONGLONG(0);
+  local_memory_allocated_ = GOOGLE_LONGLONG(0);
+  ::memset(_has_bits_, 0, sizeof(_has_bits_));
+}
+
+OperatorProfile::~OperatorProfile() {
+  SharedDtor();
+}
+
+void OperatorProfile::SharedDtor() {
+  if (this != default_instance_) {
+  }
+}
+
+void OperatorProfile::SetCachedSize(int size) const {
+  GOOGLE_SAFE_CONCURRENT_WRITES_BEGIN();
+  _cached_size_ = size;
+  GOOGLE_SAFE_CONCURRENT_WRITES_END();
+}
+const ::google::protobuf::Descriptor* OperatorProfile::descriptor() {
+  protobuf_AssignDescriptorsOnce();
+  return OperatorProfile_descriptor_;
+}
+
+const OperatorProfile& OperatorProfile::default_instance() {
+  if (default_instance_ == NULL) protobuf_AddDesc_UserBitShared_2eproto();
+  return *default_instance_;
+}
+
+OperatorProfile* OperatorProfile::default_instance_ = NULL;
+
+OperatorProfile* OperatorProfile::New() const {
+  return new OperatorProfile;
+}
+
+void OperatorProfile::Clear() {
+  if (_has_bits_[1 / 32] & (0xffu << (1 % 32))) {
+    operator_id_ = 0;
+    operator_type_ = 0;
+    setup_nanos_ = GOOGLE_LONGLONG(0);
+    process_nanos_ = GOOGLE_LONGLONG(0);
+    local_memory_allocated_ = GOOGLE_LONGLONG(0);
+  }
+  input_profile_.Clear();
+  metric_.Clear();
+  ::memset(_has_bits_, 0, sizeof(_has_bits_));
+  mutable_unknown_fields()->Clear();
+}
+
+bool OperatorProfile::MergePartialFromCodedStream(
+    ::google::protobuf::io::CodedInputStream* input) {
+#define DO_(EXPRESSION) if (!(EXPRESSION)) return false
+  ::google::protobuf::uint32 tag;
+  while ((tag = input->ReadTag()) != 0) {
+    switch (::google::protobuf::internal::WireFormatLite::GetTagFieldNumber(tag)) {
+      // repeated .exec.shared.StreamProfile input_profile = 1;
+      case 1: {
+        if (::google::protobuf::internal::WireFormatLite::GetTagWireType(tag) ==
+            ::google::protobuf::internal::WireFormatLite::WIRETYPE_LENGTH_DELIMITED) {
+         parse_input_profile:
+          DO_(::google::protobuf::internal::WireFormatLite::ReadMessageNoVirtual(
+                input, add_input_profile()));
+        } else {
+          goto handle_uninterpreted;
+        }
+        if (input->ExpectTag(10)) goto parse_input_profile;
+        if (input->ExpectTag(24)) goto parse_operator_id;
+        break;
+      }
+
+      // optional int32 operator_id = 3;
+      case 3: {
+        if (::google::protobuf::internal::WireFormatLite::GetTagWireType(tag) ==
+            ::google::protobuf::internal::WireFormatLite::WIRETYPE_VARINT) {
+         parse_operator_id:
+          DO_((::google::protobuf::internal::WireFormatLite::ReadPrimitive<
+                   ::google::protobuf::int32, ::google::protobuf::internal::WireFormatLite::TYPE_INT32>(
+                 input, &operator_id_)));
+          set_has_operator_id();
+        } else {
+          goto handle_uninterpreted;
+        }
+        if (input->ExpectTag(32)) goto parse_operator_type;
+        break;
+      }
+
+      // optional int32 operator_type = 4;
+      case 4: {
+        if (::google::protobuf::internal::WireFormatLite::GetTagWireType(tag) ==
+            ::google::protobuf::internal::WireFormatLite::WIRETYPE_VARINT) {
+         parse_operator_type:
+          DO_((::google::protobuf::internal::WireFormatLite::ReadPrimitive<
+                   ::google::protobuf::int32, ::google::protobuf::internal::WireFormatLite::TYPE_INT32>(
+                 input, &operator_type_)));
+          set_has_operator_type();
+        } else {
+          goto handle_uninterpreted;
+        }
+        if (input->ExpectTag(40)) goto parse_setup_nanos;
+        break;
+      }
+
+      // optional int64 setup_nanos = 5;
+      case 5: {
+        if (::google::protobuf::internal::WireFormatLite::GetTagWireType(tag) ==
+            ::google::protobuf::internal::WireFormatLite::WIRETYPE_VARINT) {
+         parse_setup_nanos:
+          DO_((::google::protobuf::internal::WireFormatLite::ReadPrimitive<
+                   ::google::protobuf::int64, ::google::protobuf::internal::WireFormatLite::TYPE_INT64>(
+                 input, &setup_nanos_)));
+          set_has_setup_nanos();
+        } else {
+          goto handle_uninterpreted;
+        }
+        if (input->ExpectTag(48)) goto parse_process_nanos;
+        break;
+      }
+
+      // optional int64 process_nanos = 6;
+      case 6: {
+        if (::google::protobuf::internal::WireFormatLite::GetTagWireType(tag) ==
+            ::google::protobuf::internal::WireFormatLite::WIRETYPE_VARINT) {
+         parse_process_nanos:
+          DO_((::google::protobuf::internal::WireFormatLite::ReadPrimitive<
+                   ::google::protobuf::int64, ::google::protobuf::internal::WireFormatLite::TYPE_INT64>(
+                 input, &process_nanos_)));
+          set_has_process_nanos();
+        } else {
+          goto handle_uninterpreted;
+        }
+        if (input->ExpectTag(56)) goto parse_local_memory_allocated;
+        break;
+      }
+
+      // optional int64 local_memory_allocated = 7;
+      case 7: {
+        if (::google::protobuf::internal::WireFormatLite::GetTagWireType(tag) ==
+            ::google::protobuf::internal::WireFormatLite::WIRETYPE_VARINT) {
+         parse_local_memory_allocated:
+          DO_((::google::protobuf::internal::WireFormatLite::ReadPrimitive<
+                   ::google::protobuf::int64, ::google::protobuf::internal::WireFormatLite::TYPE_INT64>(
+                 input, &local_memory_allocated_)));
+          set_has_local_memory_allocated();
+        } else {
+          goto handle_uninterpreted;
+        }
+        if (input->ExpectTag(66)) goto parse_metric;
+        break;
+      }
+
+      // repeated .exec.shared.MetricValue metric = 8;
+      case 8: {
+        if (::google::protobuf::internal::WireFormatLite::GetTagWireType(tag) ==
+            ::google::protobuf::internal::WireFormatLite::WIRETYPE_LENGTH_DELIMITED) {
+         parse_metric:
+          DO_(::google::protobuf::internal::WireFormatLite::ReadMessageNoVirtual(
+                input, add_metric()));
+        } else {
+          goto handle_uninterpreted;
+        }
+        if (input->ExpectTag(66)) goto parse_metric;
+        if (input->ExpectAtEnd()) return true;
+        break;
+      }
+
+      default: {
+      handle_uninterpreted:
+        if (::google::protobuf::internal::WireFormatLite::GetTagWireType(tag) ==
+            ::google::protobuf::internal::WireFormatLite::WIRETYPE_END_GROUP) {
+          return true;
+        }
+        DO_(::google::protobuf::internal::WireFormat::SkipField(
+              input, tag, mutable_unknown_fields()));
+        break;
+      }
+    }
+  }
+  return true;
+#undef DO_
+}
+
+void OperatorProfile::SerializeWithCachedSizes(
+    ::google::protobuf::io::CodedOutputStream* output) const {
+  // repeated .exec.shared.StreamProfile input_profile = 1;
+  for (int i = 0; i < this->input_profile_size(); i++) {
+    ::google::protobuf::internal::WireFormatLite::WriteMessageMaybeToArray(
+      1, this->input_profile(i), output);
+  }
+
+  // optional int32 operator_id = 3;
+  if (has_operator_id()) {
+    ::google::protobuf::internal::WireFormatLite::WriteInt32(3, this->operator_id(), output);
+  }
+
+  // optional int32 operator_type = 4;
+  if (has_operator_type()) {
+    ::google::protobuf::internal::WireFormatLite::WriteInt32(4, this->operator_type(), output);
+  }
+
+  // optional int64 setup_nanos = 5;
+  if (has_setup_nanos()) {
+    ::google::protobuf::internal::WireFormatLite::WriteInt64(5, this->setup_nanos(), output);
+  }
+
+  // optional int64 process_nanos = 6;
+  if (has_process_nanos()) {
+    ::google::protobuf::internal::WireFormatLite::WriteInt64(6, this->process_nanos(), output);
+  }
+
+  // optional int64 local_memory_allocated = 7;
+  if (has_local_memory_allocated()) {
+    ::google::protobuf::internal::WireFormatLite::WriteInt64(7, this->local_memory_allocated(), output);
+  }
+
+  // repeated .exec.shared.MetricValue metric = 8;
+  for (int i = 0; i < this->metric_size(); i++) {
+    ::google::protobuf::internal::WireFormatLite::WriteMessageMaybeToArray(
+      8, this->metric(i), output);
+  }
+
+  if (!unknown_fields().empty()) {
+    ::google::protobuf::internal::WireFormat::SerializeUnknownFields(
+        unknown_fields(), output);
+  }
+}
+
+::google::protobuf::uint8* OperatorProfile::SerializeWithCachedSizesToArray(
+    ::google::protobuf::uint8* target) const {
+  // repeated .exec.shared.StreamProfile input_profile = 1;
+  for (int i = 0; i < this->input_profile_size(); i++) {
+    target = ::google::protobuf::internal::WireFormatLite::
+      WriteMessageNoVirtualToArray(
+        1, this->input_profile(i), target);
+  }
+
+  // optional int32 operator_id = 3;
+  if (has_operator_id()) {
+    target = ::google::protobuf::internal::WireFormatLite::WriteInt32ToArray(3, this->operator_id(), target);
+  }
+
+  // optional int32 operator_type = 4;
+  if (has_operator_type()) {
+    target = ::google::protobuf::internal::WireFormatLite::WriteInt32ToArray(4, this->operator_type(), target);
+  }
+
+  // optional int64 setup_nanos = 5;
+  if (has_setup_nanos()) {
+    target = ::google::protobuf::internal::WireFormatLite::WriteInt64ToArray(5, this->setup_nanos(), target);
+  }
+
+  // optional int64 process_nanos = 6;
+  if (has_process_nanos()) {
+    target = ::google::protobuf::internal::WireFormatLite::WriteInt64ToArray(6, this->process_nanos(), target);
+  }
+
+  // optional int64 local_memory_allocated = 7;
+  if (has_local_memory_allocated()) {
+    target = ::google::protobuf::internal::WireFormatLite::WriteInt64ToArray(7, this->local_memory_allocated(), target);
+  }
+
+  // repeated .exec.shared.MetricValue metric = 8;
+  for (int i = 0; i < this->metric_size(); i++) {
+    target = ::google::protobuf::internal::WireFormatLite::
+      WriteMessageNoVirtualToArray(
+        8, this->metric(i), target);
+  }
+
+  if (!unknown_fields().empty()) {
+    target = ::google::protobuf::internal::WireFormat::SerializeUnknownFieldsToArray(
+        unknown_fields(), target);
+  }
+  return target;
+}
+
+int OperatorProfile::ByteSize() const {
+  int total_size = 0;
+
+  if (_has_bits_[1 / 32] & (0xffu << (1 % 32))) {
+    // optional int32 operator_id = 3;
+    if (has_operator_id()) {
+      total_size += 1 +
+        ::google::protobuf::internal::WireFormatLite::Int32Size(
+          this->operator_id());
+    }
+
+    // optional int32 operator_type = 4;
+    if (has_operator_type()) {
+      total_size += 1 +
+        ::google::protobuf::internal::WireFormatLite::Int32Size(
+          this->operator_type());
+    }
+
+    // optional int64 setup_nanos = 5;
+    if (has_setup_nanos()) {
+      total_size += 1 +
+        ::google::protobuf::internal::WireFormatLite::Int64Size(
+          this->setup_nanos());
+    }
+
+    // optional int64 process_nanos = 6;
+    if (has_process_nanos()) {
+      total_size += 1 +
+        ::google::protobuf::internal::WireFormatLite::Int64Size(
+          this->process_nanos());
+    }
+
+    // optional int64 local_memory_allocated = 7;
+    if (has_local_memory_allocated()) {
+      total_size += 1 +
+        ::google::protobuf::internal::WireFormatLite::Int64Size(
+          this->local_memory_allocated());
+    }
+
+  }
+  // repeated .exec.shared.StreamProfile input_profile = 1;
+  total_size += 1 * this->input_profile_size();
+  for (int i = 0; i < this->input_profile_size(); i++) {
+    total_size +=
+      ::google::protobuf::internal::WireFormatLite::MessageSizeNoVirtual(
+        this->input_profile(i));
+  }
+
+  // repeated .exec.shared.MetricValue metric = 8;
+  total_size += 1 * this->metric_size();
+  for (int i = 0; i < this->metric_size(); i++) {
+    total_size +=
+      ::google::protobuf::internal::WireFormatLite::MessageSizeNoVirtual(
+        this->metric(i));
+  }
+
+  if (!unknown_fields().empty()) {
+    total_size +=
+      ::google::protobuf::internal::WireFormat::ComputeUnknownFieldsSize(
+        unknown_fields());
+  }
+  GOOGLE_SAFE_CONCURRENT_WRITES_BEGIN();
+  _cached_size_ = total_size;
+  GOOGLE_SAFE_CONCURRENT_WRITES_END();
+  return total_size;
+}
+
+void OperatorProfile::MergeFrom(const ::google::protobuf::Message& from) {
+  GOOGLE_CHECK_NE(&from, this);
+  const OperatorProfile* source =
+    ::google::protobuf::internal::dynamic_cast_if_available<const OperatorProfile*>(
+      &from);
+  if (source == NULL) {
+    ::google::protobuf::internal::ReflectionOps::Merge(from, this);
+  } else {
+    MergeFrom(*source);
+  }
+}
+
+void OperatorProfile::MergeFrom(const OperatorProfile& from) {
+  GOOGLE_CHECK_NE(&from, this);
+  input_profile_.MergeFrom(from.input_profile_);
+  metric_.MergeFrom(from.metric_);
+  if (from._has_bits_[1 / 32] & (0xffu << (1 % 32))) {
+    if (from.has_operator_id()) {
+      set_operator_id(from.operator_id());
+    }
+    if (from.has_operator_type()) {
+      set_operator_type(from.operator_type());
+    }
+    if (from.has_setup_nanos()) {
+      set_setup_nanos(from.setup_nanos());
+    }
+    if (from.has_process_nanos()) {
+      set_process_nanos(from.process_nanos());
+    }
+    if (from.has_local_memory_allocated()) {
+      set_local_memory_allocated(from.local_memory_allocated());
+    }
+  }
+  mutable_unknown_fields()->MergeFrom(from.unknown_fields());
+}
+
+void OperatorProfile::CopyFrom(const ::google::protobuf::Message& from) {
+  if (&from == this) return;
+  Clear();
+  MergeFrom(from);
+}
+
+void OperatorProfile::CopyFrom(const OperatorProfile& from) {
+  if (&from == this) return;
+  Clear();
+  MergeFrom(from);
+}
+
+bool OperatorProfile::IsInitialized() const {
+
+  return true;
+}
+
+void OperatorProfile::Swap(OperatorProfile* other) {
+  if (other != this) {
+    input_profile_.Swap(&other->input_profile_);
+    std::swap(operator_id_, other->operator_id_);
+    std::swap(operator_type_, other->operator_type_);
+    std::swap(setup_nanos_, other->setup_nanos_);
+    std::swap(process_nanos_, other->process_nanos_);
+    std::swap(local_memory_allocated_, other->local_memory_allocated_);
+    metric_.Swap(&other->metric_);
+    std::swap(_has_bits_[0], other->_has_bits_[0]);
+    _unknown_fields_.Swap(&other->_unknown_fields_);
+    std::swap(_cached_size_, other->_cached_size_);
+  }
+}
+
+::google::protobuf::Metadata OperatorProfile::GetMetadata() const {
+  protobuf_AssignDescriptorsOnce();
+  ::google::protobuf::Metadata metadata;
+  metadata.descriptor = OperatorProfile_descriptor_;
+  metadata.reflection = OperatorProfile_reflection_;
+  return metadata;
+}
+
+
+// ===================================================================
+
+#ifndef _MSC_VER
+const int StreamProfile::kRecordsFieldNumber;
+const int StreamProfile::kBatchesFieldNumber;
+const int StreamProfile::kSchemasFieldNumber;
+#endif  // !_MSC_VER
+
+StreamProfile::StreamProfile()
+  : ::google::protobuf::Message() {
+  SharedCtor();
+}
+
+void StreamProfile::InitAsDefaultInstance() {
+}
+
+StreamProfile::StreamProfile(const StreamProfile& from)
+  : ::google::protobuf::Message() {
+  SharedCtor();
+  MergeFrom(from);
+}
+
+void StreamProfile::SharedCtor() {
+  _cached_size_ = 0;
+  records_ = GOOGLE_LONGLONG(0);
+  batches_ = GOOGLE_LONGLONG(0);
+  schemas_ = GOOGLE_LONGLONG(0);
+  ::memset(_has_bits_, 0, sizeof(_has_bits_));
+}
+
+StreamProfile::~StreamProfile() {
+  SharedDtor();
+}
+
+void StreamProfile::SharedDtor() {
+  if (this != default_instance_) {
+  }
+}
+
+void StreamProfile::SetCachedSize(int size) const {
+  GOOGLE_SAFE_CONCURRENT_WRITES_BEGIN();
+  _cached_size_ = size;
+  GOOGLE_SAFE_CONCURRENT_WRITES_END();
+}
+const ::google::protobuf::Descriptor* StreamProfile::descriptor() {
+  protobuf_AssignDescriptorsOnce();
+  return StreamProfile_descriptor_;
+}
+
+const StreamProfile& StreamProfile::default_instance() {
+  if (default_instance_ == NULL) protobuf_AddDesc_UserBitShared_2eproto();
+  return *default_instance_;
+}
+
+StreamProfile* StreamProfile::default_instance_ = NULL;
+
+StreamProfile* StreamProfile::New() const {
+  return new StreamProfile;
+}
+
+void StreamProfile::Clear() {
+  if (_has_bits_[0 / 32] & (0xffu << (0 % 32))) {
+    records_ = GOOGLE_LONGLONG(0);
+    batches_ = GOOGLE_LONGLONG(0);
+    schemas_ = GOOGLE_LONGLONG(0);
+  }
+  ::memset(_has_bits_, 0, sizeof(_has_bits_));
+  mutable_unknown_fields()->Clear();
+}
+
+bool StreamProfile::MergePartialFromCodedStream(
+    ::google::protobuf::io::CodedInputStream* input) {
+#define DO_(EXPRESSION) if (!(EXPRESSION)) return false
+  ::google::protobuf::uint32 tag;
+  while ((tag = input->ReadTag()) != 0) {
+    switch (::google::protobuf::internal::WireFormatLite::GetTagFieldNumber(tag)) {
+      // optional int64 records = 1;
+      case 1: {
+        if (::google::protobuf::internal::WireFormatLite::GetTagWireType(tag) ==
+            ::google::protobuf::internal::WireFormatLite::WIRETYPE_VARINT) {
+          DO_((::google::protobuf::internal::WireFormatLite::ReadPrimitive<
+                   ::google::protobuf::int64, ::google::protobuf::internal::WireFormatLite::TYPE_INT64>(
+                 input, &records_)));
+          set_has_records();
+        } else {
+          goto handle_uninterpreted;
+        }
+        if (input->ExpectTag(16)) goto parse_batches;
+        break;
+      }
+
+      // optional int64 batches = 2;
+      case 2: {
+        if (::google::protobuf::internal::WireFormatLite::GetTagWireType(tag) ==
+            ::google::protobuf::internal::WireFormatLite::WIRETYPE_VARINT) {
+         parse_batches:
+          DO_((::google::protobuf::internal::WireFormatLite::ReadPrimitive<
+                   ::google::protobuf::int64, ::google::protobuf::internal::WireFormatLite::TYPE_INT64>(
+                 input, &batches_)));
+          set_has_batches();
+        } else {
+          goto handle_uninterpreted;
+        }
+        if (input->ExpectTag(24)) goto parse_schemas;
+        break;
+      }
+
+      // optional int64 schemas = 3;
+      case 3: {
+        if (::google::protobuf::internal::WireFormatLite::GetTagWireType(tag) ==
+            ::google::protobuf::internal::WireFormatLite::WIRETYPE_VARINT) {
+         parse_schemas:
+          DO_((::google::protobuf::internal::WireFormatLite::ReadPrimitive<
+                   ::google::protobuf::int64, ::google::protobuf::internal::WireFormatLite::TYPE_INT64>(
+                 input, &schemas_)));
+          set_has_schemas();
+        } else {
+          goto handle_uninterpreted;
+        }
+        if (input->ExpectAtEnd()) return true;
+        break;
+      }
+
+      default: {
+      handle_uninterpreted:
+        if (::google::protobuf::internal::WireFormatLite::GetTagWireType(tag) ==
+            ::google::protobuf::internal::WireFormatLite::WIRETYPE_END_GROUP) {
+          return true;
+        }
+        DO_(::google::protobuf::internal::WireFormat::SkipField(
+              input, tag, mutable_unknown_fields()));
+        break;
+      }
+    }
+  }
+  return true;
+#undef DO_
+}
+
+void StreamProfile::SerializeWithCachedSizes(
+    ::google::protobuf::io::CodedOutputStream* output) const {
+  // optional int64 records = 1;
+  if (has_records()) {
+    ::google::protobuf::internal::WireFormatLite::WriteInt64(1, this->records(), output);
+  }
+
+  // optional int64 batches = 2;
+  if (has_batches()) {
+    ::google::protobuf::internal::WireFormatLite::WriteInt64(2, this->batches(), output);
+  }
+
+  // optional int64 schemas = 3;
+  if (has_schemas()) {
+    ::google::protobuf::internal::WireFormatLite::WriteInt64(3, this->schemas(), output);
+  }
+
+  if (!unknown_fields().empty()) {
+    ::google::protobuf::internal::WireFormat::SerializeUnknownFields(
+        unknown_fields(), output);
+  }
+}
+
+::google::protobuf::uint8* StreamProfile::SerializeWithCachedSizesToArray(
+    ::google::protobuf::uint8* target) const {
+  // optional int64 records = 1;
+  if (has_records()) {
+    target = ::google::protobuf::internal::WireFormatLite::WriteInt64ToArray(1, this->records(), target);
+  }
+
+  // optional int64 batches = 2;
+  if (has_batches()) {
+    target = ::google::protobuf::internal::WireFormatLite::WriteInt64ToArray(2, this->batches(), target);
+  }
+
+  // optional int64 schemas = 3;
+  if (has_schemas()) {
+    target = ::google::protobuf::internal::WireFormatLite::WriteInt64ToArray(3, this->schemas(), target);
+  }
+
+  if (!unknown_fields().empty()) {
+    target = ::google::protobuf::internal::WireFormat::SerializeUnknownFieldsToArray(
+        unknown_fields(), target);
+  }
+  return target;
+}
+
+int StreamProfile::ByteSize() const {
+  int total_size = 0;
+
+  if (_has_bits_[0 / 32] & (0xffu << (0 % 32))) {
+    // optional int64 records = 1;
+    if (has_records()) {
+      total_size += 1 +
+        ::google::protobuf::internal::WireFormatLite::Int64Size(
+          this->records());
+    }
+
+    // optional int64 batches = 2;
+    if (has_batches()) {
+      total_size += 1 +
+        ::google::protobuf::internal::WireFormatLite::Int64Size(
+          this->batches());
+    }
+
+    // optional int64 schemas = 3;
+    if (has_schemas()) {
+      total_size += 1 +
+        ::google::protobuf::internal::WireFormatLite::Int64Size(
+          this->schemas());
+    }
+
+  }
+  if (!unknown_fields().empty()) {
+    total_size +=
+      ::google::protobuf::internal::WireFormat::ComputeUnknownFieldsSize(
+        unknown_fields());
+  }
+  GOOGLE_SAFE_CONCURRENT_WRITES_BEGIN();
+  _cached_size_ = total_size;
+  GOOGLE_SAFE_CONCURRENT_WRITES_END();
+  return total_size;
+}
+
+void StreamProfile::MergeFrom(const ::google::protobuf::Message& from) {
+  GOOGLE_CHECK_NE(&from, this);
+  const StreamProfile* source =
+    ::google::protobuf::internal::dynamic_cast_if_available<const StreamProfile*>(
+      &from);
+  if (source == NULL) {
+    ::google::protobuf::internal::ReflectionOps::Merge(from, this);
+  } else {
+    MergeFrom(*source);
+  }
+}
+
+void StreamProfile::MergeFrom(const StreamProfile& from) {
+  GOOGLE_CHECK_NE(&from, this);
+  if (from._has_bits_[0 / 32] & (0xffu << (0 % 32))) {
+    if (from.has_records()) {
+      set_records(from.records());
+    }
+    if (from.has_batches()) {
+      set_batches(from.batches());
+    }
+    if (from.has_schemas()) {
+      set_schemas(from.schemas());
+    }
+  }
+  mutable_unknown_fields()->MergeFrom(from.unknown_fields());
+}
+
+void StreamProfile::CopyFrom(const ::google::protobuf::Message& from) {
+  if (&from == this) return;
+  Clear();
+  MergeFrom(from);
+}
+
+void StreamProfile::CopyFrom(const StreamProfile& from) {
+  if (&from == this) return;
+  Clear();
+  MergeFrom(from);
+}
+
+bool StreamProfile::IsInitialized() const {
+
+  return true;
+}
+
+void StreamProfile::Swap(StreamProfile* other) {
+  if (other != this) {
+    std::swap(records_, other->records_);
+    std::swap(batches_, other->batches_);
+    std::swap(schemas_, other->schemas_);
+    std::swap(_has_bits_[0], other->_has_bits_[0]);
+    _unknown_fields_.Swap(&other->_unknown_fields_);
+    std::swap(_cached_size_, other->_cached_size_);
+  }
+}
+
+::google::protobuf::Metadata StreamProfile::GetMetadata() const {
+  protobuf_AssignDescriptorsOnce();
+  ::google::protobuf::Metadata metadata;
+  metadata.descriptor = StreamProfile_descriptor_;
+  metadata.reflection = StreamProfile_reflection_;
+  return metadata;
+}
+
+
+// ===================================================================
+
+#ifndef _MSC_VER
+const int MetricValue::kMetricIdFieldNumber;
+const int MetricValue::kLongValueFieldNumber;
+const int MetricValue::kDoubleValueFieldNumber;
+#endif  // !_MSC_VER
+
+MetricValue::MetricValue()
+  : ::google::protobuf::Message() {
+  SharedCtor();
+}
+
+void MetricValue::InitAsDefaultInstance() {
+}
+
+MetricValue::MetricValue(const MetricValue& from)
+  : ::google::protobuf::Message() {
+  SharedCtor();
+  MergeFrom(from);
+}
+
+void MetricValue::SharedCtor() {
+  _cached_size_ = 0;
+  metric_id_ = 0;
+  long_value_ = GOOGLE_LONGLONG(0);
+  double_value_ = 0;
+  ::memset(_has_bits_, 0, sizeof(_has_bits_));
+}
+
+MetricValue::~MetricValue() {
+  SharedDtor();
+}
+
+void MetricValue::SharedDtor() {
+  if (this != default_instance_) {
+  }
+}
+
+void MetricValue::SetCachedSize(int size) const {
+  GOOGLE_SAFE_CONCURRENT_WRITES_BEGIN();
+  _cached_size_ = size;
+  GOOGLE_SAFE_CONCURRENT_WRITES_END();
+}
+const ::google::protobuf::Descriptor* MetricValue::descriptor() {
+  protobuf_AssignDescriptorsOnce();
+  return MetricValue_descriptor_;
+}
+
+const MetricValue& MetricValue::default_instance() {
+  if (default_instance_ == NULL) protobuf_AddDesc_UserBitShared_2eproto();
+  return *default_instance_;
+}
+
+MetricValue* MetricValue::default_instance_ = NULL;
+
+MetricValue* MetricValue::New() const {
+  return new MetricValue;
+}
+
+void MetricValue::Clear() {
+  if (_has_bits_[0 / 32] & (0xffu << (0 % 32))) {
+    metric_id_ = 0;
+    long_value_ = GOOGLE_LONGLONG(0);
+    double_value_ = 0;
+  }
+  ::memset(_has_bits_, 0, sizeof(_has_bits_));
+  mutable_unknown_fields()->Clear();
+}
+
+bool MetricValue::MergePartialFromCodedStream(
+    ::google::protobuf::io::CodedInputStream* input) {
+#define DO_(EXPRESSION) if (!(EXPRESSION)) return false
+  ::google::protobuf::uint32 tag;
+  while ((tag = input->ReadTag()) != 0) {
+    switch (::google::protobuf::internal::WireFormatLite::GetTagFieldNumber(tag)) {
+      // optional int32 metric_id = 1;
+      case 1: {
+        if (::google::protobuf::internal::WireFormatLite::GetTagWireType(tag) ==
+            ::google::protobuf::internal::WireFormatLite::WIRETYPE_VARINT) {
+          DO_((::google::protobuf::internal::WireFormatLite::ReadPrimitive<
+                   ::google::protobuf::int32, ::google::protobuf::internal::WireFormatLite::TYPE_INT32>(
+                 input, &metric_id_)));
+          set_has_metric_id();
+        } else {
+          goto handle_uninterpreted;
+        }
+        if (input->ExpectTag(16)) goto parse_long_value;
+        break;
+      }
+
+      // optional int64 long_value = 2;
+      case 2: {
+        if (::google::protobuf::internal::WireFormatLite::GetTagWireType(tag) =

<TRUNCATED>
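The generated C++ above mirrors Drill's Java bindings for the same UserBitShared.proto. As a minimal sketch of how these messages are typically built and round-tripped — assuming Drill's generated Java classes live in org.apache.drill.exec.proto.UserBitShared, which this diff does not show — consider:

import org.apache.drill.exec.proto.UserBitShared.MajorFragmentProfile;
import org.apache.drill.exec.proto.UserBitShared.QueryProfile;

public class QueryProfileRoundTrip {
  public static void main(String[] args) throws Exception {
    // Field numbers follow the .proto reflected above: start = 3, query = 5,
    // fragment_profile = 7 (repeated message).
    QueryProfile profile = QueryProfile.newBuilder()
        .setStart(System.currentTimeMillis())
        .setQuery("SELECT 1")
        .addFragmentProfile(MajorFragmentProfile.newBuilder()
            .setMajorFragmentId(0))
        .build();

    byte[] wire = profile.toByteArray();                  // same wire format the C++
    QueryProfile parsed = QueryProfile.parseFrom(wire);   // SerializeWithCachedSizes* emits
    System.out.println(parsed.getFragmentProfileCount()); // prints 1
  }
}

Because both bindings are generated from one .proto, bytes produced by this Java sketch parse in the C++ QueryProfile::MergePartialFromCodedStream above, and vice versa.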

[31/32] git commit: DRILL-650: Left outer join sometimes fails with "Failure while setting up Foreman" error

Posted by ja...@apache.org.
DRILL-650: Left outer join sometimes fails with "Failure while setting up Foreman" error


Project: http://git-wip-us.apache.org/repos/asf/incubator-drill/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-drill/commit/65f6bcbf
Tree: http://git-wip-us.apache.org/repos/asf/incubator-drill/tree/65f6bcbf
Diff: http://git-wip-us.apache.org/repos/asf/incubator-drill/diff/65f6bcbf

Branch: refs/heads/master
Commit: 65f6bcbfc32a8dd3c69e1de127ad79ece0556b20
Parents: 4d13046
Author: Aditya Kishore <ad...@maprtech.com>
Authored: Fri Jun 20 07:18:35 2014 -0700
Committer: Jacques Nadeau <ja...@apache.org>
Committed: Fri Jun 20 10:58:10 2014 -0700

----------------------------------------------------------------------
 .../planner/types/RelDataTypeDrillImpl.java     | 23 ++++++++++++++++++++
 .../exec/planner/types/RelDataTypeHolder.java   | 15 ++++++++++++-
 2 files changed, 37 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/65f6bcbf/exec/java-exec/src/main/java/org/apache/drill/exec/planner/types/RelDataTypeDrillImpl.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/planner/types/RelDataTypeDrillImpl.java b/exec/java-exec/src/main/java/org/apache/drill/exec/planner/types/RelDataTypeDrillImpl.java
index 0f3c24f..bb97992 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/planner/types/RelDataTypeDrillImpl.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/planner/types/RelDataTypeDrillImpl.java
@@ -84,4 +84,27 @@ public class RelDataTypeDrillImpl extends RelDataTypeImpl {
     public boolean isStruct() {
         return true;
     }
+
+    @Override
+    public int hashCode() {
+      return holder == null ? 0 : holder.hashCode();
+    }
+
+    @Override
+    public boolean equals(Object obj) {
+      if (this == obj)
+        return true;
+      if (!super.equals(obj))
+        return false;
+      if (getClass() != obj.getClass())
+        return false;
+      RelDataTypeDrillImpl other = (RelDataTypeDrillImpl) obj;
+      if (holder == null) {
+        if (other.holder != null)
+          return false;
+      } else if (!holder.equals(other.holder))
+        return false;
+      return true;
+    }
+
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/65f6bcbf/exec/java-exec/src/main/java/org/apache/drill/exec/planner/types/RelDataTypeHolder.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/planner/types/RelDataTypeHolder.java b/exec/java-exec/src/main/java/org/apache/drill/exec/planner/types/RelDataTypeHolder.java
index 201e520..1485aa3 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/planner/types/RelDataTypeHolder.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/planner/types/RelDataTypeHolder.java
@@ -34,7 +34,6 @@ public class RelDataTypeHolder {
   private RelDataTypeFactory typeFactory;
 
   public List<RelDataTypeField> getFieldList(RelDataTypeFactory typeFactory) {
-
     addStarIfEmpty(typeFactory);
     return fields;
   }
@@ -81,4 +80,18 @@ public class RelDataTypeHolder {
     this.typeFactory = typeFactory;
   }
 
+  @Override
+  public int hashCode() {
+    return System.identityHashCode(this);
+  }
+
+  @Override
+  public boolean equals(Object obj) {
+    return (this == obj);
+  }
+
+  private List<RelDataTypeField> getFieldList() {
+    return getFieldList(this.typeFactory);
+  }
+
 }
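
Note: the fix gives RelDataTypeHolder identity-based equality and has RelDataTypeDrillImpl delegate to its holder, so two dynamic row types compare equal only when they share the same holder instance. A minimal sketch of the pattern, with hypothetical stand-in class names:

    import java.util.Objects;

    // Sketch of the identity-equality pattern introduced above; Holder and
    // IdentityEqualitySketch are stand-ins for the two Drill classes.
    final class Holder {
      @Override
      public int hashCode() {
        return System.identityHashCode(this);  // equal only to itself
      }

      @Override
      public boolean equals(Object obj) {
        return this == obj;
      }
    }

    public class IdentityEqualitySketch {
      private final Holder holder;

      IdentityEqualitySketch(Holder holder) {
        this.holder = holder;
      }

      @Override
      public int hashCode() {
        return holder == null ? 0 : holder.hashCode();  // delegate to holder
      }

      @Override
      public boolean equals(Object obj) {
        if (this == obj) return true;
        if (obj == null || getClass() != obj.getClass()) return false;
        IdentityEqualitySketch other = (IdentityEqualitySketch) obj;
        return Objects.equals(holder, other.holder);
      }

      public static void main(String[] args) {
        Holder h = new Holder();
        System.out.println(new IdentityEqualitySketch(h)
            .equals(new IdentityEqualitySketch(h)));            // true: same holder
        System.out.println(new IdentityEqualitySketch(h)
            .equals(new IdentityEqualitySketch(new Holder()))); // false: different
      }
    }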


[08/32] git commit: DRILL-1008: Update the way that Windows sqlline.bat composes its classpath

Posted by ja...@apache.org.
DRILL-1008: Update the way that Windows sqlline.bat composes its classpath


Project: http://git-wip-us.apache.org/repos/asf/incubator-drill/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-drill/commit/7d436697
Tree: http://git-wip-us.apache.org/repos/asf/incubator-drill/tree/7d436697
Diff: http://git-wip-us.apache.org/repos/asf/incubator-drill/diff/7d436697

Branch: refs/heads/master
Commit: 7d436697c0d27b0fb6863b8a37efe1e1952ffc28
Parents: a88ebb2
Author: Patrick Wong <pw...@maprtech.com>
Authored: Wed Jun 18 00:22:51 2014 +0000
Committer: Jacques Nadeau <ja...@apache.org>
Committed: Wed Jun 18 21:44:54 2014 -0700

----------------------------------------------------------------------
 distribution/src/resources/sqlline.bat | 23 +++++++++++++----------
 1 file changed, 13 insertions(+), 10 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/7d436697/distribution/src/resources/sqlline.bat
----------------------------------------------------------------------
diff --git a/distribution/src/resources/sqlline.bat b/distribution/src/resources/sqlline.bat
index dde4608..48aa97d 100755
--- a/distribution/src/resources/sqlline.bat
+++ b/distribution/src/resources/sqlline.bat
@@ -133,22 +133,25 @@ if "test%HBASE_HOME%" == "test" (
 )
 
 echo Calculating Drill classpath...
-set DRILL_CLASSPATH=%DRILL_HOME%\jars\*;%DRILL_CLASSPATH%
-set DRILL_CLASSPATH=%DRILL_HOME%\lib\*;%DRILL_CLASSPATH%
-set DRILL_CLASSPATH=%DRILL_HOME%\contrib\*;%DRILL_CLASSPATH%
-if "test%USE_HADOOP_CP%"=="1" set DRILL_CLASSPATH=%HADOOP_CLASSPATH%;!DRILL_CLASSPATH!
-if "test%USE_HBASE_CP%"=="1" set DRILL_CLASSPATH=%HBASE_CLASSPATH%;!DRILL_CLASSPATH!
-set DRILL_CLASSPATH=%DRILL_CONF_DIR%;%DRILL_CLASSPATH%
+
+set DRILL_CP=%DRILL_CONF_DIR%
+if NOT "test%DRILL_CLASSPATH_PREFIX%"=="test" set DRILL_CP=!DRILL_CP!;%DRILL_CLASSPATH_PREFIX%
+set DRILL_CP=%DRILL_CP%;%DRILL_HOME%\jars\*
+set DRILL_CP=%DRILL_CP%;%DRILL_HOME%\extlib\*
+if "test%USE_HADOOP_CP%"=="test1" set DRILL_CP=!DRILL_CP!;%HADOOP_CLASSPATH%
+if "test%USE_HBASE_CP%"=="test1" set DRILL_CP=!DRILL_CP!;%HBASE_CLASSPATH%
+set DRILL_CP=%DRILL_CP%;%DRILL_HOME%\lib\*
+set DRILL_CP=%DRILL_CP%;%DRILL_HOME%\contrib\*
+if NOT "test%DRILL_CLASSPATH%"=="test" set DRILL_CP=!DRILL_CP!;%DRILL_CLASSPATH%
 
 set "DRILL_SHELL_JAVA_OPTS=%DRILL_SHELL_JAVA_OPTS% -Dlog.path=%DRILL_LOG_DIR%\sqlline.log"
 
 if NOT "test%QUERY%"=="test" (
-  echo %QUERY% | java %DRILL_SHELL_JAVA_OPTS% %DRILL_JAVA_OPTS% -cp %DRILL_CLASSPATH% sqlline.SqlLine -d org.apache.drill.jdbc.Driver %DRILL_ARGS%
+  echo %QUERY% | java %DRILL_SHELL_JAVA_OPTS% %DRILL_JAVA_OPTS% -cp %DRILL_CP% sqlline.SqlLine -d org.apache.drill.jdbc.Driver %DRILL_ARGS%
 ) else (
   if NOT "test%FILE%"=="test" (
-    java %DRILL_SHELL_JAVA_OPTS% %DRILL_JAVA_OPTS% -cp %DRILL_CLASSPATH% sqlline.SqlLine -d org.apache.drill.jdbc.Driver %DRILL_ARGS% --run=%FILE%
+    java %DRILL_SHELL_JAVA_OPTS% %DRILL_JAVA_OPTS% -cp %DRILL_CP% sqlline.SqlLine -d org.apache.drill.jdbc.Driver %DRILL_ARGS% --run=%FILE%
   ) else (
-    java %DRILL_SHELL_JAVA_OPTS% %DRILL_JAVA_OPTS% -cp %DRILL_CLASSPATH% sqlline.SqlLine -d org.apache.drill.jdbc.Driver %DRILL_ARGS%
+    java %DRILL_SHELL_JAVA_OPTS% %DRILL_JAVA_OPTS% -cp %DRILL_CP% sqlline.SqlLine -d org.apache.drill.jdbc.Driver %DRILL_ARGS%
   )
 )
-
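
Note: the rewrite replaces the old prepend-style DRILL_CLASSPATH assembly with a single DRILL_CP built in explicit precedence order: conf dir, optional prefix, jars, extlib, optional Hadoop/HBase entries, lib, contrib, then any trailing DRILL_CLASSPATH. A hedged Java sketch of that ordering (method and variable names are hypothetical):

    import java.util.ArrayList;
    import java.util.List;

    // Hypothetical sketch of the precedence order the revised sqlline.bat encodes.
    public class ClasspathOrder {
      static String compose(String confDir, String prefix, String drillHome,
                            String hadoopCp, String hbaseCp, String suffix) {
        List<String> parts = new ArrayList<>();
        parts.add(confDir);                          // config always first
        if (prefix != null) parts.add(prefix);       // DRILL_CLASSPATH_PREFIX
        parts.add(drillHome + "\\jars\\*");
        parts.add(drillHome + "\\extlib\\*");
        if (hadoopCp != null) parts.add(hadoopCp);   // only when USE_HADOOP_CP=1
        if (hbaseCp != null) parts.add(hbaseCp);     // only when USE_HBASE_CP=1
        parts.add(drillHome + "\\lib\\*");
        parts.add(drillHome + "\\contrib\\*");
        if (suffix != null) parts.add(suffix);       // trailing DRILL_CLASSPATH
        return String.join(";", parts);
      }

      public static void main(String[] args) {
        System.out.println(
            compose("C:\\drill\\conf", null, "C:\\drill", null, null, null));
      }
    }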


[09/32] git commit: DRILL-953: Handle the case in which /var/log/drill does not exist

Posted by ja...@apache.org.
DRILL-953: Handle the case in which /var/log/drill does not exist


Project: http://git-wip-us.apache.org/repos/asf/incubator-drill/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-drill/commit/4243f54d
Tree: http://git-wip-us.apache.org/repos/asf/incubator-drill/tree/4243f54d
Diff: http://git-wip-us.apache.org/repos/asf/incubator-drill/diff/4243f54d

Branch: refs/heads/master
Commit: 4243f54d16a5595f0a09f6f01a9c9426651d95b2
Parents: 7d43669
Author: Patrick Wong <pw...@maprtech.com>
Authored: Wed Jun 18 21:57:35 2014 +0000
Committer: Jacques Nadeau <ja...@apache.org>
Committed: Wed Jun 18 21:46:31 2014 -0700

----------------------------------------------------------------------
 distribution/src/resources/drill-config.sh | 6 ++++++
 1 file changed, 6 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/4243f54d/distribution/src/resources/drill-config.sh
----------------------------------------------------------------------
diff --git a/distribution/src/resources/drill-config.sh b/distribution/src/resources/drill-config.sh
index 98ae8e5..795e97a 100644
--- a/distribution/src/resources/drill-config.sh
+++ b/distribution/src/resources/drill-config.sh
@@ -82,6 +82,12 @@ if [ "$DRILL_LOG_DIR" = "" ]; then
   DRILL_LOG_DIR=/var/log/drill
 fi
 
+if [ ! -d $DRILL_LOG_DIR ]; then
+  echo "Drill log directory $DRILL_LOG_DIR does not exist, defaulting to $DRILL_HOME/log"
+  DRILL_LOG_DIR=$DRILL_HOME/log
+  mkdir -p $DRILL_LOG_DIR
+fi
+
 # If HADOOP_HOME is specified, add all Hadoop jars except those
 # specifically excluded in $DRILL_HOME/bin/hadoop-excludes.txt
 if [ "${HADOOP_HOME}x" != "x" ] ; then


[26/32] DRILL-1024: Move hive storage code out of 'exec/java-exec' into 'contrib/storage-hive' module.

Posted by ja...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/980dc87b/exec/java-exec/src/main/java/org/apache/drill/exec/store/hive/HiveTable.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/store/hive/HiveTable.java b/exec/java-exec/src/main/java/org/apache/drill/exec/store/hive/HiveTable.java
deleted file mode 100644
index 50c81e9..0000000
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/store/hive/HiveTable.java
+++ /dev/null
@@ -1,331 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.drill.exec.store.hive;
-
-import java.util.List;
-import java.util.Map;
-
-import org.apache.hadoop.hive.metastore.api.FieldSchema;
-import org.apache.hadoop.hive.metastore.api.Order;
-import org.apache.hadoop.hive.metastore.api.Partition;
-import org.apache.hadoop.hive.metastore.api.SerDeInfo;
-import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
-import org.apache.hadoop.hive.metastore.api.Table;
-
-import com.fasterxml.jackson.annotation.JsonCreator;
-import com.fasterxml.jackson.annotation.JsonIgnore;
-import com.fasterxml.jackson.annotation.JsonProperty;
-import com.fasterxml.jackson.annotation.JsonTypeName;
-import com.google.common.collect.Lists;
-
-@JsonTypeName("table")
-public class HiveTable {
-
-  @JsonIgnore
-  private Table table;
-
-  @JsonProperty
-  public String tableName;
-  @JsonProperty
-  public String dbName;
-  @JsonProperty
-  public String owner;
-  @JsonProperty
-  public int createTime;
-  @JsonProperty
-  public int lastAccessTime;
-  @JsonProperty
-  public int retention;
-  @JsonProperty
-  public StorageDescriptorWrapper sd;
-  @JsonProperty
-  public List<FieldSchemaWrapper> partitionKeys;
-  @JsonProperty
-  public Map<String,String> parameters;
-  @JsonProperty
-  public String viewOriginalText;
-  @JsonProperty
-  public String viewExpandedText;
-  @JsonProperty
-  public String tableType;
-
-  @JsonCreator
-  public HiveTable(@JsonProperty("tableName") String tableName, @JsonProperty("dbName") String dbName, @JsonProperty("owner") String owner, @JsonProperty("createTime") int createTime,
-                   @JsonProperty("lastAccessTime") int lastAccessTime, @JsonProperty("retention") int retention, @JsonProperty("sd") StorageDescriptorWrapper sd,
-                   @JsonProperty("partitionKeys") List<FieldSchemaWrapper> partitionKeys, @JsonProperty("parameters") Map<String, String> parameters,
-                   @JsonProperty("viewOriginalText") String viewOriginalText, @JsonProperty("viewExpandedText") String viewExpandedText, @JsonProperty("tableType") String tableType
-                   ) {
-    this.tableName = tableName;
-    this.dbName = dbName;
-    this.owner = owner;
-    this.createTime = createTime;
-    this.lastAccessTime = lastAccessTime;
-    this.retention = retention;
-    this.sd = sd;
-    this.partitionKeys = partitionKeys;
-    this.parameters = parameters;
-    this.viewOriginalText = viewOriginalText;
-    this.viewExpandedText = viewExpandedText;
-    this.tableType = tableType;
-
-    List<FieldSchema> partitionKeysUnwrapped = Lists.newArrayList();
-    for (FieldSchemaWrapper w : partitionKeys) partitionKeysUnwrapped.add(w.getFieldSchema());
-    StorageDescriptor sdUnwrapped = sd.getSd();
-    this.table = new Table(tableName, dbName, owner, createTime, lastAccessTime, retention, sdUnwrapped, partitionKeysUnwrapped,
-            parameters, viewOriginalText, viewExpandedText, tableType);
-  }
-
-  public HiveTable(Table table) {
-    if (table == null) return;
-    this.table = table;
-    this.tableName = table.getTableName();
-    this.dbName = table.getDbName();
-    this.owner = table.getOwner();
-    this.createTime = table.getCreateTime();
-    this.lastAccessTime = table.getLastAccessTime();
-    this.retention = table.getRetention();
-    this.sd = new StorageDescriptorWrapper(table.getSd());
-    this.partitionKeys = Lists.newArrayList();
-    for (FieldSchema f : table.getPartitionKeys()) this.partitionKeys.add(new FieldSchemaWrapper(f));
-    this.parameters = table.getParameters();
-    this.viewOriginalText = table.getViewOriginalText();
-    this.viewExpandedText = table.getViewExpandedText();
-    this.tableType = table.getTableType();
-  }
-
-  @JsonIgnore
-  public Table getTable() {
-    return table;
-  }
-
-  public static class HivePartition {
-
-    @JsonIgnore
-    private Partition partition;
-
-    @JsonProperty
-    public List<String> values;
-    @JsonProperty
-    public String tableName;
-    @JsonProperty
-    public String dbName;
-    @JsonProperty
-    public int createTime;
-    @JsonProperty
-    public int lastAccessTime;
-    @JsonProperty
-    public StorageDescriptorWrapper sd;
-    @JsonProperty
-    public Map<String,String> parameters;
-
-    @JsonCreator
-    public HivePartition(@JsonProperty("values") List<String> values, @JsonProperty("tableName") String tableName, @JsonProperty("dbName") String dbName, @JsonProperty("createTime") int createTime,
-                     @JsonProperty("lastAccessTime") int lastAccessTime,  @JsonProperty("sd") StorageDescriptorWrapper sd,
-                     @JsonProperty("parameters") Map<String, String> parameters
-    ) {
-      this.values = values;
-      this.tableName = tableName;
-      this.dbName = dbName;
-      this.createTime = createTime;
-      this.lastAccessTime = lastAccessTime;
-      this.sd = sd;
-      this.parameters = parameters;
-
-      StorageDescriptor sdUnwrapped = sd.getSd();
-      this.partition = new org.apache.hadoop.hive.metastore.api.Partition(values, tableName, dbName, createTime, lastAccessTime, sdUnwrapped, parameters);
-    }
-
-    public HivePartition(Partition partition) {
-      if (partition == null) return;
-      this.partition = partition;
-      this.values = partition.getValues();
-      this.tableName = partition.getTableName();
-      this.dbName = partition.getDbName();
-      this.createTime = partition.getCreateTime();
-      this.lastAccessTime = partition.getLastAccessTime();
-      this.sd = new StorageDescriptorWrapper(partition.getSd());
-      this.parameters = partition.getParameters();
-    }
-
-    @JsonIgnore
-    public Partition getPartition() {
-      return partition;
-    }
-  }
-
-  public static class StorageDescriptorWrapper {
-    @JsonIgnore
-    private StorageDescriptor sd;
-    @JsonProperty
-    public List<FieldSchemaWrapper> cols;
-    @JsonProperty
-    public String location;
-    @JsonProperty
-    public String inputFormat;
-    @JsonProperty
-    public String outputFormat;
-    @JsonProperty
-    public boolean compressed;
-    @JsonProperty
-    public int numBuckets;
-    @JsonProperty
-    public SerDeInfoWrapper serDeInfo;
-//    @JsonProperty
-//    public List<String> bucketCols;
-    @JsonProperty
-    public List<OrderWrapper> sortCols;
-    @JsonProperty
-    public Map<String,String> parameters;
-
-    @JsonCreator
-    public StorageDescriptorWrapper(@JsonProperty("cols") List<FieldSchemaWrapper> cols, @JsonProperty("location") String location, @JsonProperty("inputFormat") String inputFormat,
-                                    @JsonProperty("outputFormat") String outputFormat, @JsonProperty("compressed") boolean compressed, @JsonProperty("numBuckets") int numBuckets,
-                                    @JsonProperty("serDeInfo") SerDeInfoWrapper serDeInfo,  @JsonProperty("sortCols") List<OrderWrapper> sortCols,
-                                    @JsonProperty("parameters") Map<String,String> parameters) {
-      this.cols = cols;
-      this.location = location;
-      this.inputFormat = inputFormat;
-      this.outputFormat = outputFormat;
-      this.compressed = compressed;
-      this.numBuckets = numBuckets;
-      this.serDeInfo = serDeInfo;
-//      this.bucketCols = bucketCols;
-      this.sortCols = sortCols;
-      this.parameters = parameters;
-      List<FieldSchema> colsUnwrapped = Lists.newArrayList();
-      for (FieldSchemaWrapper w: cols) colsUnwrapped.add(w.getFieldSchema());
-      SerDeInfo serDeInfoUnwrapped = serDeInfo.getSerDeInfo();
-      List<Order> sortColsUnwrapped = Lists.newArrayList();
-      for (OrderWrapper w : sortCols) sortColsUnwrapped.add(w.getOrder());
-//      this.sd = new StorageDescriptor(colsUnwrapped, location, inputFormat, outputFormat, compressed, numBuckets, serDeInfoUnwrapped,
-//              bucketCols, sortColsUnwrapped, parameters);
-      this.sd = new StorageDescriptor(colsUnwrapped, location, inputFormat, outputFormat, compressed, numBuckets, serDeInfoUnwrapped,
-              null, sortColsUnwrapped, parameters);
-    }
-
-    public StorageDescriptorWrapper(StorageDescriptor sd) {
-      this.sd = sd;
-      this.cols = Lists.newArrayList();
-      for (FieldSchema f : sd.getCols()) this.cols.add(new FieldSchemaWrapper(f));
-      this.location = sd.getLocation();
-      this.inputFormat = sd.getInputFormat();
-      this.outputFormat = sd.getOutputFormat();
-      this.compressed = sd.isCompressed();
-      this.numBuckets = sd.getNumBuckets();
-      this.serDeInfo = new SerDeInfoWrapper(sd.getSerdeInfo());
-//      this.bucketCols = sd.getBucketCols();
-      this.sortCols = Lists.newArrayList();
-      for (Order o : sd.getSortCols()) this.sortCols.add(new OrderWrapper(o));
-      this.parameters = sd.getParameters();
-    }
-
-    @JsonIgnore
-    public StorageDescriptor getSd() {
-      return sd;
-    }
-
-  }
-
-  public static class SerDeInfoWrapper {
-    @JsonIgnore
-    private SerDeInfo serDeInfo;
-    @JsonProperty
-    public String name;
-    @JsonProperty
-    public String serializationLib;
-    @JsonProperty
-    public Map<String,String> parameters;
-
-    @JsonCreator
-    public SerDeInfoWrapper(@JsonProperty("name") String name, @JsonProperty("serializationLib") String serializationLib, @JsonProperty("parameters") Map<String, String> parameters) {
-      this.name = name;
-      this.serializationLib = serializationLib;
-      this.parameters = parameters;
-      this.serDeInfo = new SerDeInfo(name, serializationLib, parameters);
-    }
-
-    public SerDeInfoWrapper(SerDeInfo serDeInfo) {
-      this.serDeInfo = serDeInfo;
-      this.name = serDeInfo.getName();
-      this.serializationLib = serDeInfo.getSerializationLib();
-      this.parameters = serDeInfo.getParameters();
-    }
-
-    @JsonIgnore
-    public SerDeInfo getSerDeInfo() {
-      return serDeInfo;
-    }
-  }
-
-  public static class FieldSchemaWrapper {
-    @JsonIgnore
-    private FieldSchema fieldSchema;
-    @JsonProperty
-    public String name;
-    @JsonProperty
-    public String type;
-    @JsonProperty
-    public String comment;
-
-    @JsonCreator
-    public FieldSchemaWrapper(@JsonProperty("name") String name, @JsonProperty("type") String type, @JsonProperty("comment") String comment) {
-      this.name = name;
-      this.type = type;
-      this.comment = comment;
-      this.fieldSchema = new FieldSchema(name, type, comment);
-    }
-
-    public FieldSchemaWrapper(FieldSchema fieldSchema) {
-      this.fieldSchema = fieldSchema;
-      this.name = fieldSchema.getName();
-      this.type = fieldSchema.getType();
-      this.comment = fieldSchema.getComment();
-    }
-
-    @JsonIgnore
-    public FieldSchema getFieldSchema() {
-      return fieldSchema;
-    }
-  }
-
-  public static class OrderWrapper {
-    @JsonIgnore
-    private Order ord;
-    @JsonProperty
-    public String col;
-    @JsonProperty
-    public int order;
-
-    @JsonCreator
-    public OrderWrapper(@JsonProperty("col") String col, @JsonProperty("order") int order) {
-      this.col = col;
-      this.order = order;
-    }
-
-    public OrderWrapper(Order ord) {
-      this.ord = ord;
-      this.col = ord.getCol();
-      this.order = ord.getOrder();
-    }
-
-    @JsonIgnore
-    public Order getOrder() {
-      return ord;
-    }
-  }
-}
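
Note: the moved HiveTable keeps a @JsonIgnore'd Thrift object alongside public @JsonProperty fields, rebuilding the Thrift object from those fields on deserialization; every nested wrapper here (partition, storage descriptor, SerDe, field schema, order) follows the same shape so plans round-trip through JSON. A minimal sketch of that wrap/unwrap pattern, with a hypothetical stand-in for the Thrift-generated class:

    import com.fasterxml.jackson.annotation.JsonCreator;
    import com.fasterxml.jackson.annotation.JsonIgnore;
    import com.fasterxml.jackson.annotation.JsonProperty;

    // 'Thing' is a hypothetical stand-in for a Thrift-generated metastore class.
    class Thing {
      final String name;
      Thing(String name) { this.name = name; }
    }

    public class ThingWrapper {
      @JsonIgnore
      private final Thing thing;   // native object, hidden from JSON

      @JsonProperty
      public String name;          // JSON-visible mirror of the native field

      @JsonCreator
      public ThingWrapper(@JsonProperty("name") String name) {
        this.name = name;
        this.thing = new Thing(name);  // rebuild native object from JSON fields
      }

      public ThingWrapper(Thing thing) {
        this.thing = thing;
        this.name = thing.name;        // copy native fields into JSON mirrors
      }

      @JsonIgnore
      public Thing getThing() {
        return thing;
      }
    }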

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/980dc87b/exec/java-exec/src/main/java/org/apache/drill/exec/store/hive/HiveTextRecordReader.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/store/hive/HiveTextRecordReader.java b/exec/java-exec/src/main/java/org/apache/drill/exec/store/hive/HiveTextRecordReader.java
deleted file mode 100644
index 116603c..0000000
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/store/hive/HiveTextRecordReader.java
+++ /dev/null
@@ -1,176 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.drill.exec.store.hive;
-
-import java.io.IOException;
-import java.util.List;
-
-import org.apache.drill.common.exceptions.DrillRuntimeException;
-import org.apache.drill.common.exceptions.ExecutionSetupException;
-import org.apache.drill.common.expression.SchemaPath;
-import org.apache.drill.exec.ops.FragmentContext;
-import org.apache.drill.exec.vector.NullableBigIntVector;
-import org.apache.drill.exec.vector.NullableIntVector;
-import org.apache.drill.exec.vector.NullableVarCharVector;
-import org.apache.drill.exec.vector.ValueVector;
-import org.apache.drill.exec.vector.allocator.VectorAllocator;
-import org.apache.hadoop.hive.metastore.api.Partition;
-import org.apache.hadoop.hive.metastore.api.Table;
-import org.apache.hadoop.hive.serde2.objectinspector.PrimitiveObjectInspector;
-import org.apache.hadoop.io.Text;
-import org.apache.hadoop.mapred.InputSplit;
-
-import com.google.common.collect.Lists;
-
-/**
- * Note: Native hive text record reader is not complete in implementation. For now use
- * {@link org.apache.drill.exec.store.hive.HiveRecordReader}.
- */
-public class HiveTextRecordReader extends HiveRecordReader {
-
-  public final byte delimiter;
-  public final List<Integer> columnIds;
-  private final int numCols;
-
-  public HiveTextRecordReader(Table table, Partition partition, InputSplit inputSplit, List<SchemaPath> projectedColumns, FragmentContext context) throws ExecutionSetupException {
-    super(table, partition, inputSplit, projectedColumns, context);
-    String d = table.getSd().getSerdeInfo().getParameters().get("field.delim");
-    if (d != null) {
-      delimiter = d.getBytes()[0];
-    } else {
-      delimiter = (byte) 1;
-    }
-    assert delimiter > 0;
-    List<Integer> ids = Lists.newArrayList();
-    for (int i = 0; i < tableColumns.size(); i++) {
-      if (selectedColumnNames.contains(tableColumns.get(i))) {
-        ids.add(i);
-      }
-    }
-    columnIds = ids;
-    numCols = tableColumns.size();
-  }
-
-  public boolean setValue(PrimitiveObjectInspector.PrimitiveCategory pCat, ValueVector vv, int index, byte[] bytes, int start) {
-    switch(pCat) {
-      case BINARY:
-        throw new UnsupportedOperationException();
-      case BOOLEAN:
-        throw new UnsupportedOperationException();
-      case BYTE:
-        throw new UnsupportedOperationException();
-      case DECIMAL:
-        throw new UnsupportedOperationException();
-      case DOUBLE:
-        throw new UnsupportedOperationException();
-      case FLOAT:
-        throw new UnsupportedOperationException();
-      case INT: {
-        int value = 0;
-        byte b;
-        for (int i = start; (b = bytes[i]) != delimiter; i++) {
-          value = (value * 10) + b - 48;
-        }
-        return ((NullableIntVector) vv).getMutator().setSafe(index, value);
-      }
-      case LONG: {
-        long value = 0;
-        byte b;
-        for (int i = start; (b = bytes[i]) != delimiter; i++) {
-          value = (value * 10) + b - 48;
-        }
-        return ((NullableBigIntVector) vv).getMutator().setSafe(index, value);
-      }
-      case SHORT:
-        throw new UnsupportedOperationException();
-      case STRING: {
-        int end = start;
-        for (int i = start; i < bytes.length; i++) {
-          if (bytes[i] == delimiter) {
-            end = i;
-            break;
-          }
-          end = bytes.length;
-        }
-        return ((NullableVarCharVector) vv).getMutator().setSafe(index, bytes, start, end - start);
-      }
-      case TIMESTAMP:
-        throw new UnsupportedOperationException();
-
-      default:
-        throw new UnsupportedOperationException("Could not determine type");
-    }
-  }
-
-
-  @Override
-  public int next() {
-    for (ValueVector vv : vectors) {
-      VectorAllocator.getAllocator(vv, 50).alloc(TARGET_RECORD_COUNT);
-    }
-    try {
-      int recordCount = 0;
-      if (redoRecord != null) {
-        int length = ((Text) value).getLength();
-        byte[] bytes = ((Text) value).getBytes();
-        int[] delimPositions = new int[numCols];
-        delimPositions[0] = -1;
-        int p = 0;
-        for (int i = 0; i < length; i++) {
-          if (bytes[i] == delimiter) {
-            delimPositions[p++] = i;
-          }
-        }
-        for (int id : columnIds) {
-          boolean success = false; // setValue(primitiveCategories.get(id), vectors.get(id), recordCount, bytes, delimPositions[id]);
-          if (!success) {
-            throw new DrillRuntimeException(String.format("Failed to write value for column %s", selectedColumnNames.get(id)));
-          }
-
-        }
-        redoRecord = null;
-      }
-      while (recordCount < TARGET_RECORD_COUNT && reader.next(key, value)) {
-        int length = ((Text) value).getLength();
-        byte[] bytes = ((Text) value).getBytes();
-        int[] delimPositions = new int[numCols + 1];
-        delimPositions[0] = -1;
-        int p = 1;
-        for (int i = 0; i < length; i++) {
-          if (bytes[i] == delimiter) {
-            delimPositions[p++] = i;
-          }
-        }
-        for (int i = 0; i < columnIds.size(); i++) {
-          int id = columnIds.get(i);
-          boolean success = false; // setValue(primitiveCategories.get(i), vectors.get(i), recordCount, bytes, delimPositions[id] + 1);
-          if (!success) {
-            redoRecord = value;
-            if (partition != null) populatePartitionVectors(recordCount);
-            return recordCount;
-          }
-        }
-        recordCount++;
-      }
-      if (partition != null) populatePartitionVectors(recordCount);
-      return recordCount;
-    } catch (IOException e) {
-      throw new DrillRuntimeException(e);
-    }
-  }
-}
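
Note: the INT and LONG branches of setValue() accumulate ASCII digits in place until the field delimiter, avoiding an intermediate String allocation; they assume well-formed, non-negative digits. A hedged standalone version of that loop:

    // Hypothetical standalone version of the digit-scanning loop in setValue():
    // accumulate base-10 digits until the field delimiter is reached.
    public class DelimitedIntParser {
      static int parseUntil(byte[] bytes, int start, byte delimiter) {
        int value = 0;
        for (int i = start; i < bytes.length && bytes[i] != delimiter; i++) {
          value = value * 10 + (bytes[i] - '0');   // '0' == 48, as in the original
        }
        return value;
      }

      public static void main(String[] args) {
        byte[] row = "123\u000145\u0001x".getBytes();  // ^A-delimited, Hive's default
        System.out.println(parseUntil(row, 0, (byte) 1));  // 123
        System.out.println(parseUntil(row, 4, (byte) 1));  // 45
      }
    }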

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/980dc87b/exec/java-exec/src/main/java/org/apache/drill/exec/store/hive/schema/DrillHiveTable.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/store/hive/schema/DrillHiveTable.java b/exec/java-exec/src/main/java/org/apache/drill/exec/store/hive/schema/DrillHiveTable.java
deleted file mode 100644
index 949fa06..0000000
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/store/hive/schema/DrillHiveTable.java
+++ /dev/null
@@ -1,181 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.drill.exec.store.hive.schema;
-
-import java.nio.charset.Charset;
-import java.util.ArrayList;
-import java.util.List;
-
-import com.google.common.collect.Lists;
-import org.apache.drill.exec.planner.logical.DrillTable;
-import org.apache.drill.exec.store.hive.HiveReadEntry;
-import org.apache.drill.exec.store.hive.HiveStoragePlugin;
-import org.apache.hadoop.hive.metastore.api.FieldSchema;
-import org.apache.hadoop.hive.ql.metadata.Table;
-import org.apache.hadoop.hive.serde2.typeinfo.ListTypeInfo;
-import org.apache.hadoop.hive.serde2.typeinfo.MapTypeInfo;
-import org.apache.hadoop.hive.serde2.typeinfo.PrimitiveTypeInfo;
-import org.apache.hadoop.hive.serde2.typeinfo.StructTypeInfo;
-import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo;
-import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils;
-import org.eigenbase.reltype.RelDataType;
-import org.eigenbase.reltype.RelDataTypeFactory;
-import org.eigenbase.sql.SqlCollation;
-import org.eigenbase.sql.type.SqlTypeName;
-
-public class DrillHiveTable extends DrillTable{
-  static final org.slf4j.Logger logger = org.slf4j.LoggerFactory.getLogger(DrillHiveTable.class);
-  
-  protected final Table hiveTable;
-  
-  public DrillHiveTable(String storageEngineName, HiveStoragePlugin plugin, HiveReadEntry readEntry) {
-    super(storageEngineName, plugin, readEntry);
-    this.hiveTable = new Table(readEntry.getTable());
-  }
-
-  @Override
-  public RelDataType getRowType(RelDataTypeFactory typeFactory) {
-    List<RelDataType> typeList = Lists.newArrayList();
-    List<String> fieldNameList = Lists.newArrayList();
-
-    List<FieldSchema> hiveFields = hiveTable.getCols();
-    for(FieldSchema hiveField : hiveFields) {
-      fieldNameList.add(hiveField.getName());
-      typeList.add(getNullableRelDataTypeFromHiveType(
-          typeFactory, TypeInfoUtils.getTypeInfoFromTypeString(hiveField.getType())));
-    }
-
-    for (FieldSchema field : hiveTable.getPartitionKeys()) {
-      fieldNameList.add(field.getName());
-      typeList.add(getNullableRelDataTypeFromHiveType(
-          typeFactory, TypeInfoUtils.getTypeInfoFromTypeString(field.getType())));
-    }
-
-    return typeFactory.createStructType(typeList, fieldNameList);
-  }
-
-  private RelDataType getNullableRelDataTypeFromHiveType(RelDataTypeFactory typeFactory, TypeInfo typeInfo) {
-    RelDataType relDataType = getRelDataTypeFromHiveType(typeFactory, typeInfo);
-    return typeFactory.createTypeWithNullability(relDataType, true);
-  }
-
-  private RelDataType getRelDataTypeFromHivePrimitiveType(RelDataTypeFactory typeFactory, PrimitiveTypeInfo pTypeInfo) {
-    switch(pTypeInfo.getPrimitiveCategory()) {
-      case BOOLEAN:
-        return typeFactory.createSqlType(SqlTypeName.BOOLEAN);
-
-      case BYTE:
-        return typeFactory.createSqlType(SqlTypeName.TINYINT);
-
-      case SHORT:
-        return typeFactory.createSqlType(SqlTypeName.SMALLINT);
-
-      case INT:
-        return typeFactory.createSqlType(SqlTypeName.INTEGER);
-
-      case LONG:
-        return typeFactory.createSqlType(SqlTypeName.BIGINT);
-
-      case FLOAT:
-        return typeFactory.createSqlType(SqlTypeName.FLOAT);
-
-      case DOUBLE:
-        return typeFactory.createSqlType(SqlTypeName.DOUBLE);
-
-      case DATE:
-        return typeFactory.createSqlType(SqlTypeName.DATE);
-
-      case TIMESTAMP:
-        return typeFactory.createSqlType(SqlTypeName.TIMESTAMP);
-
-      case BINARY:
-        return typeFactory.createSqlType(SqlTypeName.BINARY);
-
-      case DECIMAL:
-        final int precision = 38; // Hive 0.12 has standard precision
-        return typeFactory.createSqlType(SqlTypeName.DECIMAL, precision);
-
-      case STRING:
-      case VARCHAR: {
-        int maxLen = TypeInfoUtils.getCharacterLengthForType(pTypeInfo);
-        return typeFactory.createTypeWithCharsetAndCollation(
-          typeFactory.createSqlType(SqlTypeName.VARCHAR, maxLen), /*input type*/
-          Charset.forName("ISO-8859-1"), /*unicode char set*/
-          SqlCollation.IMPLICIT /* TODO: need to decide if implicit is the correct one */
-        );
-      }
-
-      case UNKNOWN:
-      case VOID:
-      default:
-        throwUnsupportedHiveDataTypeError(pTypeInfo.getPrimitiveCategory().toString());
-    }
-
-    return null;
-  }
-
-  private RelDataType getRelDataTypeFromHiveType(RelDataTypeFactory typeFactory, TypeInfo typeInfo) {
-    switch(typeInfo.getCategory()) {
-      case PRIMITIVE:
-        return getRelDataTypeFromHivePrimitiveType(typeFactory, ((PrimitiveTypeInfo) typeInfo));
-
-      case LIST: {
-        ListTypeInfo listTypeInfo = (ListTypeInfo)typeInfo;
-        RelDataType listElemTypeInfo = getRelDataTypeFromHiveType(typeFactory, listTypeInfo.getListElementTypeInfo());
-        return typeFactory.createArrayType(listElemTypeInfo, -1);
-      }
-
-      case MAP: {
-        MapTypeInfo mapTypeInfo = (MapTypeInfo)typeInfo;
-        RelDataType keyType = getRelDataTypeFromHiveType(typeFactory, mapTypeInfo.getMapKeyTypeInfo());
-        RelDataType valueType = getRelDataTypeFromHiveType(typeFactory, mapTypeInfo.getMapValueTypeInfo());
-        return typeFactory.createMapType(keyType, valueType);
-      }
-
-      case STRUCT: {
-        StructTypeInfo structTypeInfo = (StructTypeInfo)typeInfo;
-        ArrayList<String> fieldNames = structTypeInfo.getAllStructFieldNames();
-        ArrayList<TypeInfo> fieldHiveTypeInfoList = structTypeInfo.getAllStructFieldTypeInfos();
-        List<RelDataType> fieldRelDataTypeList = Lists.newArrayList();
-        for(TypeInfo fieldHiveType : fieldHiveTypeInfoList) {
-          fieldRelDataTypeList.add(getRelDataTypeFromHiveType(typeFactory, fieldHiveType));
-        }
-        return typeFactory.createStructType(fieldRelDataTypeList, fieldNames);
-      }
-
-      case UNION:
-        logger.warn("There is no UNION data type in SQL. Converting it to Sql type OTHER to avoid " +
-            "breaking INFORMATION_SCHEMA queries");
-        return typeFactory.createSqlType(SqlTypeName.OTHER);
-    }
-
-    throwUnsupportedHiveDataTypeError(typeInfo.getCategory().toString());
-    return null;
-  }
-
-  private void throwUnsupportedHiveDataTypeError(String hiveType) {
-    StringBuilder errMsg = new StringBuilder();
-    errMsg.append(String.format("Unsupported Hive data type %s. ", hiveType));
-    errMsg.append(System.getProperty("line.separator"));
-    errMsg.append("Following Hive data types are supported in Drill INFORMATION_SCHEMA: ");
-    errMsg.append("BOOLEAN, BYTE, SHORT, INT, LONG, FLOAT, DOUBLE, DATE, TIMESTAMP, BINARY, DECIMAL, STRING, " +
-        "VARCHAR, LIST, MAP, STRUCT and UNION");
-
-    throw new RuntimeException(errMsg.toString());
-  }
-}
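
Note: getRowType() walks the table's columns and partition keys, converting each Hive TypeInfo to a nullable SQL type. A hedged condensed view of the primitive mapping above (the real code also threads precision, charset, and collation):

    import java.util.HashMap;
    import java.util.Map;

    // Hypothetical summary table: Hive primitive category -> SQL type name.
    public class HiveTypeMap {
      static final Map<String, String> PRIMITIVES = new HashMap<>();
      static {
        PRIMITIVES.put("BOOLEAN", "BOOLEAN");
        PRIMITIVES.put("BYTE", "TINYINT");
        PRIMITIVES.put("SHORT", "SMALLINT");
        PRIMITIVES.put("INT", "INTEGER");
        PRIMITIVES.put("LONG", "BIGINT");
        PRIMITIVES.put("FLOAT", "FLOAT");
        PRIMITIVES.put("DOUBLE", "DOUBLE");
        PRIMITIVES.put("DATE", "DATE");
        PRIMITIVES.put("TIMESTAMP", "TIMESTAMP");
        PRIMITIVES.put("BINARY", "BINARY");
        PRIMITIVES.put("DECIMAL", "DECIMAL(38)"); // Hive 0.12 standard precision
        PRIMITIVES.put("STRING", "VARCHAR");
        PRIMITIVES.put("VARCHAR", "VARCHAR");
      }

      public static void main(String[] args) {
        System.out.println(PRIMITIVES.get("LONG")); // BIGINT
      }
    }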

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/980dc87b/exec/java-exec/src/main/java/org/apache/drill/exec/store/hive/schema/DrillHiveViewTable.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/store/hive/schema/DrillHiveViewTable.java b/exec/java-exec/src/main/java/org/apache/drill/exec/store/hive/schema/DrillHiveViewTable.java
deleted file mode 100644
index b575972..0000000
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/store/hive/schema/DrillHiveViewTable.java
+++ /dev/null
@@ -1,40 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.drill.exec.store.hive.schema;
-
-import net.hydromatic.optiq.Schema.TableType;
-import org.apache.drill.exec.planner.logical.DrillViewInfoProvider;
-import org.apache.drill.exec.store.hive.HiveReadEntry;
-import org.apache.drill.exec.store.hive.HiveStoragePlugin;
-
-public class DrillHiveViewTable extends DrillHiveTable implements DrillViewInfoProvider {
-
-  public DrillHiveViewTable(String storageEngineName, HiveStoragePlugin plugin, HiveReadEntry readEntry) {
-    super(storageEngineName, plugin, readEntry);
-  }
-
-  @Override
-  public TableType getJdbcTableType() {
-    return TableType.VIEW;
-  }
-
-  @Override
-  public String getViewSql() {
-    return hiveTable.getViewExpandedText();
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/980dc87b/exec/java-exec/src/main/java/org/apache/drill/exec/store/hive/schema/HiveDatabaseSchema.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/store/hive/schema/HiveDatabaseSchema.java b/exec/java-exec/src/main/java/org/apache/drill/exec/store/hive/schema/HiveDatabaseSchema.java
deleted file mode 100644
index 0df2374..0000000
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/store/hive/schema/HiveDatabaseSchema.java
+++ /dev/null
@@ -1,63 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.drill.exec.store.hive.schema;
-
-import java.util.List;
-import java.util.Set;
-
-import net.hydromatic.optiq.Table;
-
-import org.apache.drill.exec.planner.logical.DrillTable;
-import org.apache.drill.exec.store.AbstractSchema;
-import org.apache.drill.exec.store.dfs.FileSystemConfig;
-import org.apache.drill.exec.store.hive.HiveStoragePluginConfig;
-import org.apache.drill.exec.store.hive.schema.HiveSchemaFactory.HiveSchema;
-
-import com.google.common.collect.Sets;
-
-public class HiveDatabaseSchema extends AbstractSchema{
-  static final org.slf4j.Logger logger = org.slf4j.LoggerFactory.getLogger(HiveDatabaseSchema.class);
-
-  private final HiveSchema hiveSchema;
-  private final Set<String> tables;
-
-  public HiveDatabaseSchema( //
-      List<String> tableList, //
-      HiveSchema hiveSchema, //
-      String name) {
-    super(hiveSchema.getSchemaPath(), name);
-    this.hiveSchema = hiveSchema;
-    this.tables = Sets.newHashSet(tableList);
-  }
-
-  @Override
-  public Table getTable(String tableName) {
-    return hiveSchema.getDrillTable(this.name, tableName);
-  }
-
-  @Override
-  public Set<String> getTableNames() {
-    return tables;
-  }
-
-  @Override
-  public String getTypeName() {
-    return HiveStoragePluginConfig.NAME;
-  }
-
-}

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/980dc87b/exec/java-exec/src/main/java/org/apache/drill/exec/store/hive/schema/HiveSchemaFactory.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/store/hive/schema/HiveSchemaFactory.java b/exec/java-exec/src/main/java/org/apache/drill/exec/store/hive/schema/HiveSchemaFactory.java
deleted file mode 100644
index 7e6b92b..0000000
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/store/hive/schema/HiveSchemaFactory.java
+++ /dev/null
@@ -1,292 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.drill.exec.store.hive.schema;
-
-import java.util.Collections;
-import java.util.List;
-import java.util.Set;
-import java.util.concurrent.ExecutionException;
-import java.util.concurrent.TimeUnit;
-
-import com.google.common.collect.ImmutableList;
-
-import net.hydromatic.optiq.Schema;
-import net.hydromatic.optiq.SchemaPlus;
-
-import org.apache.drill.common.exceptions.ExecutionSetupException;
-import org.apache.drill.exec.planner.logical.DrillTable;
-import org.apache.drill.exec.rpc.user.DrillUser;
-import org.apache.drill.exec.rpc.user.UserSession;
-import org.apache.drill.exec.store.AbstractSchema;
-import org.apache.drill.exec.store.SchemaFactory;
-import org.apache.drill.exec.store.hive.HiveReadEntry;
-import org.apache.drill.exec.store.hive.HiveStoragePlugin;
-import org.apache.drill.exec.store.hive.HiveStoragePluginConfig;
-import org.apache.drill.exec.store.hive.HiveTable;
-import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
-import org.apache.hadoop.hive.metastore.api.MetaException;
-import org.apache.hadoop.hive.metastore.api.Partition;
-import org.apache.hadoop.hive.metastore.api.Table;
-import org.apache.hadoop.hive.metastore.api.UnknownTableException;
-import org.apache.thrift.TException;
-
-import com.google.common.cache.CacheBuilder;
-import com.google.common.cache.CacheLoader;
-import com.google.common.cache.LoadingCache;
-import com.google.common.collect.Lists;
-import com.google.common.collect.Sets;
-
-public class HiveSchemaFactory implements SchemaFactory {
-  static final org.slf4j.Logger logger = org.slf4j.LoggerFactory.getLogger(HiveSchemaFactory.class);
-
-  private static final String DATABASES = "databases";
-
-  private final HiveMetaStoreClient mClient;
-  private LoadingCache<String, List<String>> databases;
-  private LoadingCache<String, List<String>> tableNameLoader;
-  private LoadingCache<String, LoadingCache<String, HiveReadEntry>> tableLoaders;
-  private HiveStoragePlugin plugin;
-  private final String schemaName;
-
-  public HiveSchemaFactory(HiveStoragePlugin plugin, String name, HiveConf hiveConf) throws ExecutionSetupException {
-    this.schemaName = name;
-    this.plugin = plugin;
-
-    try {
-      this.mClient = new HiveMetaStoreClient(hiveConf);
-    } catch (MetaException e) {
-      throw new ExecutionSetupException("Failure setting up Hive metastore client.", e);
-    }
-
-    databases = CacheBuilder //
-        .newBuilder() //
-        .expireAfterAccess(1, TimeUnit.MINUTES) //
-        .build(new DatabaseLoader());
-
-    tableNameLoader = CacheBuilder //
-        .newBuilder() //
-        .expireAfterAccess(1, TimeUnit.MINUTES) //
-        .build(new TableNameLoader());
-
-    tableLoaders = CacheBuilder //
-        .newBuilder() //
-        .expireAfterAccess(4, TimeUnit.HOURS) //
-        .maximumSize(20) //
-        .build(new TableLoaderLoader());
-  }
-
-  private class TableNameLoader extends CacheLoader<String, List<String>> {
-
-    @Override
-    public List<String> load(String dbName) throws Exception {
-      try {
-        return mClient.getAllTables(dbName);
-      } catch (TException e) {
-        logger.warn("Failure while attempting to get hive tables", e);
-        mClient.reconnect();
-        return mClient.getAllTables(dbName);
-      }
-    }
-
-  }
-
-  private class DatabaseLoader extends CacheLoader<String, List<String>> {
-
-    @Override
-    public List<String> load(String key) throws Exception {
-      if (!DATABASES.equals(key))
-        throw new UnsupportedOperationException();
-      try {
-        return mClient.getAllDatabases();
-      } catch (TException e) {
-        logger.warn("Failure while attempting to get hive tables", e);
-        mClient.reconnect();
-        return mClient.getAllDatabases();
-      }
-    }
-  }
-
-  private class TableLoaderLoader extends CacheLoader<String, LoadingCache<String, HiveReadEntry>> {
-
-    @Override
-    public LoadingCache<String, HiveReadEntry> load(String key) throws Exception {
-      return CacheBuilder.newBuilder().expireAfterAccess(1, TimeUnit.MINUTES).build(new TableLoader(key));
-    }
-
-  }
-
-  private class TableLoader extends CacheLoader<String, HiveReadEntry> {
-
-    private final String dbName;
-
-    public TableLoader(String dbName) {
-      super();
-      this.dbName = dbName;
-    }
-
-    @Override
-    public HiveReadEntry load(String key) throws Exception {
-      Table t = null;
-      try {
-        t = mClient.getTable(dbName, key);
-      } catch (TException e) {
-        mClient.reconnect();
-        t = mClient.getTable(dbName, key);
-      }
-
-      if (t == null)
-        throw new UnknownTableException(String.format("Unable to find table '%s'.", key));
-
-      List<Partition> partitions = null;
-      try {
-        partitions = mClient.listPartitions(dbName, key, Short.MAX_VALUE);
-      } catch (TException e) {
-        mClient.reconnect();
-        partitions = mClient.listPartitions(dbName, key, Short.MAX_VALUE);
-      }
-
-      List<HiveTable.HivePartition> hivePartitions = Lists.newArrayList();
-      for (Partition part : partitions) {
-        hivePartitions.add(new HiveTable.HivePartition(part));
-      }
-
-      if (hivePartitions.size() == 0)
-        hivePartitions = null;
-      return new HiveReadEntry(new HiveTable(t), hivePartitions);
-
-    }
-
-  }
-
-  @Override
-  public void registerSchemas(UserSession session, SchemaPlus parent) {
-    HiveSchema schema = new HiveSchema(schemaName);
-    SchemaPlus hPlus = parent.add(schemaName, schema);
-    schema.setHolder(hPlus);
-  }
-
-  class HiveSchema extends AbstractSchema {
-
-    private HiveDatabaseSchema defaultSchema;
-
-    public HiveSchema(String name) {
-      super(ImmutableList.<String>of(), name);
-      getSubSchema("default");
-    }
-
-    @Override
-    public Schema getSubSchema(String name) {
-      List<String> tables;
-      try {
-        tables = tableNameLoader.get(name);
-        HiveDatabaseSchema schema = new HiveDatabaseSchema(tables, this, name);
-        if(name.equals("default")){
-          this.defaultSchema = schema;
-        }
-        return schema;
-      } catch (ExecutionException e) {
-        logger.warn("Failure while attempting to access HiveDatabase '{}'.", name, e.getCause());
-        return null;
-      }
-
-    }
-
-
-    void setHolder(SchemaPlus plusOfThis){
-      for(String s : getSubSchemaNames()){
-        plusOfThis.add(s, getSubSchema(s));
-      }
-    }
-
-    @Override
-    public boolean showInInformationSchema() {
-      return false;
-    }
-
-    @Override
-    public Set<String> getSubSchemaNames() {
-      try{
-        List<String> dbs = databases.get(DATABASES);
-        return Sets.newHashSet(dbs);
-      }catch(ExecutionException e){
-        logger.warn("Failure while getting Hive database list.", e);
-      }
-      return super.getSubSchemaNames();
-    }
-
-    @Override
-    public net.hydromatic.optiq.Table getTable(String name) {
-      if(defaultSchema == null){
-        return super.getTable(name);
-      }
-      return defaultSchema.getTable(name);
-    }
-
-    @Override
-    public Set<String> getTableNames() {
-      if(defaultSchema == null){
-        return super.getTableNames();
-      }
-      return defaultSchema.getTableNames();
-    }
-
-    List<String> getTableNames(String dbName){
-      try{
-        return tableNameLoader.get(dbName);
-      }catch(ExecutionException e){
-        logger.warn("Failure while loading table names for database '{}'.", dbName, e.getCause());
-        return Collections.emptyList();
-      }
-    }
-
-    DrillTable getDrillTable(String dbName, String t){
-      HiveReadEntry entry = getSelectionBaseOnName(dbName, t);
-      if(entry == null) return null;
-
-      if (entry.getJdbcTableType() == TableType.VIEW) {
-        return new DrillHiveViewTable(schemaName, plugin, entry);
-      } else {
-        return new DrillHiveTable(schemaName, plugin, entry);
-      }
-    }
-
-    HiveReadEntry getSelectionBaseOnName(String dbName, String t) {
-      if(dbName == null) dbName = "default";
-      try{
-        return tableLoaders.get(dbName).get(t);
-      }catch(ExecutionException e){
-        logger.warn("Exception occurred while trying to read table. {}.{}", dbName, t, e.getCause());
-        return null;
-      }
-    }
-
-    @Override
-    public AbstractSchema getDefaultSchema() {
-      return defaultSchema;
-    }
-
-    @Override
-    public String getTypeName() {
-      return HiveStoragePluginConfig.NAME;
-    }
-
-  }
-
-
-}
\ No newline at end of file
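
Note: the factory puts metastore access behind Guava LoadingCaches with short expiry, so repeated INFORMATION_SCHEMA and planning lookups do not hit the metastore every time; each loader also retries once after reconnecting on TException. A minimal sketch of that caching scheme (the loader body is a hypothetical stand-in):

    import java.util.concurrent.TimeUnit;

    import com.google.common.cache.CacheBuilder;
    import com.google.common.cache.CacheLoader;
    import com.google.common.cache.LoadingCache;

    // Minimal sketch of the caching scheme used by HiveSchemaFactory.
    public class MetastoreCacheSketch {
      private final LoadingCache<String, String> tableNames = CacheBuilder.newBuilder()
          .expireAfterAccess(1, TimeUnit.MINUTES)   // same expiry the factory uses
          .build(new CacheLoader<String, String>() {
            @Override
            public String load(String dbName) {
              // The real loader calls HiveMetaStoreClient.getAllTables(dbName),
              // reconnecting and retrying once on TException.
              return "tables-of-" + dbName;
            }
          });

      public static void main(String[] args) throws Exception {
        MetastoreCacheSketch s = new MetastoreCacheSketch();
        System.out.println(s.tableNames.get("default")); // loads once, then cached
      }
    }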

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/980dc87b/exec/java-exec/src/test/java/org/apache/drill/exec/physical/impl/TestHiveUDFs.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/test/java/org/apache/drill/exec/physical/impl/TestHiveUDFs.java b/exec/java-exec/src/test/java/org/apache/drill/exec/physical/impl/TestHiveUDFs.java
deleted file mode 100644
index b2fa898..0000000
--- a/exec/java-exec/src/test/java/org/apache/drill/exec/physical/impl/TestHiveUDFs.java
+++ /dev/null
@@ -1,210 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.drill.exec.physical.impl;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-import mockit.Injectable;
-import mockit.NonStrictExpectations;
-
-import org.apache.drill.common.config.DrillConfig;
-import org.apache.drill.exec.ExecTest;
-import org.apache.drill.exec.expr.fn.FunctionImplementationRegistry;
-import org.apache.drill.exec.memory.TopLevelAllocator;
-import org.apache.drill.exec.ops.FragmentContext;
-import org.apache.drill.exec.physical.PhysicalPlan;
-import org.apache.drill.exec.physical.base.FragmentRoot;
-import org.apache.drill.exec.planner.PhysicalPlanReader;
-import org.apache.drill.exec.proto.BitControl.PlanFragment;
-import org.apache.drill.exec.proto.CoordinationProtos;
-import org.apache.drill.exec.rpc.user.UserServer.UserClientConnection;
-import org.apache.drill.exec.server.DrillbitContext;
-import org.apache.drill.exec.vector.Float4Vector;
-import org.apache.drill.exec.vector.NullableBigIntVector;
-import org.apache.drill.exec.vector.NullableBitVector;
-import org.apache.drill.exec.vector.NullableFloat8Vector;
-import org.apache.drill.exec.vector.NullableIntVector;
-import org.apache.drill.exec.vector.NullableVar16CharVector;
-import org.apache.drill.exec.vector.ValueVector;
-import org.apache.drill.exec.vector.Var16CharVector;
-import org.junit.Test;
-
-import com.codahale.metrics.MetricRegistry;
-import com.google.common.base.Charsets;
-import com.google.common.io.Resources;
-
-import java.util.Iterator;
-
-public class TestHiveUDFs extends ExecTest {
-  static final org.slf4j.Logger logger = org.slf4j.LoggerFactory.getLogger(TestHiveUDFs.class);
-
-  DrillConfig c = DrillConfig.create();
-  PhysicalPlanReader reader;
-  FunctionImplementationRegistry registry;
-  FragmentContext context;
-
-  private void setup(final DrillbitContext bitContext, UserClientConnection connection) throws Throwable {
-    if(reader == null)
-      reader = new PhysicalPlanReader(c, c.getMapper(), CoordinationProtos.DrillbitEndpoint.getDefaultInstance());
-    if(registry == null)
-      registry = new FunctionImplementationRegistry(c);
-    if(context == null)
-      context = new FragmentContext(bitContext, PlanFragment.getDefaultInstance(), connection, registry);
-  }
-
-  @Test
-  public void testGenericUDF(@Injectable final DrillbitContext bitContext,
-                      @Injectable UserClientConnection connection) throws Throwable {
-    new NonStrictExpectations(){{
-      bitContext.getMetrics(); result = new MetricRegistry();
-      bitContext.getAllocator(); result = new TopLevelAllocator();
-      bitContext.getOperatorCreatorRegistry(); result = new OperatorCreatorRegistry(c);
-    }};
-
-    String planString = Resources.toString(Resources.getResource("functions/hive/GenericUDF.json"), Charsets.UTF_8);
-
-    setup(bitContext, connection);
-    PhysicalPlan plan = reader.readPhysicalPlan(planString);
-    SimpleRootExec exec = new SimpleRootExec(ImplCreator.getExec(context, (FragmentRoot) plan.getSortedOperators(false).iterator().next()));
-
-    int numRecords = 0;
-
-    while(exec.next()){
-      // Output columns and types
-      //  1. str1 : Var16Char
-      //  2. upperStr1 : NullableVar16Char
-      //  3. unix_timestamp : NullableBigInt
-      //  4. concat : NullableVarChar
-      //  5. flt1 : Float4
-      //  6. format_number : NullableFloat8
-      //  7. nullableStr1 : NullableVar16Char
-      //  8. upperNullableStr1 : NullableVar16Char
-      Iterator<ValueVector> vv = exec.iterator();
-      Var16CharVector str1V = (Var16CharVector) vv.next();
-      NullableVar16CharVector upperStr1V = (NullableVar16CharVector) vv.next();
-      NullableBigIntVector unix_timestampV = (NullableBigIntVector) vv.next();
-      NullableVar16CharVector concatV = (NullableVar16CharVector) vv.next();
-      Float4Vector flt1V = (Float4Vector) vv.next();
-      NullableVar16CharVector format_numberV = (NullableVar16CharVector) vv.next();
-      NullableVar16CharVector nullableStr1V = ((NullableVar16CharVector) vv.next());
-      NullableVar16CharVector upperNullableStr1V = ((NullableVar16CharVector) vv.next());
-
-      for(int i=0; i<exec.getRecordCount(); i++) {
-
-
-        String in = new String(str1V.getAccessor().get(i), Charsets.UTF_16);
-        String upper = new String(upperStr1V.getAccessor().get(i), Charsets.UTF_16);
-        assertTrue(in.toUpperCase().equals(upper));
-
-        long unix_timestamp = unix_timestampV.getAccessor().get(i);
-
-        String concat = new String(concatV.getAccessor().get(i), Charsets.UTF_16);
-        assertTrue(concat.equals(in+"-"+in));
-
-        float flt1 = flt1V.getAccessor().get(i);
-        String format_number = new String(format_numberV.getAccessor().get(i), Charsets.UTF_16);
-
-
-        String nullableStr1 = null;
-        if (!nullableStr1V.getAccessor().isNull(i))
-          nullableStr1 = new String(nullableStr1V.getAccessor().get(i), Charsets.UTF_16);
-
-        String upperNullableStr1 = null;
-        if (!upperNullableStr1V.getAccessor().isNull(i))
-          upperNullableStr1 = new String(upperNullableStr1V.getAccessor().get(i), Charsets.UTF_16);
-
-        assertEquals(nullableStr1 != null, upperNullableStr1 != null);
-        if (nullableStr1 != null)
-          assertEquals(nullableStr1.toUpperCase(), upperNullableStr1);
-
-        System.out.println(in + ", " + upper + ", " + unix_timestamp + ", " + concat + ", " +
-          flt1 + ", " + format_number + ", " + nullableStr1 + ", " + upperNullableStr1);
-
-        numRecords++;
-      }
-    }
-
-    System.out.println("Processed " + numRecords + " records");
-
-    if(context.getFailureCause() != null){
-      throw context.getFailureCause();
-    }
-
-    assertTrue(!context.isFailed());
-  }
-
-  @Test
-  public void testUDF(@Injectable final DrillbitContext bitContext,
-                             @Injectable UserClientConnection connection) throws Throwable {
-    new NonStrictExpectations(){{
-      bitContext.getMetrics(); result = new MetricRegistry();
-      bitContext.getAllocator(); result = new TopLevelAllocator();
-      bitContext.getOperatorCreatorRegistry(); result = new OperatorCreatorRegistry(c);
-    }};
-
-    String planString = Resources.toString(Resources.getResource("functions/hive/UDF.json"), Charsets.UTF_8);
-
-    setup(bitContext, connection);
-    PhysicalPlan plan = reader.readPhysicalPlan(planString);
-    SimpleRootExec exec = new SimpleRootExec(ImplCreator.getExec(context, (FragmentRoot) plan.getSortedOperators(false).iterator().next()));
-
-    int numRecords = 0;
-    while(exec.next()){
-      // Output columns and types
-      // 1. str1 : Var16Char
-      // 2. str1Length : Int
-      // 3. str1Ascii : Int
-      // 4. flt1 : Float4
-      // 5. pow : Float8
-      Iterator<ValueVector> vv = exec.iterator();
-      Var16CharVector str1V = (Var16CharVector) vv.next();
-      NullableIntVector str1LengthV = (NullableIntVector) vv.next();
-      NullableIntVector str1AsciiV = (NullableIntVector) vv.next();
-      Float4Vector flt1V = (Float4Vector) vv.next();
-      NullableFloat8Vector powV = (NullableFloat8Vector) vv.next();
-
-      for(int i=0; i<exec.getRecordCount(); i++) {
-
-        String str1 = new String(str1V.getAccessor().get(i), Charsets.UTF_16);
-        int str1Length = str1LengthV.getAccessor().get(i);
-        assertTrue(str1.length() == str1Length);
-
-        int str1Ascii = str1AsciiV.getAccessor().get(i);
-
-        float flt1 = flt1V.getAccessor().get(i);
-
-        double pow = 0;
-        if (!powV.getAccessor().isNull(i)) {
-          pow = powV.getAccessor().get(i);
-          assertTrue(Math.pow(flt1, 2.0) == pow);
-        }
-
-        System.out.println(str1 + ", " + str1Length + ", " + str1Ascii + ", " + flt1 + ", " + pow);
-        numRecords++;
-      }
-    }
-
-    System.out.println("Processed " + numRecords + " records");
-
-    if(context.getFailureCause() != null){
-      throw context.getFailureCause();
-    }
-
-    assertTrue(!context.isFailed());
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/980dc87b/exec/java-exec/src/test/java/org/apache/drill/exec/store/hive/HiveTestDataGenerator.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/test/java/org/apache/drill/exec/store/hive/HiveTestDataGenerator.java b/exec/java-exec/src/test/java/org/apache/drill/exec/store/hive/HiveTestDataGenerator.java
deleted file mode 100644
index e051abb..0000000
--- a/exec/java-exec/src/test/java/org/apache/drill/exec/store/hive/HiveTestDataGenerator.java
+++ /dev/null
@@ -1,261 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.drill.exec.store.hive;
-
-import java.io.File;
-import java.io.IOException;
-import java.io.PrintWriter;
-import java.sql.Date;
-import java.sql.Timestamp;
-
-import org.apache.commons.io.FileUtils;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.ql.CommandNeedRetryException;
-import org.apache.hadoop.hive.ql.Driver;
-import org.apache.hadoop.hive.ql.processors.CommandProcessorResponse;
-import org.apache.hadoop.hive.ql.session.SessionState;
-
-public class HiveTestDataGenerator {
-  static final org.slf4j.Logger logger = org.slf4j.LoggerFactory.getLogger(HiveTestDataGenerator.class);
-
-  static int RETRIES = 5;
-  private Driver hiveDriver = null;
-  private static final String DB_DIR = "/tmp/drill_hive_db";
-  private static final String WH_DIR = "/tmp/drill_hive_wh";
-  
-  public static void main(String[] args) throws Exception {
-    HiveTestDataGenerator htd = new HiveTestDataGenerator();
-    htd.generateTestData();
-  }
-
-  private void cleanDir(String dir) throws IOException{
-    File f = new File(dir);
-    if(f.exists()){
-      FileUtils.cleanDirectory(f);
-      FileUtils.forceDelete(f);
-    }
-  }
-  
-  public void generateTestData() throws Exception {
-    
-    // remove data from previous runs.
-    cleanDir(DB_DIR);
-    cleanDir(WH_DIR);
-    
-    HiveConf conf = new HiveConf();
-
-    conf.set("javax.jdo.option.ConnectionURL", String.format("jdbc:derby:;databaseName=%s;create=true", DB_DIR));
-    conf.set(FileSystem.FS_DEFAULT_NAME_KEY, "file:///");
-    conf.set("hive.metastore.warehouse.dir", WH_DIR);
-
-    SessionState ss = new SessionState(new HiveConf(SessionState.class));
-    SessionState.start(ss);
-    hiveDriver = new Driver(conf);
-
-    // generate (key, value) test data
-    String testDataFile = generateTestDataFile();
-
-    createTableAndLoadData("default", "kv", testDataFile);
-    executeQuery("CREATE DATABASE IF NOT EXISTS db1");
-    createTableAndLoadData("db1", "kv_db1", testDataFile);
-
-    // Generate data with date and timestamp data type
-    String testDateDataFile = generateTestDataFileWithDate();
-
-    // create table with date and timestamp data type
-    executeQuery("USE default");
-    executeQuery("CREATE TABLE IF NOT EXISTS default.foodate(a DATE, b TIMESTAMP) "+
-        "ROW FORMAT DELIMITED FIELDS TERMINATED BY ',' STORED AS TEXTFILE");
-    executeQuery(String.format("LOAD DATA LOCAL INPATH '%s' OVERWRITE INTO TABLE default.foodate", testDateDataFile));
-
-    // create a table with no data
-    executeQuery("CREATE TABLE IF NOT EXISTS default.empty_table(a INT, b STRING)");
-
-    // create a Hive table that has columns with data types which are supported for reading in Drill.
-    testDataFile = generateAllTypesDataFile();
-    executeQuery(
-        "CREATE TABLE IF NOT EXISTS readtest (" +
-        "  binary_field BINARY," +
-        "  boolean_field BOOLEAN," +
-        "  tinyint_field TINYINT," +
-        "  decimal_field DECIMAL," +
-        "  double_field DOUBLE," +
-        "  float_field FLOAT," +
-        "  int_field INT," +
-        "  bigint_field BIGINT," +
-        "  smallint_field SMALLINT," +
-        "  string_field STRING," +
-        "  varchar_field VARCHAR(50)," +
-        "  timestamp_field TIMESTAMP," +
-        "  date_field DATE" +
-        ") PARTITIONED BY (" +
-        "  binary_part BINARY," +
-        "  boolean_part BOOLEAN," +
-        "  tinyint_part TINYINT," +
-        "  decimal_part DECIMAL," +
-        "  double_part DOUBLE," +
-        "  float_part FLOAT," +
-        "  int_part INT," +
-        "  bigint_part BIGINT," +
-        "  smallint_part SMALLINT," +
-        "  string_part STRING," +
-        "  varchar_part VARCHAR(50)," +
-        "  timestamp_part TIMESTAMP," +
-        "  date_part DATE" +
-        ") ROW FORMAT DELIMITED FIELDS TERMINATED BY ',' STORED AS TEXTFILE"
-    );
-
-    // Add a partition to table 'readtest'
-    executeQuery(
-        "ALTER TABLE readtest ADD IF NOT EXISTS PARTITION ( " +
-        "  binary_part='binary', " +
-        "  boolean_part='true', " +
-        "  tinyint_part='64', " +
-        "  decimal_part='3489423929323435243', " +
-        "  double_part='8.345', " +
-        "  float_part='4.67', " +
-        "  int_part='123456', " +
-        "  bigint_part='234235', " +
-        "  smallint_part='3455', " +
-        "  string_part='string', " +
-        "  varchar_part='varchar', " +
-        "  timestamp_part='2013-07-05 17:01:00', " +
-        "  date_part='2013-07-05')"
-    );
-
-    // Load data into table 'readtest'
-    executeQuery(String.format("LOAD DATA LOCAL INPATH '%s' OVERWRITE INTO TABLE default.readtest PARTITION (" +
-        "  binary_part='binary', " +
-        "  boolean_part='true', " +
-        "  tinyint_part='64', " +
-        "  decimal_part='3489423929323435243', " +
-        "  double_part='8.345', " +
-        "  float_part='4.67', " +
-        "  int_part='123456', " +
-        "  bigint_part='234235', " +
-        "  smallint_part='3455', " +
-        "  string_part='string', " +
-        "  varchar_part='varchar', " +
-        "  timestamp_part='2013-07-05 17:01:00', " +
-        "  date_part='2013-07-05')", testDataFile));
-
-    // create a table that has all Hive types. This is to test how hive tables metadata is populated in
-    // Drill's INFORMATION_SCHEMA.
-    executeQuery("CREATE TABLE IF NOT EXISTS infoschematest(" +
-        "booleanType BOOLEAN, " +
-        "tinyintType TINYINT, " +
-        "smallintType SMALLINT, " +
-        "intType INT, " +
-        "bigintType BIGINT, " +
-        "floatType FLOAT, " +
-        "doubleType DOUBLE, " +
-        "dataType DATE, " +
-        "timestampType TIMESTAMP, " +
-        "binaryType BINARY, " +
-        "decimalType DECIMAL, " +
-        "stringType STRING, " +
-        "varCharType VARCHAR(20), " +
-        "listType ARRAY<STRING>, " +
-        "mapType MAP<STRING,INT>, " +
-        "structType STRUCT<sint:INT,sboolean:BOOLEAN,sstring:STRING>, " +
-        "uniontypeType UNIONTYPE<int, double, array<string>>)"
-    );
-
-    // create a Hive view to test how its metadata is populated in Drill's INFORMATION_SCHEMA
-    executeQuery("CREATE VIEW IF NOT EXISTS hiveview AS SELECT * FROM kv");
-
-    ss.close();
-  }
-
-  private void createTableAndLoadData(String dbName, String tblName, String dataFile) {
-    executeQuery(String.format("USE %s", dbName));
-    executeQuery(String.format("CREATE TABLE IF NOT EXISTS %s.%s(key INT, value STRING) "+
-        "ROW FORMAT DELIMITED FIELDS TERMINATED BY ',' STORED AS TEXTFILE", dbName, tblName));
-    executeQuery(String.format("LOAD DATA LOCAL INPATH '%s' OVERWRITE INTO TABLE %s.%s", dataFile, dbName, tblName));
-  }
-
-  private File getTempFile() throws Exception {
-    File file = null;
-    while (true) {
-      file = File.createTempFile("drill-hive-test", ".txt");
-      if (file.exists()) {
-        boolean success = file.delete();
-        if (success) {
-          break;
-        }
-      }
-      logger.debug("retry creating tmp file");
-    }
-
-    return file;
-  }
-
-  private String generateTestDataFile() throws Exception {
-    File file = getTempFile();
-
-    PrintWriter printWriter = new PrintWriter(file);
-    for (int i=1; i<=5; i++)
-      printWriter.println (String.format("%d, key_%d", i, i));
-    printWriter.close();
-
-    return file.getPath();
-  }
-
-  private String generateTestDataFileWithDate() throws Exception {
-    File file = getTempFile();
-
-    PrintWriter printWriter = new PrintWriter(file);
-    for (int i=1; i<=5; i++) {
-      Date date = new Date(System.currentTimeMillis());
-      Timestamp ts = new Timestamp(System.currentTimeMillis());
-      printWriter.println (String.format("%s,%s", date.toString(), ts.toString()));
-    }
-    printWriter.close();
-
-    return file.getPath();
-  }
-
-  private String generateAllTypesDataFile() throws Exception {
-    File file = getTempFile();
-
-    PrintWriter printWriter = new PrintWriter(file);
-    printWriter.println("YmluYXJ5ZmllbGQ=,false,34,3489423929323435243,8.345,4.67,123456,234235,3455,stringfield,varcharfield,2013-07-05 17:01:00,2013-07-05");
-    printWriter.close();
-
-    return file.getPath();
-  }
-
-  private void executeQuery(String query) {
-    CommandProcessorResponse response = null;
-    boolean failed = false;
-    int retryCount = RETRIES;
-
-    try {
-      response = hiveDriver.run(query);
-    } catch(CommandNeedRetryException ex) {
-      if (--retryCount == 0)
-        failed = true;
-    }
-
-    if (failed || response.getResponseCode() != 0 )
-      throw new RuntimeException(String.format("Failed to execute command '%s', errorMsg = '%s'",
-        query, (response != null ? response.getErrorMessage() : "")));
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/980dc87b/exec/java-exec/src/test/resources/functions/hive/GenericUDF.json
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/test/resources/functions/hive/GenericUDF.json b/exec/java-exec/src/test/resources/functions/hive/GenericUDF.json
deleted file mode 100644
index e849e00..0000000
--- a/exec/java-exec/src/test/resources/functions/hive/GenericUDF.json
+++ /dev/null
@@ -1,45 +0,0 @@
-{
-    head:{
-        type:"APACHE_DRILL_PHYSICAL",
-        version:"1",
-        generator:{
-            type:"manual"
-        }
-    },
-    graph:[
-        {
-            @id:1,
-            pop:"mock-sub-scan",
-            url: "http://apache.org",
-            entries:[
-                {records: 100, types: [
-                   {name: "str1", type: "VAR16CHAR", mode: "REQUIRED"},
-                   {name: "str2", type: "VAR16CHAR", mode: "REQUIRED"},
-                   {name: "str3", type: "VAR16CHAR", mode: "REQUIRED"},
-                   {name: "nullableStr1", type: "VAR16CHAR", mode: "OPTIONAL"},
-                   {name: "flt1", type: "FLOAT4", mode: "REQUIRED"}
-                ]}
-            ]
-        },
-        {
-            @id:2,
-            child: 1,
-            pop:"project",
-            exprs: [
-                { ref: "str1", expr: "str1" },
-                { ref: "upperStr1", expr: "ucase(str1)" },
-                { ref: "unix_timestamp", expr: "unix_timestamp()" },
-                { ref: "concat", expr: "concat_ws('-', str2, str3)" },
-                { ref: "flt1", expr: "flt1" },
-                { ref: "format_number", expr: "format_number(cast(flt1 as float8), cast(2 as int))" },
-                { ref: "nullableStr1", expr: "nullableStr1" },
-                { ref: "upperNulableStr1", expr: "upper(nullableStr1)" }
-            ]
-        },
-        {
-            @id: 3,
-            child: 2,
-            pop: "screen"
-        }
-    ]
-}

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/980dc87b/exec/java-exec/src/test/resources/functions/hive/UDF.json
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/test/resources/functions/hive/UDF.json b/exec/java-exec/src/test/resources/functions/hive/UDF.json
deleted file mode 100644
index cd4c7b1..0000000
--- a/exec/java-exec/src/test/resources/functions/hive/UDF.json
+++ /dev/null
@@ -1,39 +0,0 @@
-{
-    head:{
-        type:"APACHE_DRILL_PHYSICAL",
-        version:"1",
-        generator:{
-            type:"manual"
-        }
-    },
-    graph:[
-        {
-            @id:1,
-            pop:"mock-sub-scan",
-            url: "http://apache.org",
-            entries:[
-                {records: 100, types: [
-                   {name: "str1", type: "VAR16CHAR", mode: "REQUIRED"},
-                   {name: "flt1", type: "FLOAT4", mode: "REQUIRED"}
-                ]}
-            ]
-        },
-        {
-            @id:2,
-            child: 1,
-            pop:"project",
-            exprs: [
-                { ref: "str1", expr: "str1" },
-                { ref: "str1Length", expr: "length(str1)" },
-                { ref: "str1Ascii", expr: "ascii(str1)" },
-                { ref: "flt1", expr: "flt1" },
-                { ref: "pow", expr: "pow(cast(flt1 as float8), 2.0)" }
-            ]
-        },
-        {
-            @id: 3,
-            child: 2,
-            pop: "screen"
-        }
-    ]
-}

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/980dc87b/exec/jdbc/pom.xml
----------------------------------------------------------------------
diff --git a/exec/jdbc/pom.xml b/exec/jdbc/pom.xml
index 1cb5844..9906a5a 100644
--- a/exec/jdbc/pom.xml
+++ b/exec/jdbc/pom.xml
@@ -35,6 +35,11 @@
       <artifactId>drill-java-exec</artifactId>
       <version>${project.version}</version>
     </dependency>
+     <dependency>
+      <groupId>org.apache.drill.contrib.storage-hive</groupId>
+      <artifactId>drill-storage-hive-core</artifactId>
+      <version>${project.version}</version>
+    </dependency>
     <dependency>
       <groupId>org.apache.drill</groupId>
       <artifactId>drill-common</artifactId>
@@ -50,6 +55,12 @@
       <scope>test</scope>
     </dependency>
     <dependency>
+      <groupId>org.apache.drill.contrib.storage-hive</groupId>
+      <artifactId>drill-storage-hive-core</artifactId>
+      <version>${project.version}</version>
+      <classifier>tests</classifier>
+    </dependency>
+    <dependency>
       <groupId>pentaho</groupId>
       <artifactId>mondrian-data-foodmart-queries</artifactId>
       <version>0.3</version>


[32/32] git commit: Allocate buffers when the data is empty so that downstream operators do not fail in VV transfer

Posted by ja...@apache.org.
Allocate buffers when the data is empty so that downstream operators do not fail in VV transfer


Project: http://git-wip-us.apache.org/repos/asf/incubator-drill/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-drill/commit/efa3274b
Tree: http://git-wip-us.apache.org/repos/asf/incubator-drill/tree/efa3274b
Diff: http://git-wip-us.apache.org/repos/asf/incubator-drill/diff/efa3274b

Branch: refs/heads/master
Commit: efa3274bdb0b7f91e8b5877a966a22fb4c59dfd8
Parents: 65f6bcb
Author: vkorukanti <ve...@gmail.com>
Authored: Fri Jun 20 12:05:05 2014 -0700
Committer: vkorukanti <ve...@gmail.com>
Committed: Fri Jun 20 12:05:05 2014 -0700

----------------------------------------------------------------------
 .../org/apache/drill/exec/store/hive/HiveRecordReader.java   | 8 ++++----
 .../java/org/apache/drill/exec/physical/impl/ScanBatch.java  | 2 +-
 2 files changed, 5 insertions(+), 5 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/efa3274b/contrib/storage-hive/core/src/main/java/org/apache/drill/exec/store/hive/HiveRecordReader.java
----------------------------------------------------------------------
diff --git a/contrib/storage-hive/core/src/main/java/org/apache/drill/exec/store/hive/HiveRecordReader.java b/contrib/storage-hive/core/src/main/java/org/apache/drill/exec/store/hive/HiveRecordReader.java
index c062f8c..ddb08c6 100644
--- a/contrib/storage-hive/core/src/main/java/org/apache/drill/exec/store/hive/HiveRecordReader.java
+++ b/contrib/storage-hive/core/src/main/java/org/apache/drill/exec/store/hive/HiveRecordReader.java
@@ -247,13 +247,13 @@ public class HiveRecordReader implements RecordReader {
 
   @Override
   public int next() {
-    if (empty) {
-      return 0;
-    }
-
     for (ValueVector vv : vectors) {
       VectorAllocator.getAllocator(vv, FIELD_SIZE).alloc(TARGET_RECORD_COUNT);
     }
+    if (empty) {
+      setValueCountAndPopulatePartitionVectors(0);
+      return 0;
+    }
 
     try {
       int recordCount = 0;

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/efa3274b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/ScanBatch.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/ScanBatch.java b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/ScanBatch.java
index 5f8bfb9..f440546 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/ScanBatch.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/ScanBatch.java
@@ -139,7 +139,6 @@ public class ScanBatch implements RecordBatch {
         try {
           if (!readers.hasNext()) {
             currentReader.cleanup();
-            releaseAssets();
             if (first) {
               first = false;
               done = true;
@@ -148,6 +147,7 @@ public class ScanBatch implements RecordBatch {
               schema = container.getSchema();
               return IterOutcome.OK_NEW_SCHEMA;
             }
+            releaseAssets();
             return IterOutcome.NONE;
           }
           oContext.getStats().startSetup();
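
Context for the change above: value-vector (VV) transfer hands buffer ownership from one operator to the next, so the buffers must exist even when they hold zero records. A self-contained toy model in plain Java (ToyVector is illustrative only, not Drill's vector API) shows the failure mode the reordering prevents:

    // Toy model of the transfer contract. A reader that returns early on
    // empty input without allocating leaves buf == null, and the transfer
    // fails in the downstream operator rather than at the source.
    final class ToyVector {
      byte[] buf;          // null until allocated
      int valueCount;

      void allocate(int capacity) { buf = new byte[capacity]; }

      void transferTo(ToyVector target) {
        if (buf == null) {
          throw new IllegalStateException("transfer of unallocated vector");
        }
        target.buf = buf;  // ownership moves downstream
        target.valueCount = valueCount;
        buf = null;
        valueCount = 0;
      }
    }

With the fix, HiveRecordReader.next() allocates first and then marks every vector as holding zero records via setValueCountAndPopulatePartitionVectors(0), so even an empty scan produces transferable (zero-length) vectors, and ScanBatch defers releaseAssets() until it actually returns NONE.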


[21/32] git commit: DRILL-1016: Propagate negative sign while casting from decimal18 to decimal38

Posted by ja...@apache.org.
DRILL-1016: Propagate negative sign while casting from decimal18 to decimal38


Project: http://git-wip-us.apache.org/repos/asf/incubator-drill/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-drill/commit/da618239
Tree: http://git-wip-us.apache.org/repos/asf/incubator-drill/tree/da618239
Diff: http://git-wip-us.apache.org/repos/asf/incubator-drill/diff/da618239

Branch: refs/heads/master
Commit: da618239d7f1347706e0e183ed0547e6e257762f
Parents: 43bb57e
Author: Mehant Baid <me...@gmail.com>
Authored: Wed Jun 18 22:16:36 2014 -0700
Committer: Jacques Nadeau <ja...@apache.org>
Committed: Fri Jun 20 10:56:15 2014 -0700

----------------------------------------------------------------------
 .../codegen/templates/Decimal/CastSrcDecimalSimple.java  |  3 ++-
 .../org/apache/drill/jdbc/test/TestFunctionsQuery.java   | 11 +++++++++++
 2 files changed, 13 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/da618239/exec/java-exec/src/main/codegen/templates/Decimal/CastSrcDecimalSimple.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/codegen/templates/Decimal/CastSrcDecimalSimple.java b/exec/java-exec/src/main/codegen/templates/Decimal/CastSrcDecimalSimple.java
index 7ef806f..aac45e0 100644
--- a/exec/java-exec/src/main/codegen/templates/Decimal/CastSrcDecimalSimple.java
+++ b/exec/java-exec/src/main/codegen/templates/Decimal/CastSrcDecimalSimple.java
@@ -169,7 +169,6 @@ public class Cast${type.from}${type.to} implements DrillSimpleFunc{
 
         out.buffer = buffer;
         out.start = 0;
-        out.setSign((in.value < 0));
 
         /* Since we will be dividing the decimal value with base 1 billion
          * we don't want negative results if the decimal is negative.
@@ -213,6 +212,8 @@ public class Cast${type.from}${type.to} implements DrillSimpleFunc{
         if (in.scale != out.scale) {
           org.apache.drill.common.util.DecimalUtility.roundDecimal(out.buffer, out.start, out.nDecimalDigits, out.scale, in.scale);
         }
+        // Set the sign
+        out.setSign((in.value < 0));
     }
 }
 

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/da618239/exec/jdbc/src/test/java/org/apache/drill/jdbc/test/TestFunctionsQuery.java
----------------------------------------------------------------------
diff --git a/exec/jdbc/src/test/java/org/apache/drill/jdbc/test/TestFunctionsQuery.java b/exec/jdbc/src/test/java/org/apache/drill/jdbc/test/TestFunctionsQuery.java
index 39919a5..64bdf6d 100644
--- a/exec/jdbc/src/test/java/org/apache/drill/jdbc/test/TestFunctionsQuery.java
+++ b/exec/jdbc/src/test/java/org/apache/drill/jdbc/test/TestFunctionsQuery.java
@@ -559,4 +559,15 @@ public class TestFunctionsQuery {
         .sql(query)
         .returns("col1=2003-07-09; col2=2003-07-09; col3=2003-07-09");
   }
+
+  @Test
+  public void testDecimal18Decimal38Comparison() throws Exception {
+    String query = "select cast('999999999.999999999' as decimal(18, 9)) = cast('999999999.999999999' as decimal(38, 18)) as CMP " +
+        "from cp.`employee.json` where employee_id = 1";
+
+    JdbcAssert.withNoDefaultSchema()
+        .sql(query)
+        .returns(
+            "CMP=true\n");
+  }
 }
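
The ordering matters because the sparse decimal38 output keeps its digits and sign in the same buffer that the base-1-billion split and the rounding step rewrite; setting the sign first let those later writes clobber it for negative inputs. A hedged sketch of the digit split in plain Java (the concrete values and class name are illustrative, not the template's exact code):

    final class SignAfterRounding {
      public static void main(String[] args) {
        // decimal18 carries the value as a scaled long; the cast splits the
        // absolute value into base-1e9 digits, rounds to the target scale,
        // and only then applies the sign.
        long in = -999999999999999999L;             // 999999999.999999999 at scale 9, negated
        long abs = Math.abs(in);
        int hiDigit = (int) (abs / 1_000_000_000L); // 999999999
        int loDigit = (int) (abs % 1_000_000_000L); // 999999999
        boolean negative = in < 0;                  // the template now writes this last
        System.out.println(hiDigit + " " + loDigit + " negative=" + negative);
      }
    }

The new JDBC test pins the cast down by comparing a decimal(18,9) literal with its decimal(38,18) counterpart, which forces exactly the rescaling path this template generates.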


[14/32] DRILL-875: Fixes for DRILL-707, DRILL-780, DRILL-835 (Schema change), DRILL-852, DRILL-876, DRILL-877, DRILL-878, DRILL-890

Posted by ja...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/aaa4db74/contrib/native/client/src/clientlib/drillClientImpl.hpp
----------------------------------------------------------------------
diff --git a/contrib/native/client/src/clientlib/drillClientImpl.hpp b/contrib/native/client/src/clientlib/drillClientImpl.hpp
index 0046b3e..aa06556 100644
--- a/contrib/native/client/src/clientlib/drillClientImpl.hpp
+++ b/contrib/native/client/src/clientlib/drillClientImpl.hpp
@@ -17,14 +17,23 @@
  */
 
 
-#ifndef DRILL_CLIENT_ASYNC_H
-#define DRILL_CLIENT_ASYNC_H
+#ifndef DRILL_CLIENT_IMPL_H
+#define DRILL_CLIENT_IMPL_H
+
+/* Define some BOOST defines */
+#define BOOST_ASIO_ENABLE_CANCELIO
+// If we want to support older versions of windows than Windows 7, we should
+// disable IOCP
+//#ifdef _WIN32
+//#define BOOST_ASIO_DISABLE_IOCP
+//#endif // _WIN32
 
 #include <stdlib.h>
 #include <time.h>
 #include <queue>
 #include <vector>
 #include <boost/asio.hpp>
+#include <boost/asio/deadline_timer.hpp>
 #include <boost/thread.hpp>
 #include <zookeeper/zookeeper.h>
 
@@ -32,6 +41,7 @@
 #include "drill/drillClient.hpp"
 #include "rpcEncoder.hpp"
 #include "rpcDecoder.hpp"
+#include "utils.hpp"
 #include "User.pb.h"
 #include "UserBitShared.pb.h"
 
@@ -43,7 +53,6 @@ class OutBoundRpcMessage;
 class RecordBatch;
 class RpcEncoder;
 class RpcDecoder;
-//struct UserServerEndPoint;
 
 class DrillClientQueryResult{
     friend class DrillClientImpl;
@@ -51,17 +60,20 @@ class DrillClientQueryResult{
     DrillClientQueryResult(DrillClientImpl * pClient, uint64_t coordId):
         m_pClient(pClient),
         m_coordinationId(coordId),
+        m_numBatches(0),
+        m_columnDefs(new std::vector<Drill::FieldMetadata*>),
         m_bIsQueryPending(true),
         m_bIsLastChunk(false),
         m_bCancel(false),
-        m_bHasSchemaChanged(false), 
+        m_bHasSchemaChanged(false),
         m_bHasData(false),
         m_bHasError(false),
         m_pError(NULL),
         m_pQueryId(NULL),
+        m_pSchemaListener(NULL),
         m_pResultsListener(NULL),
-        m_pListenerCtx(NULL)
-    {};
+        m_pListenerCtx(NULL) {
+    };
 
     ~DrillClientQueryResult(){
         this->clearAndDestroy();
@@ -73,25 +85,29 @@ class DrillClientQueryResult{
         this->m_pListenerCtx = listenerCtx;
     }
 
-    // Synchronous call to get data. Caller assumes ownership of the record batch 
+    void registerSchemaChangeListener(pfnSchemaListener l){
+        m_pSchemaListener=l;
+    }
+
+    // Synchronous call to get data. Caller assumes ownership of the record batch
     // returned and it is assumed to have been consumed.
     RecordBatch*  getNext();
-    // Synchronous call to get a look at the next Record Batch. This 
-    // call does not move the current pointer forward. Repeated calls 
+    // Synchronous call to get a look at the next Record Batch. This
+    // call does not move the current pointer forward. Repeated calls
     // to peekNext return the same value until getNext is called.
     RecordBatch*  peekNext();
     // Blocks until data is available.
     void waitForData();
 
     // placeholder to return an empty col def vector when calls are made out of order.
-    static std::vector<Drill::FieldMetadata*> s_emptyColDefs;
+    static FieldDefPtr s_emptyColDefs;
 
-    std::vector<Drill::FieldMetadata*>& getColumnDefs(){ 
+    FieldDefPtr getColumnDefs(){
         boost::lock_guard<boost::mutex> bufferLock(this->m_schemaMutex);
         return this->m_columnDefs;
     }
 
-    void cancel();    
+    void cancel();
     bool isCancelled(){return this->m_bCancel;};
     bool hasSchemaChanged(){return this->m_bHasSchemaChanged;};
     int32_t getCoordinationId(){ return this->m_coordinationId;}
@@ -101,7 +117,7 @@ class DrillClientQueryResult{
     exec::shared::QueryId& getQueryId(){ return *(this->m_pQueryId); }
     bool hasError(){ return m_bHasError;}
     status_t getErrorStatus(){ return m_pError!=NULL?(status_t)m_pError->status:QRY_SUCCESS;}
-    DrillClientError* getError(){ return m_pError;}
+    const DrillClientError* getError(){ return m_pError;}
 
     private:
     status_t setupColumnDefs(exec::user::QueryResult* pQueryResult);
@@ -114,20 +130,21 @@ class DrillClientQueryResult{
     DrillClientImpl* m_pClient;
 
     int32_t m_coordinationId;
+    size_t m_numBatches; // number of record batches received so far
 
     // Vector of Buffers holding data returned by the server
     // Each data buffer is decoded into a RecordBatch
     std::vector<ByteBuf_t> m_dataBuffers;
     std::queue<RecordBatch*> m_recordBatches;
-    std::vector<Drill::FieldMetadata*> m_columnDefs;
+    FieldDefPtr m_columnDefs;
 
     // Mutex to protect schema definitions
-    boost::mutex m_schemaMutex; 
+    boost::mutex m_schemaMutex;
     // Mutex for Cond variable for read write to batch vector
-    boost::mutex m_cvMutex; 
-    // Condition variable to signal arrival of more data. Condition variable is signaled 
+    boost::mutex m_cvMutex;
+    // Condition variable to signal arrival of more data. Condition variable is signaled
     // if the recordBatches queue is not empty
-    boost::condition_variable m_cv; 
+    boost::condition_variable m_cv;
 
     // state
     // if m_bIsQueryPending is true, we continue to wait for results
@@ -138,10 +155,12 @@ class DrillClientQueryResult{
     bool m_bHasData;
     bool m_bHasError;
 
-    DrillClientError* m_pError;
+    const DrillClientError* m_pError;
 
     exec::shared::QueryId* m_pQueryId;
 
+    //Schema change listener
+    pfnSchemaListener m_pSchemaListener;
     // Results callback
     pfnQueryResultsListener m_pResultsListener;
 
@@ -152,33 +171,42 @@ class DrillClientQueryResult{
 class DrillClientImpl{
     public:
         DrillClientImpl():
-            m_coordinationId(1) ,
+            m_coordinationId(1),
+            m_handshakeVersion(0),
             m_bIsConnected(false),
-            m_pendingRequests(0), 
-            m_pError(NULL), 
-            m_pListenerThread(NULL), 
-            m_socket(m_io_service), 
-            m_rbuf(1024), 
-            m_wbuf(1024)
+            m_pendingRequests(0),
+            m_pError(NULL),
+            m_pListenerThread(NULL),
+            m_socket(m_io_service),
+            m_deadlineTimer(m_io_service),
+            m_rbuf(NULL),
+            m_wbuf(MAX_SOCK_RD_BUFSIZE)
     {
         srand(time(NULL));
         m_coordinationId=rand()%1729+1;
     };
 
-        ~DrillClientImpl(){ 
-            //TODO: Cleanup. 
+        ~DrillClientImpl(){
+            //TODO: Cleanup.
             //Free any record batches or buffers remaining
             //Cancel any pending requests
             //Clear and destroy DrillClientQueryResults vector?
 
+            m_deadlineTimer.cancel();
+            m_io_service.stop();
+            m_socket.close();
+            if(m_rbuf!=NULL){
+                Utils::freeBuffer(m_rbuf); m_rbuf=NULL;
+            }
+            if(m_pError!=NULL){
+                delete m_pError; m_pError=NULL;
+            }
             //Terminate and free the listener thread
             if(this->m_pListenerThread!=NULL){
                 this->m_pListenerThread->interrupt();
+                this->m_pListenerThread->join();
                 delete this->m_pListenerThread;
             }
-            if(m_pError!=NULL){
-                delete m_pError; m_pError=NULL;
-            }
         };
 
         //Connect via Zookeeper or directly
@@ -187,10 +215,9 @@ class DrillClientImpl{
         bool Active();
         void Close() ;
         DrillClientError* getError(){ return m_pError;}
-        DrillClientQueryResult* SubmitQuery(exec::user::QueryType t, const std::string& plan, pfnQueryResultsListener listener, void* listenerCtx);
+        DrillClientQueryResult* SubmitQuery(::exec::shared::QueryType t, const std::string& plan, pfnQueryResultsListener listener, void* listenerCtx);
         void waitForResults();
-        bool ValidateHandShake(); // throw expection if not valid
-
+        bool validateHandShake();
 
     private:
         friend class DrillClientQueryResult;
@@ -201,31 +228,30 @@ class DrillClientImpl{
             }
         };
 
-        //allocate memory for Record Batches
-        ByteBuf_t allocateBuffer(size_t len){
-            ByteBuf_t b = (ByteBuf_t)malloc(len); memset(b, 0, len); return b;
-        }
-        void freeBuffer(ByteBuf_t b){ free(b); }
-
         // Direct connection to a drillbit
         // host can be name or ip address, port can be port number or name of service in /etc/services
-        connectionStatus_t connect(const char* host, const char* port);       
+        connectionStatus_t connect(const char* host, const char* port);
         int32_t getNextCoordinationId(){ return ++m_coordinationId; };
-        void parseConnectStr(const char* connectStr, std::string& protocol, std::string& hostPortStr);
-        // end and receive synchronous messages
-        void recvSync(InBoundRpcMessage& msg);
-        void sendSync(OutBoundRpcMessage& msg);
-
+        void parseConnectStr(const char* connectStr, std::string& pathToDrill, std::string& protocol, std::string& hostPortStr);
+        // send synchronous messages
+        //connectionStatus_t recvSync(InBoundRpcMessage& msg);
+        connectionStatus_t sendSync(OutBoundRpcMessage& msg);
+        // handshake
+        connectionStatus_t recvHandshake();
+        void handleHandshake(ByteBuf_t b, const boost::system::error_code& err, std::size_t bytes_transferred );
+        void handleHShakeReadTimeout(const boost::system::error_code & err);
+        // Query results
         void getNextResult();
-        status_t readMsg(ByteBuf_t _buf, InBoundRpcMessage& msg, boost::system::error_code& error);
-        status_t processQueryResult(InBoundRpcMessage& msg);
-        status_t processQueryId(InBoundRpcMessage& msg );
+        status_t readMsg(ByteBuf_t _buf, ByteBuf_t* allocatedBuffer, InBoundRpcMessage& msg, boost::system::error_code& error);
+        status_t processQueryResult(ByteBuf_t allocatedBuffer, InBoundRpcMessage& msg);
+        status_t processQueryId(ByteBuf_t allocatedBuffer, InBoundRpcMessage& msg );
+        void handleReadTimeout(const boost::system::error_code & err);
         void handleRead(ByteBuf_t _buf, const boost::system::error_code & err, size_t bytes_transferred) ;
         status_t validateMessage(InBoundRpcMessage& msg, exec::user::QueryResult& qr, std::string& valError);
         connectionStatus_t handleConnError(connectionStatus_t status, std::string msg);
         status_t handleQryError(status_t status, std::string msg, DrillClientQueryResult* pQueryResult);
-        status_t handleQryError(status_t status, 
-                const exec::shared::DrillPBError& e, 
+        status_t handleQryError(status_t status,
+                const exec::shared::DrillPBError& e,
                 DrillClientQueryResult* pQueryResult);
         void broadcastError(DrillClientError* pErr);
         void clearMapEntries(DrillClientQueryResult* pQueryResult);
@@ -237,29 +263,31 @@ class DrillClientImpl{
         static RpcDecoder s_decoder;
 
         int32_t m_coordinationId;
+        int32_t m_handshakeVersion;
         bool m_bIsConnected;
 
-        // number of outstanding read requests. 
+        // number of outstanding read requests.
         // handleRead will keep asking for more results as long as this number is not zero.
-        size_t m_pendingRequests;   
+        size_t m_pendingRequests;
 
-        // Error Object. NULL if no error. Set if the error is valid for ALL running queries. 
+        // Error Object. NULL if no error. Set if the error is valid for ALL running queries.
         // All the query result objects will
         // also have the error object set.
-        // If the error is query specific, only the query results object will have the error set. 
+        // If the error is query specific, only the query results object will have the error set.
         DrillClientError* m_pError;
 
-        // for boost asio 
+        // for boost asio
         boost::thread * m_pListenerThread;
         boost::asio::io_service m_io_service;
         boost::asio::ip::tcp::socket m_socket;
+        boost::asio::deadline_timer m_deadlineTimer; // to timeout async queries that never return
 
         //for synchronous messages, like validate handshake
-        DataBuf m_rbuf; // buffer for receiving synchronous messages
+        ByteBuf_t m_rbuf; // buffer for receiving synchronous messages
         DataBuf m_wbuf; // buffer for sending synchronous message
 
-        // Mutex to protect drill client operations 
-        boost::mutex m_dcMutex; 
+        // Mutex to protect drill client operations
+        boost::mutex m_dcMutex;
 
         // Map of coordination id to  Query Ids.
         std::map<int, DrillClientQueryResult*> m_queryIds;
@@ -283,12 +311,12 @@ inline void DrillClientImpl::Close() {
 
 class ZookeeperImpl{
     public:
-        ZookeeperImpl(); 
-        ~ZookeeperImpl(); 
+        ZookeeperImpl();
+        ~ZookeeperImpl();
         static ZooLogLevel getZkLogLevel();
-        // comma separated host:port pairs, each corresponding to a zk 
+        // comma separated host:port pairs, each corresponding to a zk
         // server. e.g. "127.0.0.1:3000,127.0.0.1:3001,127.0.0.1:3002
-        int connectToZookeeper(const char* connectStr);
+        int connectToZookeeper(const char* connectStr, const char* pathToDrill);
         void close();
         static void watcher(zhandle_t *zzh, int type, int state, const char *path, void* context);
         void debugPrint();
@@ -297,16 +325,17 @@ class ZookeeperImpl{
 
     private:
         static char s_drillRoot[];
-        zhandle_t* m_zh;      
+        static char s_defaultCluster[];
+        zhandle_t* m_zh;
         clientid_t m_id;
         int m_state;
         std::string m_err;
 
         struct String_vector* m_pDrillbits;
 
-        boost::mutex m_cvMutex; 
-        // Condition variable to signal connection callback has been processed 
-        boost::condition_variable m_cv; 
+        boost::mutex m_cvMutex;
+        // Condition variable to signal connection callback has been processed
+        boost::condition_variable m_cv;
         bool m_bConnecting;
         exec::DrillServiceInstance m_drillServiceInstance;
 };

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/aaa4db74/contrib/native/client/src/clientlib/errmsgs.cpp
----------------------------------------------------------------------
diff --git a/contrib/native/client/src/clientlib/errmsgs.cpp b/contrib/native/client/src/clientlib/errmsgs.cpp
index 0b74225..966cfc2 100644
--- a/contrib/native/client/src/clientlib/errmsgs.cpp
+++ b/contrib/native/client/src/clientlib/errmsgs.cpp
@@ -29,6 +29,8 @@ static Drill::ErrorMessages errorMessages[]={
     {ERR_CONN_FAILURE, ERR_CATEGORY_CONN, 0, "Connection failure. Host:%s port:%s. Error: %s."},
     {ERR_CONN_EXCEPT, ERR_CATEGORY_CONN, 0, "Socket connection failure with the following exception: %s."},
     {ERR_CONN_UNKPROTO, ERR_CATEGORY_CONN, 0, "Unknown protocol: %s."},
+    {ERR_CONN_RDFAIL, ERR_CATEGORY_CONN, 0, "A socket read failed with error: %s."},
+    {ERR_CONN_WFAIL, ERR_CATEGORY_CONN, 0, "Synchronous socket write failed with error: %s."},
     {ERR_CONN_ZOOKEEPER, ERR_CATEGORY_CONN, 0, "Zookeeper error. %s"},
     {ERR_CONN_NOHSHAKE, ERR_CATEGORY_CONN, 0, "Handshake failed: Expected RPC version %d, got %d."},
     {ERR_CONN_ZKFAIL, ERR_CATEGORY_CONN, 0, "Failed to connect to Zookeeper."},
@@ -45,7 +47,7 @@ static Drill::ErrorMessages errorMessages[]={
     {ERR_QRY_INVRPCTYPE, ERR_CATEGORY_QRY, 0, "Unknown rpc type received from server:%d."},
     {ERR_QRY_OUTOFORDER, ERR_CATEGORY_QRY, 0, "Internal Error: Query result received before query id. Aborting ..."},
     {ERR_QRY_INVRPC, ERR_CATEGORY_QRY, 0, "Rpc Error: %s."},
-    {ERR_QRY_8, ERR_CATEGORY_QRY, 0, "Query Failed."},
+    {ERR_QRY_TIMOUT, ERR_CATEGORY_QRY, 0, "Timed out waiting for server to respond."},
     {ERR_QRY_FAILURE, ERR_CATEGORY_QRY, 0, "Query execution error. Details:[ \n%s\n]"},
     {ERR_QRY_SELVEC2, ERR_CATEGORY_QRY, 0, "Receiving a selection_vector_2 from the server came as a complete surprise at this point"},
     {ERR_QRY_RESPFAIL, ERR_CATEGORY_QRY, 0, "Got a RESPONSE_FAILURE from the server and don't know what to do"},
@@ -66,7 +68,7 @@ std::string getMessage(uint32_t msgId, ...){
     assert(msgId <= ERR_QRY_MAX);
     va_list args;
     va_start (args, msgId);
-    vsprintf (str, errorMessages[msgId].msgFormatStr, args);
+    vsprintf (str, errorMessages[msgId-DRILL_ERR_START].msgFormatStr, args);
     va_end (args);
     s=std::string("[")+boost::lexical_cast<std::string>(msgId)+std::string("]")+str;
     return s;

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/aaa4db74/contrib/native/client/src/clientlib/errmsgs.hpp
----------------------------------------------------------------------
diff --git a/contrib/native/client/src/clientlib/errmsgs.hpp b/contrib/native/client/src/clientlib/errmsgs.hpp
index e5ea3ce..437335c 100644
--- a/contrib/native/client/src/clientlib/errmsgs.hpp
+++ b/contrib/native/client/src/clientlib/errmsgs.hpp
@@ -27,33 +27,37 @@ namespace Drill{
 #define ERR_CATEGORY_CONN 10000
 #define ERR_CATEGORY_QRY 20000
 
-#define ERR_NONE 0
-
-#define ERR_CONN_FAILURE        1
-#define ERR_CONN_EXCEPT         2
-#define ERR_CONN_UNKPROTO       3
-#define ERR_CONN_ZOOKEEPER      4
-#define ERR_CONN_NOHSHAKE       5
-#define ERR_CONN_ZKFAIL         6
-#define ERR_CONN_ZKTIMOUT       7
-#define ERR_CONN_ZKERR          8
-#define ERR_CONN_ZKDBITERR      9
-#define ERR_CONN_ZKNODBIT       10
-#define ERR_CONN_ZKNOAUTH       11
-#define ERR_CONN_ZKEXP          12
-#define ERR_CONN_MAX            12
-
-#define ERR_QRY_OUTOFMEM    ERR_CONN_MAX+1 
+
+#define DRILL_ERR_START         30000 // arbitrary
+#define ERR_NONE                DRILL_ERR_START+0
+
+#define ERR_CONN_FAILURE        DRILL_ERR_START+1
+#define ERR_CONN_EXCEPT         DRILL_ERR_START+2
+#define ERR_CONN_UNKPROTO       DRILL_ERR_START+3
+#define ERR_CONN_RDFAIL         DRILL_ERR_START+4
+#define ERR_CONN_WFAIL          DRILL_ERR_START+5
+#define ERR_CONN_ZOOKEEPER      DRILL_ERR_START+6
+#define ERR_CONN_NOHSHAKE       DRILL_ERR_START+7
+#define ERR_CONN_ZKFAIL         DRILL_ERR_START+8
+#define ERR_CONN_ZKTIMOUT       DRILL_ERR_START+9
+#define ERR_CONN_ZKERR          DRILL_ERR_START+10
+#define ERR_CONN_ZKDBITERR      DRILL_ERR_START+11
+#define ERR_CONN_ZKNODBIT       DRILL_ERR_START+12
+#define ERR_CONN_ZKNOAUTH       DRILL_ERR_START+13
+#define ERR_CONN_ZKEXP          DRILL_ERR_START+14
+#define ERR_CONN_MAX            DRILL_ERR_START+14
+
+#define ERR_QRY_OUTOFMEM    ERR_CONN_MAX+1
 #define ERR_QRY_COMMERR     ERR_CONN_MAX+2
 #define ERR_QRY_INVREADLEN  ERR_CONN_MAX+3
 #define ERR_QRY_INVQUERYID  ERR_CONN_MAX+4
 #define ERR_QRY_INVRPCTYPE  ERR_CONN_MAX+5
 #define ERR_QRY_OUTOFORDER  ERR_CONN_MAX+6
 #define ERR_QRY_INVRPC      ERR_CONN_MAX+7
-#define ERR_QRY_8           ERR_CONN_MAX+8
+#define ERR_QRY_TIMOUT      ERR_CONN_MAX+8
 #define ERR_QRY_FAILURE     ERR_CONN_MAX+9
 #define ERR_QRY_SELVEC2     ERR_CONN_MAX+10
-#define ERR_QRY_RESPFAIL    ERR_CONN_MAX+11 
+#define ERR_QRY_RESPFAIL    ERR_CONN_MAX+11
 #define ERR_QRY_12          ERR_CONN_MAX+12
 #define ERR_QRY_UNKQRY      ERR_CONN_MAX+13
 #define ERR_QRY_CANCELED    ERR_CONN_MAX+14
@@ -63,7 +67,7 @@ namespace Drill{
 #define ERR_QRY_18          ERR_CONN_MAX+18
 #define ERR_QRY_19          ERR_CONN_MAX+19
 #define ERR_QRY_20          ERR_CONN_MAX+20
-#define ERR_QRY_MAX         ERR_QRY_20 
+#define ERR_QRY_MAX         ERR_QRY_20
 
     // Use only Plain Old Data types in this struct. We will declare
     // a global.
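
With the codes now biased by DRILL_ERR_START, the lookup in errmsgs.cpp above must subtract that base before indexing the flat message table, which is the one-line fix shown earlier (errorMessages[msgId-DRILL_ERR_START]). A minimal Java rendering of the same idea (table contents abbreviated and partly placeholder):

    final class ErrMsgs {
      static final int DRILL_ERR_START = 30000;  // matches the header above
      static final String[] MESSAGES = {
        "No error.",                                        // ERR_NONE         -> index 0 (placeholder text)
        "Connection failure. Host:%s port:%s. Error: %s.",  // ERR_CONN_FAILURE -> index 1
        // ...
      };

      static String lookup(int msgId) {
        // Indexing by msgId alone would overrun the table once codes start
        // at 30000; subtracting the base maps ERR_CONN_FAILURE (30001) to 1.
        return MESSAGES[msgId - DRILL_ERR_START];
      }
    }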

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/aaa4db74/contrib/native/client/src/clientlib/logger.cpp
----------------------------------------------------------------------
diff --git a/contrib/native/client/src/clientlib/logger.cpp b/contrib/native/client/src/clientlib/logger.cpp
new file mode 100644
index 0000000..5411d01
--- /dev/null
+++ b/contrib/native/client/src/clientlib/logger.cpp
@@ -0,0 +1,69 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "boost/date_time/posix_time/posix_time.hpp"
+#include "boost/thread.hpp"
+
+#include "logger.hpp"
+
+namespace Drill{
+
+std::string getTime(){
+    return to_simple_string(boost::posix_time::second_clock::local_time());
+}
+
+std::string getTid(){
+    return boost::lexical_cast<std::string>(boost::this_thread::get_id());
+}
+
+logLevel_t Logger::s_level=LOG_ERROR;
+std::ostream* Logger::s_pOutStream=NULL;
+std::ofstream* Logger::s_pOutFileStream=NULL;
+char* Logger::s_filepath=NULL;
+
+void Logger::init(const char* path){
+    if(path!=NULL) {
+        s_pOutFileStream = new std::ofstream;
+        s_pOutFileStream->open(path, std::ofstream::out);
+        if(!s_pOutFileStream->is_open()){
+            std::cerr << "Logfile could not be opened. Logging to stdout" << std::endl;
+        }
+    }
+    s_pOutStream=(s_pOutFileStream!=NULL && s_pOutFileStream->is_open())?s_pOutFileStream:&std::cout;
+}
+
+void Logger::close(){
+    if(s_pOutFileStream !=NULL){
+        if(s_pOutFileStream->is_open()){
+            s_pOutFileStream->close();
+        }
+        delete s_pOutFileStream; s_pOutFileStream=NULL;
+    }
+}
+
+std::ostream& Logger::log(logLevel_t level){
+    *s_pOutStream << getTime();
+    *s_pOutStream << " : "<<levelAsString(level);
+    *s_pOutStream << " : "<<getTid();
+    *s_pOutStream << " : ";
+    return *s_pOutStream;
+}
+
+
+} // namespace Drill
+

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/aaa4db74/contrib/native/client/src/clientlib/logger.hpp
----------------------------------------------------------------------
diff --git a/contrib/native/client/src/clientlib/logger.hpp b/contrib/native/client/src/clientlib/logger.hpp
new file mode 100644
index 0000000..e3edb13
--- /dev/null
+++ b/contrib/native/client/src/clientlib/logger.hpp
@@ -0,0 +1,72 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __LOGGER_H
+#define __LOGGER_H
+
+#include <sstream>
+#include <ostream>
+#include <fstream>
+#include <string>
+#include <stdio.h>
+
+#include "drill/common.hpp"
+
+namespace Drill{
+
+class Logger{
+    public:
+        Logger(){}
+        ~Logger(){ }
+
+        static void init(const char* path);
+        static void close();
+        static std::ostream& log(logLevel_t level);
+        static std::string levelAsString(logLevel_t level) {
+            static const char* const levelNames[] = {
+                "TRACE",
+                "DEBUG",
+                "INFO",
+                "WARNING",
+                "ERROR",
+                "FATAL"
+            };
+            return levelNames[level];
+        }
+
+        // The logging level
+        static logLevel_t s_level;
+        static std::ostream* s_pOutStream;
+
+    private:
+        //static std::ostream* s_pOutStream;
+        static std::ofstream* s_pOutFileStream;
+        static char* s_filepath;
+
+}; // Logger
+
+std::string getTime();
+std::string getTid();
+
+#define DRILL_LOG(level) \
+    if (Logger::s_pOutStream==NULL || level < Drill::Logger::s_level); \
+    else Drill::Logger::log(level)       \
+
+} // namespace Drill
+
+#endif

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/aaa4db74/contrib/native/client/src/clientlib/recordBatch.cpp
----------------------------------------------------------------------
diff --git a/contrib/native/client/src/clientlib/recordBatch.cpp b/contrib/native/client/src/clientlib/recordBatch.cpp
index 066b898..27a592d 100644
--- a/contrib/native/client/src/clientlib/recordBatch.cpp
+++ b/contrib/native/client/src/clientlib/recordBatch.cpp
@@ -16,11 +16,9 @@
  * limitations under the License.
  */
 
-
-#include <boost/log/trivial.hpp>
-
 #include "drill/common.hpp"
 #include "drill/recordBatch.hpp"
+#include "utils.hpp"
 
 const uint32_t YEARS_TO_MONTHS=12;
 const uint32_t HOURS_TO_MILLIS=60*60*1000;
@@ -33,14 +31,14 @@ static char timezoneMap[][36]={
     "Africa/Abidjan", "Africa/Accra", "Africa/Addis_Ababa", "Africa/Algiers", "Africa/Asmara", "Africa/Asmera",
     "Africa/Bamako", "Africa/Bangui", "Africa/Banjul", "Africa/Bissau", "Africa/Blantyre", "Africa/Brazzaville",
     "Africa/Bujumbura", "Africa/Cairo", "Africa/Casablanca", "Africa/Ceuta", "Africa/Conakry", "Africa/Dakar",
-    "Africa/Dar_es_Salaam", "Africa/Djibouti", "Africa/Douala", "Africa/El_Aaiun", 
+    "Africa/Dar_es_Salaam", "Africa/Djibouti", "Africa/Douala", "Africa/El_Aaiun",
     "Africa/Freetown", "Africa/Gaborone",
     "Africa/Harare", "Africa/Johannesburg", "Africa/Juba", "Africa/Kampala", "Africa/Khartoum", "Africa/Kigali",
     "Africa/Kinshasa", "Africa/Lagos", "Africa/Libreville", "Africa/Lome", "Africa/Luanda", "Africa/Lubumbashi",
     "Africa/Lusaka", "Africa/Malabo", "Africa/Maputo", "Africa/Maseru", "Africa/Mbabane", "Africa/Mogadishu",
-    "Africa/Monrovia", "Africa/Nairobi", "Africa/Ndjamena", "Africa/Niamey", 
+    "Africa/Monrovia", "Africa/Nairobi", "Africa/Ndjamena", "Africa/Niamey",
     "Africa/Nouakchott", "Africa/Ouagadougou",
-    "Africa/Porto-Novo", "Africa/Sao_Tome", "Africa/Timbuktu", "Africa/Tripoli", 
+    "Africa/Porto-Novo", "Africa/Sao_Tome", "Africa/Timbuktu", "Africa/Tripoli",
     "Africa/Tunis", "Africa/Windhoek",
     "America/Adak", "America/Anchorage", "America/Anguilla", "America/Antigua", "America/Araguaina",
     "America/Argentina/Buenos_Aires", "America/Argentina/Catamarca", "America/Argentina/ComodRivadavia",
@@ -57,10 +55,10 @@ static char timezoneMap[][36]={
     "America/Edmonton", "America/Eirunepe", "America/El_Salvador", "America/Ensenada", "America/Fort_Wayne",
     "America/Fortaleza", "America/Glace_Bay", "America/Godthab", "America/Goose_Bay", "America/Grand_Turk",
     "America/Grenada", "America/Guadeloupe", "America/Guatemala", "America/Guayaquil", "America/Guyana",
-    "America/Halifax", "America/Havana", "America/Hermosillo", 
+    "America/Halifax", "America/Havana", "America/Hermosillo",
     "America/Indiana/Indianapolis", "America/Indiana/Knox",
     "America/Indiana/Marengo", "America/Indiana/Petersburg", "America/Indiana/Tell_City",
-    "America/Indiana/Vevay", "America/Indiana/Vincennes", "America/Indiana/Winamac", 
+    "America/Indiana/Vevay", "America/Indiana/Vincennes", "America/Indiana/Winamac",
     "America/Indianapolis", "America/Inuvik",
     "America/Iqaluit", "America/Jamaica", "America/Jujuy", "America/Juneau", "America/Kentucky/Louisville",
     "America/Kentucky/Monticello", "America/Knox_IN", "America/Kralendijk", "America/La_Paz", "America/Lima",
@@ -68,23 +66,23 @@ static char timezoneMap[][36]={
     "America/Manaus", "America/Marigot", "America/Martinique", "America/Matamoros", "America/Mazatlan",
     "America/Mendoza", "America/Menominee", "America/Merida", "America/Metlakatla", "America/Mexico_City",
     "America/Miquelon", "America/Moncton", "America/Monterrey", "America/Montevideo", "America/Montreal",
-    "America/Montserrat", "America/Nassau", "America/New_York", "America/Nipigon", 
+    "America/Montserrat", "America/Nassau", "America/New_York", "America/Nipigon",
     "America/Nome", "America/Noronha",
     "America/North_Dakota/Beulah", "America/North_Dakota/Center", "America/North_Dakota/New_Salem",
     "America/Ojinaga", "America/Panama", "America/Pangnirtung",
     "America/Paramaribo", "America/Phoenix", "America/Port-au-Prince",
     "America/Port_of_Spain", "America/Porto_Acre", "America/Porto_Velho",
     "America/Puerto_Rico", "America/Rainy_River", "America/Rankin_Inlet",
-    "America/Recife", "America/Regina", "America/Resolute", "America/Rio_Branco", 
+    "America/Recife", "America/Regina", "America/Resolute", "America/Rio_Branco",
     "America/Rosario", "America/Santa_Isabel",
     "America/Santarem", "America/Santiago", "America/Santo_Domingo",
-    "America/Sao_Paulo", "America/Scoresbysund", "America/Shiprock", "America/Sitka", 
+    "America/Sao_Paulo", "America/Scoresbysund", "America/Shiprock", "America/Sitka",
     "America/St_Barthelemy", "America/St_Johns",
     "America/St_Kitts", "America/St_Lucia", "America/St_Thomas",
     "America/St_Vincent", "America/Swift_Current", "America/Tegucigalpa",
-    "America/Thule", "America/Thunder_Bay", "America/Tijuana", "America/Toronto", 
+    "America/Thule", "America/Thunder_Bay", "America/Tijuana", "America/Toronto",
     "America/Tortola", "America/Vancouver",
-    "America/Virgin", "America/Whitehorse", "America/Winnipeg", "America/Yakutat", 
+    "America/Virgin", "America/Whitehorse", "America/Winnipeg", "America/Yakutat",
     "America/Yellowknife", "Antarctica/Casey",
     "Antarctica/Davis", "Antarctica/DumontDUrville", "Antarctica/Macquarie",
     "Antarctica/Mawson", "Antarctica/McMurdo", "Antarctica/Palmer",
@@ -104,33 +102,33 @@ static char timezoneMap[][36]={
     "Asia/Shanghai", "Asia/Singapore", "Asia/Taipei", "Asia/Tashkent", "Asia/Tbilisi", "Asia/Tehran",
     "Asia/Tel_Aviv", "Asia/Thimbu", "Asia/Thimphu", "Asia/Tokyo", "Asia/Ujung_Pandang", "Asia/Ulaanbaatar",
     "Asia/Ulan_Bator", "Asia/Urumqi", "Asia/Vientiane", "Asia/Vladivostok", "Asia/Yakutsk", "Asia/Yekaterinburg",
-    "Asia/Yerevan", "Atlantic/Azores", "Atlantic/Bermuda", "Atlantic/Canary", 
+    "Asia/Yerevan", "Atlantic/Azores", "Atlantic/Bermuda", "Atlantic/Canary",
     "Atlantic/Cape_Verde", "Atlantic/Faeroe",
     "Atlantic/Faroe", "Atlantic/Jan_Mayen", "Atlantic/Madeira",
     "Atlantic/Reykjavik", "Atlantic/South_Georgia", "Atlantic/St_Helena",
-    "Atlantic/Stanley", "Australia/ACT", "Australia/Adelaide", "Australia/Brisbane", 
+    "Atlantic/Stanley", "Australia/ACT", "Australia/Adelaide", "Australia/Brisbane",
     "Australia/Broken_Hill", "Australia/Canberra",
-    "Australia/Currie", "Australia/Darwin", "Australia/Eucla", "Australia/Hobart", 
+    "Australia/Currie", "Australia/Darwin", "Australia/Eucla", "Australia/Hobart",
     "Australia/LHI", "Australia/Lindeman",
-    "Australia/Lord_Howe", "Australia/Melbourne", "Australia/NSW", "Australia/North", 
+    "Australia/Lord_Howe", "Australia/Melbourne", "Australia/NSW", "Australia/North",
     "Australia/Perth", "Australia/Queensland",
-    "Australia/South", "Australia/Sydney", "Australia/Tasmania", "Australia/Victoria", 
+    "Australia/South", "Australia/Sydney", "Australia/Tasmania", "Australia/Victoria",
     "Australia/West", "Australia/Yancowinna",
     "Brazil/Acre", "Brazil/DeNoronha", "Brazil/East", "Brazil/West", "CET", "CST6CDT",
-    "Canada/Atlantic", "Canada/Central", "Canada/East-Saskatchewan", "Canada/Eastern", 
+    "Canada/Atlantic", "Canada/Central", "Canada/East-Saskatchewan", "Canada/Eastern",
     "Canada/Mountain", "Canada/Newfoundland",
     "Canada/Pacific", "Canada/Saskatchewan", "Canada/Yukon", "Chile/Continental", "Chile/EasterIsland", "Cuba",
     "EET", "EST", "EST5EDT", "Egypt", "Eire", "Etc/GMT", "Etc/GMT+0", "Etc/GMT+1", "Etc/GMT+10",
-    "Etc/GMT+11", "Etc/GMT+12", "Etc/GMT+2", "Etc/GMT+3", "Etc/GMT+4", "Etc/GMT+5", "Etc/GMT+6", 
+    "Etc/GMT+11", "Etc/GMT+12", "Etc/GMT+2", "Etc/GMT+3", "Etc/GMT+4", "Etc/GMT+5", "Etc/GMT+6",
     "Etc/GMT+7", "Etc/GMT+8",
-    "Etc/GMT+9", "Etc/GMT-0", "Etc/GMT-1", "Etc/GMT-10", "Etc/GMT-11", "Etc/GMT-12", 
+    "Etc/GMT+9", "Etc/GMT-0", "Etc/GMT-1", "Etc/GMT-10", "Etc/GMT-11", "Etc/GMT-12",
     "Etc/GMT-13", "Etc/GMT-14", "Etc/GMT-2",
-    "Etc/GMT-3", "Etc/GMT-4", "Etc/GMT-5", "Etc/GMT-6", "Etc/GMT-7", "Etc/GMT-8", 
+    "Etc/GMT-3", "Etc/GMT-4", "Etc/GMT-5", "Etc/GMT-6", "Etc/GMT-7", "Etc/GMT-8",
     "Etc/GMT-9", "Etc/GMT0", "Etc/Greenwich",
     "Etc/UCT", "Etc/UTC", "Etc/Universal", "Etc/Zulu", "Europe/Amsterdam", "Europe/Andorra",
     "Europe/Athens", "Europe/Belfast", "Europe/Belgrade", "Europe/Berlin", "Europe/Bratislava", "Europe/Brussels",
     "Europe/Bucharest", "Europe/Budapest", "Europe/Chisinau",
-    "Europe/Copenhagen", "Europe/Dublin", "Europe/Gibraltar", "Europe/Guernsey", 
+    "Europe/Copenhagen", "Europe/Dublin", "Europe/Gibraltar", "Europe/Guernsey",
     "Europe/Helsinki", "Europe/Isle_of_Man",
     "Europe/Istanbul", "Europe/Jersey", "Europe/Kaliningrad", "Europe/Kiev", "Europe/Lisbon", "Europe/Ljubljana",
     "Europe/London", "Europe/Luxembourg", "Europe/Madrid", "Europe/Malta", "Europe/Mariehamn", "Europe/Minsk",
@@ -145,20 +143,20 @@ static char timezoneMap[][36]={
     "Indian/Mayotte", "Indian/Reunion", "Iran", "Israel", "Jamaica", "Japan", "Kwajalein", "Libya", "MET",
     "MST", "MST7MDT", "Mexico/BajaNorte", "Mexico/BajaSur", "Mexico/General", "NZ", "NZ-CHAT", "Navajo", "PRC",
     "PST8PDT", "Pacific/Apia", "Pacific/Auckland", "Pacific/Chatham", "Pacific/Chuuk", "Pacific/Easter",
-    "Pacific/Efate", "Pacific/Enderbury", "Pacific/Fakaofo", "Pacific/Fiji", 
+    "Pacific/Efate", "Pacific/Enderbury", "Pacific/Fakaofo", "Pacific/Fiji",
     "Pacific/Funafuti", "Pacific/Galapagos",
-    "Pacific/Gambier", "Pacific/Guadalcanal", "Pacific/Guam", "Pacific/Honolulu", 
+    "Pacific/Gambier", "Pacific/Guadalcanal", "Pacific/Guam", "Pacific/Honolulu",
     "Pacific/Johnston", "Pacific/Kiritimati",
-    "Pacific/Kosrae", "Pacific/Kwajalein", "Pacific/Majuro", "Pacific/Marquesas", 
+    "Pacific/Kosrae", "Pacific/Kwajalein", "Pacific/Majuro", "Pacific/Marquesas",
     "Pacific/Midway", "Pacific/Nauru",
-    "Pacific/Niue", "Pacific/Norfolk", "Pacific/Noumea", "Pacific/Pago_Pago", 
+    "Pacific/Niue", "Pacific/Norfolk", "Pacific/Noumea", "Pacific/Pago_Pago",
     "Pacific/Palau", "Pacific/Pitcairn",
-    "Pacific/Pohnpei", "Pacific/Ponape", "Pacific/Port_Moresby", "Pacific/Rarotonga", 
+    "Pacific/Pohnpei", "Pacific/Ponape", "Pacific/Port_Moresby", "Pacific/Rarotonga",
     "Pacific/Saipan", "Pacific/Samoa",
     "Pacific/Tahiti", "Pacific/Tarawa", "Pacific/Tongatapu", "Pacific/Truk", "Pacific/Wake", "Pacific/Wallis",
     "Pacific/Yap", "Poland", "Portugal", "ROC", "ROK", "Singapore", "Turkey", "UCT", "US/Alaska", "US/Aleutian",
     "US/Arizona", "US/Central", "US/East-Indiana", "US/Eastern", "US/Hawaii", "US/Indiana-Starke",
-    "US/Michigan", "US/Mountain", "US/Pacific", "US/Pacific-New", "US/Samoa", 
+    "US/Michigan", "US/Mountain", "US/Pacific", "US/Pacific-New", "US/Samoa",
     "UTC", "Universal", "W-SU", "WET", "Zulu"
 };
 
@@ -214,12 +212,12 @@ ValueVectorBase* ValueVectorFactory::allocateValueVector(const Drill::FieldMetad
                 case common::BIT:
                     return new ValueVectorBit(b,f.getValueCount());
                 case common::VARBINARY:
-                    return new ValueVectorVarBinary(b, f.getValueCount()); 
+                    return new ValueVectorVarBinary(b, f.getValueCount());
                 case common::VARCHAR:
-                    return new ValueVectorVarChar(b, f.getValueCount()); 
+                    return new ValueVectorVarChar(b, f.getValueCount());
                 case common::MONEY:
                 default:
-                    return new ValueVectorUnimplemented(b, f.getValueCount()); 
+                    return new ValueVectorUnimplemented(b, f.getValueCount());
             }
         case common::DM_OPTIONAL:
             switch (type) {
@@ -236,28 +234,28 @@ ValueVectorBase* ValueVectorFactory::allocateValueVector(const Drill::FieldMetad
                 case common::FLOAT8:
                     return new NullableValueVectorFixed<double>(b,f.getValueCount());
                 case common::DATE:
-                    return new NullableValueVectorTyped<DateHolder, 
+                    return new NullableValueVectorTyped<DateHolder,
                            ValueVectorTyped<DateHolder, uint64_t> >(b,f.getValueCount());
                 case common::TIMESTAMP:
-                    return new NullableValueVectorTyped<DateTimeHolder, 
+                    return new NullableValueVectorTyped<DateTimeHolder,
                            ValueVectorTyped<DateTimeHolder, uint64_t> >(b,f.getValueCount());
                 case common::TIME:
                     return new NullableValueVectorTyped<TimeHolder,
                            ValueVectorTyped<TimeHolder, uint32_t> >(b,f.getValueCount());
                 case common::TIMESTAMPTZ:
-                    return new NullableValueVectorTyped<DateTimeTZHolder, 
+                    return new NullableValueVectorTyped<DateTimeTZHolder,
                            ValueVectorTypedComposite<DateTimeTZHolder> >(b,f.getValueCount());
                 case common::INTERVAL:
-                    return new NullableValueVectorTyped<IntervalHolder, 
+                    return new NullableValueVectorTyped<IntervalHolder,
                            ValueVectorTypedComposite<IntervalHolder> >(b,f.getValueCount());
                 case common::INTERVALDAY:
-                    return new NullableValueVectorTyped<IntervalDayHolder, 
+                    return new NullableValueVectorTyped<IntervalDayHolder,
                            ValueVectorTypedComposite<IntervalDayHolder> >(b,f.getValueCount());
                 case common::INTERVALYEAR:
-                    return new NullableValueVectorTyped<IntervalYearHolder, 
+                    return new NullableValueVectorTyped<IntervalYearHolder,
                            ValueVectorTypedComposite<IntervalYearHolder> >(b,f.getValueCount());
                 case common::BIT:
-                    return new NullableValueVectorTyped<uint8_t, 
+                    return new NullableValueVectorTyped<uint8_t,
                            ValueVectorBit >(b,f.getValueCount());
                 case common::VARBINARY:
                     //TODO: Varbinary is untested
@@ -266,16 +264,16 @@ ValueVectorBase* ValueVectorFactory::allocateValueVector(const Drill::FieldMetad
                     return new NullableValueVectorTyped<VarWidthHolder, ValueVectorVarChar >(b,f.getValueCount());
                     // not implemented yet
                 default:
-                    return new ValueVectorUnimplemented(b, f.getValueCount()); 
+                    return new ValueVectorUnimplemented(b, f.getValueCount());
             }
         case common::DM_REPEATED:
             switch (type) {
                 // not implemented yet
                 default:
-                    return new ValueVectorUnimplemented(b, f.getValueCount()); 
+                    return new ValueVectorUnimplemented(b, f.getValueCount());
             }
     }
-    return new ValueVectorUnimplemented(b, f.getValueCount()); 
+    return new ValueVectorUnimplemented(b, f.getValueCount());
 }
 
 
@@ -285,13 +283,28 @@ ret_t FieldBatch::load(){
     return RET_SUCCESS;
 }
 
+RecordBatch::~RecordBatch(){
+    m_buffer=NULL;
+    //free memory allocated for FieldBatch objects saved in m_fields;
+    for(std::vector<FieldBatch*>::iterator it = m_fields.begin(); it != m_fields.end(); ++it){
+        delete *it;
+    }
+    m_fields.clear();
+    for(std::vector<Drill::FieldMetadata*>::iterator it = m_fieldDefs->begin(); it != m_fieldDefs->end(); ++it){
+        delete *it;
+    }
+    m_fieldDefs->clear();
+    delete m_pQueryResult;
+    Utils::freeBuffer(m_allocatedBuffer);
+}
+
 ret_t RecordBatch::build(){
     // For every Field, get the corresponding SlicedByteBuf.
-    // Create a Materialized field. Set the Sliced Byted Buf to the correct slice. 
+    // Create a Materialized field. Set the Sliced Byte Buf to the correct slice.
     // Set the Field Metadata.
     // Load the vector.(Load creates a valuevector object of the correct type:
-    //    Use ValueVectorFactory(type) to create the right type. 
-    //    Create a Value Vector of the Sliced Byte Buf. 
+    //    Use ValueVectorFactory(type) to create the right type.
+    //    Create a Value Vector of the Sliced Byte Buf.
     // Add the field batch to vector
     size_t startOffset=0;
     //TODO: handle schema changes here. Call a client provided callback?
@@ -305,7 +318,7 @@ ret_t RecordBatch::build(){
         startOffset+=len;
         pField->load(); // set up the value vectors
         this->m_fields.push_back(pField);
-        this->m_fieldDefs.push_back(pFmd);
+        this->m_fieldDefs->push_back(pFmd);
     }
     return RET_SUCCESS;
 }
@@ -317,7 +330,7 @@ void RecordBatch::print(std::ostream& s, size_t num){
         std::string name= fmd.getName();
         nameList+=name;
         nameList+="    ";
-    } 
+    }
     size_t numToPrint=this->m_numRecords;
     if(num>0 && num<numToPrint)numToPrint=num;
     s<<nameList<<std::endl;
@@ -335,7 +348,7 @@ void RecordBatch::print(std::ostream& s, size_t num){
             }
             values+=valueBuf;
             values+="    ";
-        } 
+        }
         s<<values<<std::endl;
     }
 }
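
The destructor moved out of the header now owns the whole teardown: the FieldBatch objects, the FieldMetadata copies, the QueryResult, and the raw RPC buffer. A minimal lifecycle sketch, assuming the RPC layer supplies pResult, rpcLen and bodyOffset (none of these names are defined by this commit); the new three-argument constructor appears in the recordBatch.hpp hunk further down:

    #include "drill/recordBatch.hpp"
    #include "utils.hpp"   // include paths assumed

    void batchLifecycleSketch(exec::user::QueryResult* pResult,
                              size_t rpcLen, size_t bodyOffset){
        Drill::ByteBuf_t raw = Drill::Utils::allocateBuffer(rpcLen);
        // ... read the wire message into raw; pResult is decoded from it ...
        Drill::RecordBatch* pBatch =
            new Drill::RecordBatch(pResult, raw, raw + bodyOffset);
        pBatch->build();   // slices the buffer into one FieldBatch per column
        // ... consume the value vectors ...
        delete pBatch;     // frees the FieldBatches, the field defs, pResult,
                           // and the raw buffer via Utils::freeBuffer
    }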

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/aaa4db74/contrib/native/client/src/clientlib/rpcDecoder.cpp
----------------------------------------------------------------------
diff --git a/contrib/native/client/src/clientlib/rpcDecoder.cpp b/contrib/native/client/src/clientlib/rpcDecoder.cpp
index 6fb9c24..c1001fd 100644
--- a/contrib/native/client/src/clientlib/rpcDecoder.cpp
+++ b/contrib/native/client/src/clientlib/rpcDecoder.cpp
@@ -48,6 +48,7 @@ int RpcDecoder::LengthDecode(const uint8_t* buf, uint32_t* p_length) {
     cerr << "Reading full length " << *p_length << endl;
     #endif
     assert( (pos1-pos0) == getRawVarintSize(*p_length));
+    delete cis;
     return (pos1-pos0);
 }
 
@@ -143,6 +144,7 @@ int RpcDecoder::Decode(const uint8_t* buf, int length, InBoundRpcMessage& msg) {
 
     int pos1 = cis->CurrentPosition();
     assert((pos1-pos0) == length);
+    delete cis;
     return (pos1-pos0);
 }
 

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/aaa4db74/contrib/native/client/src/clientlib/rpcEncoder.cpp
----------------------------------------------------------------------
diff --git a/contrib/native/client/src/clientlib/rpcEncoder.cpp b/contrib/native/client/src/clientlib/rpcEncoder.cpp
index 5d28a33..2f354d7 100644
--- a/contrib/native/client/src/clientlib/rpcEncoder.cpp
+++ b/contrib/native/client/src/clientlib/rpcEncoder.cpp
@@ -99,6 +99,9 @@ bool RpcEncoder::Encode(DataBuf& buf, OutBoundRpcMessage& msg) {
     cos->WriteVarint32(proto_body_length);
     msg.m_pbody->SerializeToCodedStream(cos);
 
+    delete os;
+    delete cos;
+
     // Done! no need to write data body for client
     return true;
 }
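
The deletes added in LengthDecode, Decode and Encode plug leaks of heap-allocated protobuf coded streams. The same cleanup can be expressed with RAII instead of an explicit delete; this is a sketch of the decoder case only, not the committed code:

    #include <stdint.h>
    #include <boost/scoped_ptr.hpp>
    #include <google/protobuf/io/coded_stream.h>
    #include "drill/common.hpp"   // for LEN_PREFIX_BUFLEN

    int lengthDecodeSketch(const uint8_t* buf, uint32_t* p_length){
        using google::protobuf::io::CodedInputStream;
        boost::scoped_ptr<CodedInputStream> cis(
            new CodedInputStream(buf, LEN_PREFIX_BUFLEN));
        int pos0 = cis->CurrentPosition();
        cis->ReadVarint32(p_length);
        int pos1 = cis->CurrentPosition();
        return pos1 - pos0;   // stream freed when the scoped_ptr leaves scope
    }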

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/aaa4db74/contrib/native/client/src/clientlib/utils.hpp
----------------------------------------------------------------------
diff --git a/contrib/native/client/src/clientlib/utils.hpp b/contrib/native/client/src/clientlib/utils.hpp
new file mode 100644
index 0000000..9def9b4
--- /dev/null
+++ b/contrib/native/client/src/clientlib/utils.hpp
@@ -0,0 +1,47 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __UTILS_H
+#define __UTILS_H
+
+#include <sstream>
+#include <ostream>
+#include <fstream>
+#include <string>
+#include <stdlib.h>
+
+#include "drill/common.hpp"
+
+namespace Drill{
+
+class Utils{
+    public:
+
+        //allocate memory for Record Batches
+        static ByteBuf_t allocateBuffer(size_t len){
+            //http://stackoverflow.com/questions/2688466/why-mallocmemset-is-slower-than-calloc
+            ByteBuf_t b = (ByteBuf_t)calloc(len, sizeof(Byte_t)); return b;
+        }
+        static void freeBuffer(ByteBuf_t b){ free(b); }
+
+}; // Utils
+
+
+} // namespace Drill
+
+#endif
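
The calloc in allocateBuffer is deliberate: a zero-filled buffer means the bitmap bytes of a partially filled record batch read back as null bits rather than garbage (see the stackoverflow link in the header). A hypothetical caller, with the include path assumed:

    #include "utils.hpp"

    void allocationSketch(size_t len){
        Drill::ByteBuf_t b = Drill::Utils::allocateBuffer(len);
        // every byte of b is zero-initialized at this point, unlike malloc
        Drill::Utils::freeBuffer(b);   // b must not be used after this
    }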

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/aaa4db74/contrib/native/client/src/include/drill/common.hpp
----------------------------------------------------------------------
diff --git a/contrib/native/client/src/include/drill/common.hpp b/contrib/native/client/src/include/drill/common.hpp
index 4368058..2113ce5 100644
--- a/contrib/native/client/src/include/drill/common.hpp
+++ b/contrib/native/client/src/include/drill/common.hpp
@@ -23,13 +23,19 @@
 #include <stdint.h>
 #include <string>
 #include <vector>
+#include <boost/shared_ptr.hpp>
+
+#define DRILL_RPC_VERSION 1
 
 #define LENGTH_PREFIX_MAX_LENGTH 5
 #define LEN_PREFIX_BUFLEN LENGTH_PREFIX_MAX_LENGTH
 
+#define MAX_CONNECT_STR 4096
+#define MAX_SOCK_RD_BUFSIZE  1024
+
 #ifdef _DEBUG
 #define EXTRA_DEBUGGING
-#define CODER_DEBUGGING 
+#define CODER_DEBUGGING
 #endif
 
 namespace Drill {
@@ -39,12 +45,15 @@ typedef std::vector<uint8_t> DataBuf;
 typedef uint8_t Byte_t;
 typedef Byte_t * ByteBuf_t;
 
+class FieldMetadata;
+typedef boost::shared_ptr< std::vector<Drill::FieldMetadata*> > FieldDefPtr;
+
 typedef enum{
-    QRY_SUCCESS=0, 
-    QRY_FAILURE=1, 
-    QRY_SUCCESS_WITH_INFO=2, 
-    QRY_NO_MORE_DATA=3, 
-    QRY_CANCEL=4, 
+    QRY_SUCCESS=0,
+    QRY_FAILURE=1,
+    QRY_SUCCESS_WITH_INFO=2,
+    QRY_NO_MORE_DATA=3,
+    QRY_CANCEL=4,
     QRY_OUT_OF_BOUNDS=5,
     QRY_CLIENT_OUTOFMEM=6,
     QRY_INTERNAL_ERROR=7,
@@ -52,8 +61,8 @@ typedef enum{
 } status_t;
 
 typedef enum{
-    CONN_SUCCESS=0, 
-    CONN_FAILURE=1, 
+    CONN_SUCCESS=0,
+    CONN_FAILURE=1,
     CONN_HANDSHAKE_FAILED=2,
     CONN_INVALID_INPUT=3,
     CONN_ZOOKEEPER_ERROR=4
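
FieldDefPtr makes the column-definition vector shared rather than owned outright by the RecordBatch, so a RecordIterator can keep using the definitions after a batch goes away. A sketch of what the boost::shared_ptr buys; note the FieldMetadata* elements themselves still need an explicit delete, which the new ~RecordBatch performs:

    #include <cassert>
    #include "drill/common.hpp"

    void fieldDefPtrSketch(){
        Drill::FieldDefPtr defs(new std::vector<Drill::FieldMetadata*>());
        Drill::FieldDefPtr alias = defs;  // e.g. one copy held by the batch,
                                          // one handed to the iterator
        assert(defs.use_count() == 2);    // the vector is freed only when the
                                          // last shared_ptr is dropped
    }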

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/aaa4db74/contrib/native/client/src/include/drill/drillClient.hpp
----------------------------------------------------------------------
diff --git a/contrib/native/client/src/include/drill/drillClient.hpp b/contrib/native/client/src/include/drill/drillClient.hpp
index 0e85dcc..6d59afb 100644
--- a/contrib/native/client/src/include/drill/drillClient.hpp
+++ b/contrib/native/client/src/include/drill/drillClient.hpp
@@ -21,8 +21,6 @@
 #define DRILL_CLIENT_H
 
 #include <vector>
-#include <boost/log/core.hpp>
-#include <boost/log/trivial.hpp>
 #include <boost/thread.hpp>
 #include "drill/common.hpp"
 #include "drill/protobuf/User.pb.h"
@@ -42,7 +40,7 @@
   #if __GNUC__ >= 4
     #define DECLSPEC_DRILL_CLIENT __attribute__ ((visibility ("default")))
   #else
-    #define DECLSPEC_DRILL_CLIENT 
+    #define DECLSPEC_DRILL_CLIENT
   #endif
 #endif
 
@@ -65,7 +63,7 @@ class DECLSPEC_DRILL_CLIENT DrillClientError{
 
         static DrillClientError*  getErrorObject(const exec::shared::DrillPBError& e);
 
-        // To get the error number we add a error range start number to 
+        // To get the error number we add an error range start number to
         // the status code returned (either status_t or connectionStatus_t)
         uint32_t status; // could be either status_t or connectionStatus_t
         uint32_t errnum;
@@ -83,10 +81,13 @@ class DECLSPEC_DRILL_CLIENT DrillClientInitializer{
 class DECLSPEC_DRILL_CLIENT DrillClientConfig{
     public:
         DrillClientConfig();
+        ~DrillClientConfig();
         static void initLogging(const char* path);
         static void setLogLevel(logLevel_t l);
         static void setBufferLimit(uint64_t l);
         static uint64_t getBufferLimit();
+        static void setSocketTimeout(int32_t l);
+        static int32_t getSocketTimeout();
         static logLevel_t getLogLevel();
     private:
         // The logging level
@@ -94,7 +95,9 @@ class DECLSPEC_DRILL_CLIENT DrillClientConfig{
         // The total amount of memory to be allocated by an instance of DrillClient.
         // For future use. Currently, not enforced.
         static uint64_t s_bufferLimit;
-        static boost::mutex s_mutex; 
+        // Timeout (in seconds) for asynchronous read operations. Default is 180 seconds
+        static int32_t s_socketTimeout;
+        static boost::mutex s_mutex;
 };
 
 
@@ -104,8 +107,8 @@ class DECLSPEC_DRILL_CLIENT DrillClientConfig{
 typedef void* QueryHandle_t;
 
 /*
- * Query Results listener callback. This function is called for every record batch after it has 
- * been received and decoded. The listener function should return a status. 
+ * Query Results listener callback. This function is called for every record batch after it has
+ * been received and decoded. The listener function should return a status.
  * If the listener returns failure, the query will be canceled.
  *
  * DrillClientQueryResult will hold a listener & listener contxt for the call back function
@@ -114,13 +117,13 @@ typedef status_t (*pfnQueryResultsListener)(QueryHandle_t ctx, RecordBatch* b, D
 
 /*
  * The schema change listener callback. This function is called if the record batch detects a
- * change in the schema. The client application can call getColDefs in the RecordIterator or 
+ * change in the schema. The client application can call getColDefs in the RecordIterator or
  * get the field information from the RecordBatch itself and handle the change appropriately.
  */
-typedef uint32_t (*pfnSchemaListener)(void* ctx, SchemaDef* s, DrillClientError* err);
+typedef status_t (*pfnSchemaListener)(void* ctx, FieldDefPtr f, DrillClientError* err);
 
-/* 
- * A Record Iterator instance is returned by the SubmitQuery class. Calls block until some data 
+/*
+ * A Record Iterator instance is returned by the SubmitQuery class. Calls block until some data
  * is available, or until all data has been returned.
  */
 
@@ -129,12 +132,12 @@ class DECLSPEC_DRILL_CLIENT RecordIterator{
     public:
 
     ~RecordIterator();
-    /* 
-     * Returns a vector of column(i.e. field) definitions. The returned reference is guaranteed to be valid till the 
-     * end of the query or until a schema change event is received. If a schema change event is received by the 
-     * application, the application should discard the reference it currently holds and call this function again. 
+    /*
+     * Returns a vector of column(i.e. field) definitions. The returned reference is guaranteed to be valid till the
+     * end of the query or until a schema change event is received. If a schema change event is received by the
+     * application, the application should discard the reference it currently holds and call this function again.
      */
-    std::vector<Drill::FieldMetadata*>& getColDefs();
+    FieldDefPtr getColDefs();
 
     /* Move the current pointer to the next record. */
     status_t next();
@@ -148,26 +151,29 @@ class DECLSPEC_DRILL_CLIENT RecordIterator{
     /* Cancels the query. */
     status_t cancel();
 
-    void registerSchemaChangeListener(pfnSchemaListener* l);
+    /* Returns true if the schema has changed from the previous record. Returns false for the first record. */
+    bool hasSchemaChanged();
+
+    void registerSchemaChangeListener(pfnSchemaListener l);
 
     /*
      * Returns the last error message
      */
-    std::string& getError();
+    const std::string& getError();
 
     private:
     RecordIterator(DrillClientQueryResult* pResult){
         this->m_currentRecord=-1;
         this->m_pCurrentRecordBatch=NULL;
         this->m_pQueryResult=pResult;
-        m_pColDefs=NULL;
+        //m_pColDefs=NULL;
     }
 
     DrillClientQueryResult* m_pQueryResult;
     size_t m_currentRecord;
     RecordBatch* m_pCurrentRecordBatch;
-    boost::mutex m_recordBatchMutex; 
-    std::vector<Drill::FieldMetadata*>* m_pColDefs; // Copy of the latest column defs made from the 
+    boost::mutex m_recordBatchMutex;
+    FieldDefPtr m_pColDefs; // Copy of the latest column defs made from the
     // first record batch with this definition
 };
 
@@ -189,19 +195,19 @@ class DECLSPEC_DRILL_CLIENT DrillClient{
         void close() ;
 
         /*
-         * Submit a query asynchronously and wait for results to be returned thru a callback. A query context handle is passed 
+         * Submit a query asynchronously and wait for results to be returned thru a callback. A query context handle is passed
          * back. The listener callback will return the handle in the ctx parameter.
          */
-        status_t submitQuery(exec::user::QueryType t, const std::string& plan, pfnQueryResultsListener listener, void* listenerCtx, QueryHandle_t* qHandle);
+        status_t submitQuery(::exec::shared::QueryType t, const std::string& plan, pfnQueryResultsListener listener, void* listenerCtx, QueryHandle_t* qHandle);
 
         /*
          * Submit a query asynchronously and wait for results to be returned thru an iterator that returns
          * results synchronously. The client app needs to call delete on the iterator when done.
          */
-        RecordIterator* submitQuery(exec::user::QueryType t, const std::string& plan, DrillClientError* err);
+        RecordIterator* submitQuery(::exec::shared::QueryType t, const std::string& plan, DrillClientError* err);
 
-        /* 
-         * The client application should call this function to wait for results if it has registered a 
+        /*
+         * The client application should call this function to wait for results if it has registered a
          * listener.
          */
         void waitForResults();
@@ -212,10 +218,21 @@ class DECLSPEC_DRILL_CLIENT DrillClient{
         std::string& getError();
 
         /*
-         * Applications using the async query submit method should call freeQueryResources to free up resources 
+         * Applications using the async query submit method can register a listener for schema changes
+         *
+         */
+        void registerSchemaChangeListener(QueryHandle_t* handle, pfnSchemaListener l);
+
+        /*
+         * Applications using the async query submit method should call freeQueryResources to free up resources
          * once the query is no longer being processed.
-         * */
+         */
         void freeQueryResources(QueryHandle_t* handle);
+
+        /*
+         * Applications using the sync query submit method should call freeQueryIterator to free up resources
+         * once the RecordIterator is no longer being processed.
+         */
         void freeQueryIterator(RecordIterator** pIter){ delete *pIter; *pIter=NULL;};
 
     private:
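
A hedged sketch of wiring up the reworked schema callback: the listener now receives the FieldDefPtr column list and returns a status_t, and registration goes through the new DrillClient method. The handler body here is illustrative only:

    #include <iostream>
    #include "drill/common.hpp"
    #include "drill/drillClient.hpp"

    Drill::status_t onSchemaChange(void* ctx, Drill::FieldDefPtr fields,
                                   Drill::DrillClientError* err){
        std::cout << "schema changed; now " << fields->size()
                  << " columns" << std::endl;
        return Drill::QRY_SUCCESS;
    }

    // after submitQuery has filled in a QueryHandle_t qHandle:
    //     client.registerSchemaChangeListener(&qHandle, onSchemaChange);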

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/aaa4db74/contrib/native/client/src/include/drill/protobuf/User.pb.h
----------------------------------------------------------------------
diff --git a/contrib/native/client/src/include/drill/protobuf/User.pb.h b/contrib/native/client/src/include/drill/protobuf/User.pb.h
index 3ed0249..982d34f 100644
--- a/contrib/native/client/src/include/drill/protobuf/User.pb.h
+++ b/contrib/native/client/src/include/drill/protobuf/User.pb.h
@@ -96,26 +96,6 @@ inline bool RpcType_Parse(
   return ::google::protobuf::internal::ParseNamedEnum<RpcType>(
     RpcType_descriptor(), name, value);
 }
-enum QueryType {
-  SQL = 1,
-  LOGICAL = 2,
-  PHYSICAL = 3
-};
-bool QueryType_IsValid(int value);
-const QueryType QueryType_MIN = SQL;
-const QueryType QueryType_MAX = PHYSICAL;
-const int QueryType_ARRAYSIZE = QueryType_MAX + 1;
-
-const ::google::protobuf::EnumDescriptor* QueryType_descriptor();
-inline const ::std::string& QueryType_Name(QueryType value) {
-  return ::google::protobuf::internal::NameOfEnum(
-    QueryType_descriptor(), value);
-}
-inline bool QueryType_Parse(
-    const ::std::string& name, QueryType* value) {
-  return ::google::protobuf::internal::ParseNamedEnum<QueryType>(
-    QueryType_descriptor(), name, value);
-}
 enum QueryResultsMode {
   STREAM_FULL = 1
 };
@@ -604,12 +584,12 @@ class RunQuery : public ::google::protobuf::Message {
   inline ::exec::user::QueryResultsMode results_mode() const;
   inline void set_results_mode(::exec::user::QueryResultsMode value);
 
-  // optional .exec.user.QueryType type = 2;
+  // optional .exec.shared.QueryType type = 2;
   inline bool has_type() const;
   inline void clear_type();
   static const int kTypeFieldNumber = 2;
-  inline ::exec::user::QueryType type() const;
-  inline void set_type(::exec::user::QueryType value);
+  inline ::exec::shared::QueryType type() const;
+  inline void set_type(::exec::shared::QueryType value);
 
   // optional string plan = 3;
   inline bool has_plan() const;
@@ -1456,7 +1436,7 @@ inline void RunQuery::set_results_mode(::exec::user::QueryResultsMode value) {
   results_mode_ = value;
 }
 
-// optional .exec.user.QueryType type = 2;
+// optional .exec.shared.QueryType type = 2;
 inline bool RunQuery::has_type() const {
   return (_has_bits_[0] & 0x00000002u) != 0;
 }
@@ -1470,11 +1450,11 @@ inline void RunQuery::clear_type() {
   type_ = 1;
   clear_has_type();
 }
-inline ::exec::user::QueryType RunQuery::type() const {
-  return static_cast< ::exec::user::QueryType >(type_);
+inline ::exec::shared::QueryType RunQuery::type() const {
+  return static_cast< ::exec::shared::QueryType >(type_);
 }
-inline void RunQuery::set_type(::exec::user::QueryType value) {
-  assert(::exec::user::QueryType_IsValid(value));
+inline void RunQuery::set_type(::exec::shared::QueryType value) {
+  assert(::exec::shared::QueryType_IsValid(value));
   set_has_type();
   type_ = value;
 }
@@ -1927,10 +1907,6 @@ inline const EnumDescriptor* GetEnumDescriptor< ::exec::user::RpcType>() {
   return ::exec::user::RpcType_descriptor();
 }
 template <>
-inline const EnumDescriptor* GetEnumDescriptor< ::exec::user::QueryType>() {
-  return ::exec::user::QueryType_descriptor();
-}
-template <>
 inline const EnumDescriptor* GetEnumDescriptor< ::exec::user::QueryResultsMode>() {
   return ::exec::user::QueryResultsMode_descriptor();
 }
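
With QueryType removed from User.pb.h, callers switch to the enum generated into exec.shared from UserBitShared.proto; the value names (SQL, LOGICAL, PHYSICAL) are assumed to carry over unchanged by the move:

    // before: client.submitQuery(exec::user::SQL, plan, listener, ctx, &qHandle);
    // after (client, plan, listener, ctx, qHandle as in the caller's code):
    Drill::status_t s = client.submitQuery(::exec::shared::SQL, plan,
                                           listener, ctx, &qHandle);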

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/aaa4db74/contrib/native/client/src/include/drill/recordBatch.hpp
----------------------------------------------------------------------
diff --git a/contrib/native/client/src/include/drill/recordBatch.hpp b/contrib/native/client/src/include/drill/recordBatch.hpp
index c40327b..4ed1e31 100644
--- a/contrib/native/client/src/include/drill/recordBatch.hpp
+++ b/contrib/native/client/src/include/drill/recordBatch.hpp
@@ -21,6 +21,7 @@
 
 
 #include <assert.h>
+#include <math.h>
 #include <stdint.h>
 #include <stdio.h>
 #include <ostream>
@@ -45,7 +46,7 @@
   #if __GNUC__ >= 4
     #define DECLSPEC_DRILL_CLIENT __attribute__ ((visibility ("default")))
   #else
-    #define DECLSPEC_DRILL_CLIENT 
+    #define DECLSPEC_DRILL_CLIENT
   #endif
 #endif
 
@@ -55,7 +56,7 @@ namespace Drill {
 class FieldBatch;
 class ValueVectorBase;
 
-//TODO: The base classes for value vectors should have abstract functions instead of implementations 
+//TODO: The base classes for value vectors should have abstract functions instead of implementations
 //that return 'NOT IMPLEMENTED YET'
 
 // A Read Only Sliced byte buffer
@@ -111,7 +112,7 @@ class SlicedByteBuf{
         ByteBuf_t getSliceStart(){ return this->m_buffer+this->m_start;}
 
         //    accessor functions
-        //  
+        //
         //    TYPE getTYPE(size_t index){
         //    if(index>=m_length) return 0;
         //      return (TYPE) m_buffer[offset+index];
@@ -122,8 +123,8 @@ class SlicedByteBuf{
             // Type T can only be an integer type
             // Type T cannot be a struct of fixed size
             // Because struct alignment is compiler dependent
-            // we can end up with a struct size that is larger 
-            // than the buffer in the sliced buf.  
+            // we can end up with a struct size that is larger
+            // than the buffer in the sliced buf.
             assert((index + sizeof(T) <= this->m_length));
             if(index + sizeof(T) <= this->m_length)
                 return *((T*)(this->m_buffer+this->m_start+index));
@@ -145,7 +146,7 @@ class SlicedByteBuf{
 
         ByteBuf_t getAt(size_t index){
             return this->m_buffer+m_start+index;
-        } 
+        }
 
         bool getBit(size_t index){
             // refer to BitVector.java http://bit.ly/Py1jof
@@ -202,7 +203,7 @@ class DECLSPEC_DRILL_CLIENT ValueVectorUnimplemented:public ValueVectorBase{
         const char* get(size_t index) const { return 0;};
         virtual void getValueAt(size_t index, char* buf, size_t nChars) const{
             *buf=0; return;
-        } 
+        }
 
         virtual uint32_t getSize(size_t index) const{ return 0;};
 
@@ -284,7 +285,7 @@ class DECLSPEC_DRILL_CLIENT ValueVectorBit:public ValueVectorFixedWidth{
 template <int DECIMAL_DIGITS, int WIDTH_IN_BYTES, bool IS_SPARSE, int MAX_PRECISION = 0 >
     class ValueVectorDecimal: public ValueVectorFixedWidth {
         public:
-            ValueVectorDecimal(SlicedByteBuf* b, size_t rowCount, int32_t scale): 
+            ValueVectorDecimal(SlicedByteBuf* b, size_t rowCount, int32_t scale):
                 ValueVectorFixedWidth(b, rowCount),
                 m_scale(scale)
         {
@@ -319,7 +320,7 @@ template <int DECIMAL_DIGITS, int WIDTH_IN_BYTES, bool IS_SPARSE, int MAX_PRECIS
 template<typename VALUE_TYPE>
     class ValueVectorDecimalTrivial: public ValueVectorFixedWidth {
         public:
-            ValueVectorDecimalTrivial(SlicedByteBuf* b, size_t rowCount, int32_t scale): 
+            ValueVectorDecimalTrivial(SlicedByteBuf* b, size_t rowCount, int32_t scale):
                 ValueVectorFixedWidth(b, rowCount),
                 m_scale(scale)
         {
@@ -329,7 +330,7 @@ template<typename VALUE_TYPE>
             DecimalValue get(size_t index) const {
                 return DecimalValue(
                         m_pBuffer->readAt<VALUE_TYPE>(index * sizeof(VALUE_TYPE)),
-                        m_scale); 
+                        m_scale);
             }
 
             void getValueAt(size_t index, char* buf, size_t nChars) const {
@@ -355,7 +356,7 @@ template <typename VALUE_TYPE>
 {
     public:
         NullableValueVectorFixed(SlicedByteBuf *b, size_t rowCount):ValueVectorBase(b, rowCount){
-            size_t offsetEnd = rowCount/8 + 1; 
+            size_t offsetEnd = (size_t)ceil(rowCount/8.0);
             this->m_pBitmap= new SlicedByteBuf(*b, 0, offsetEnd);
             this->m_pData= new SlicedByteBuf(*b, offsetEnd, b->getLength());
             // TODO: testing boundary case(null columns)
@@ -372,7 +373,7 @@ template <typename VALUE_TYPE>
         }
 
         VALUE_TYPE get(size_t index) const {
-            // it should not be called if the value is null 
+            // it should not be called if the value is null
             assert( "value is null" && !isNull(index));
             return m_pData->readAt<VALUE_TYPE>(index * sizeof(VALUE_TYPE));
         }
@@ -390,14 +391,14 @@ template <typename VALUE_TYPE>
             return sizeof(VALUE_TYPE);
         }
     private:
-        SlicedByteBuf* m_pBitmap; 
+        SlicedByteBuf* m_pBitmap;
         SlicedByteBuf* m_pData;
 };
 
 // The 'holder' classes are (by contract) simple structs with primitive members and no dynamic allocations.
-// The template classes create an instance of the class and return it to the caller in the 'get' routines. 
-// The compiler will create a copy and return it to the caller. If the object is more complex than a struct of 
-// primitives, the class _must_ provide a copy constructor. 
+// The template classes create an instance of the class and return it to the caller in the 'get' routines.
+// The compiler will create a copy and return it to the caller. If the object is more complex than a struct of
+// primitives, the class _must_ provide a copy constructor.
 // We don't really need a destructor here, but we declare a virtual dtor in the base class in case we ever get
 // more complex and start doing dynamic allocations in these classes.
 
@@ -490,11 +491,11 @@ struct IntervalHolder{
 };
 
 /*
- * VALUEHOLDER_CLASS_TYPE is a struct with a constructor that takes a parameter of type VALUE_VECTOR_TYPE 
+ * VALUEHOLDER_CLASS_TYPE is a struct with a constructor that takes a parameter of type VALUE_VECTOR_TYPE
  * (a primitive type)
  * VALUEHOLDER_CLASS_TYPE implements a toString function
- * Note that VALUEHOLDER_CLASS_TYPE is created on the stack and the copy returned in the get function. 
- * So the class needs to have the appropriate copy constructor or the default bitwise copy should work 
+ * Note that VALUEHOLDER_CLASS_TYPE is created on the stack and the copy returned in the get function.
+ * So the class needs to have the appropriate copy constructor or the default bitwise copy should work
  * correctly.
  */
 template <class VALUEHOLDER_CLASS_TYPE, typename VALUE_TYPE>
@@ -552,7 +553,7 @@ template <class VALUEHOLDER_CLASS_TYPE, class VALUE_VECTOR_TYPE>
         public:
 
             NullableValueVectorTyped(SlicedByteBuf *b, size_t rowCount):ValueVectorBase(b, rowCount){
-                size_t offsetEnd = rowCount/8 + 1; 
+                size_t offsetEnd = (size_t)ceil(rowCount/8.0);
                 this->m_pBitmap= new SlicedByteBuf(*b, 0, offsetEnd);
                 this->m_pData= new SlicedByteBuf(*b, offsetEnd, b->getLength()-offsetEnd);
                 this->m_pVector= new VALUE_VECTOR_TYPE(m_pData, rowCount);
@@ -575,7 +576,7 @@ template <class VALUEHOLDER_CLASS_TYPE, class VALUE_VECTOR_TYPE>
 
             void getValueAt(size_t index, char* buf, size_t nChars) const{
                 std::stringstream sstr;
-                if(this->isNull(index)){ 
+                if(this->isNull(index)){
                     sstr<<"NULL";
                     strncpy(buf, sstr.str().c_str(), nChars);
                 }else{
@@ -589,7 +590,7 @@ template <class VALUEHOLDER_CLASS_TYPE, class VALUE_VECTOR_TYPE>
             }
 
         private:
-            SlicedByteBuf* m_pBitmap; 
+            SlicedByteBuf* m_pBitmap;
             SlicedByteBuf* m_pData;
             VALUE_VECTOR_TYPE* m_pVector;
     };
@@ -617,10 +618,10 @@ class DECLSPEC_DRILL_CLIENT ValueVectorVarWidth:public ValueVectorBase{
             size_t endIdx = this->m_pOffsetArray->getUint32((index+1)*sizeof(uint32_t));
             size_t length = endIdx - startIdx;
             assert(length >= 0);
-            // Return an object created on the stack. The compiler will return a 
-            // copy and destroy the stack object. The optimizer will hopefully 
+            // Return an object created on the stack. The compiler will return a
+            // copy and destroy the stack object. The optimizer will hopefully
             // elide this so we can return an object with no extra memory allocation
-            // and no copies.(SEE: http://en.wikipedia.org/wiki/Return_value_optimization) 
+            // and no copies.(SEE: http://en.wikipedia.org/wiki/Return_value_optimization)
             VarWidthHolder dst;
             dst.data=this->m_pData->getSliceStart()+startIdx;
             dst.size=length;
@@ -673,9 +674,9 @@ class DECLSPEC_DRILL_CLIENT ValueVectorVarBinary:public ValueVectorVarWidth{
         }
 };
 //
-//TODO: For windows, we have to export instantiations of the template class. 
+//TODO: For windows, we have to export instantiations of the template class.
 //see: http://msdn.microsoft.com/en-us/library/twa2aw10.aspx
-//for example: 
+//for example:
 //template class __declspec(dllexport) B<int>;
 //class __declspec(dllexport) D : public B<int> { }
 //
@@ -686,7 +687,7 @@ typedef NullableValueVectorTyped<int, ValueVectorBit > NullableValueVectorBit;
 // Aliases for Decimal Types:
 // The definitions for decimal digits, width, max precision are defined in
 // /exec/java-exec/src/main/codegen/data/ValueVectorTypes.tdd
-// 
+//
 // Decimal9 and Decimal18 could be optimized, maybe write separate classes?
 typedef ValueVectorDecimalTrivial<int32_t> ValueVectorDecimal9;
 typedef ValueVectorDecimalTrivial<int64_t> ValueVectorDecimal18;
@@ -778,7 +779,7 @@ class FieldBatch{
         ret_t load();
 
         const ValueVectorBase * getVector(){
-            return m_pValueVector; 
+            return m_pValueVector;
         }
 
     private:
@@ -795,33 +796,27 @@ class ValueVectorFactory{
 
 class DECLSPEC_DRILL_CLIENT RecordBatch{
     public:
-        RecordBatch(exec::user::QueryResult* pResult, ByteBuf_t b){
-            m_pQueryResult=pResult;      
+
+        //m_allocatedBuffer is the memory block allocated to hold the incoming RPC message. Record Batches operate on
+        //slices of the allocated buffer. The first slice (the first Field Batch) begins at m_buffer. Data in the
+        //allocated buffer before m_buffer is mostly the RPC header, and the QueryResult object.
+        RecordBatch(exec::user::QueryResult* pResult, ByteBuf_t r, ByteBuf_t b)
+                :m_fieldDefs(new(std::vector<Drill::FieldMetadata*>)){
+            m_pQueryResult=pResult;
             m_pRecordBatchDef=&pResult->def();
             m_numRecords=pResult->row_count();
+            m_allocatedBuffer=r;
             m_buffer=b;
             m_numFields=pResult->def().field_size();
             m_bHasSchemaChanged=false;
         }
 
-        ~RecordBatch(){
-            m_buffer=NULL;
-            //free memory allocated for FieldBatch objects saved in m_fields;
-            for(std::vector<FieldBatch*>::iterator it = m_fields.begin(); it != m_fields.end(); ++it){
-                delete *it;    
-            }
-            m_fields.clear();
-            for(std::vector<Drill::FieldMetadata*>::iterator it = m_fieldDefs.begin(); it != m_fieldDefs.end(); ++it){
-                delete *it;    
-            }
-            m_fieldDefs.clear();
-            delete m_pQueryResult;
-        }
+        ~RecordBatch();
 
         // get the ith field metadata
         const Drill::FieldMetadata& getFieldMetadata(size_t index){
             //return this->m_pRecordBatchDef->field(index);
-            return *(m_fieldDefs[index]); 
+            return *(m_fieldDefs->at(index));
         }
 
         size_t getNumRecords(){ return m_numRecords;}
@@ -829,13 +824,13 @@ class DECLSPEC_DRILL_CLIENT RecordBatch{
         size_t getNumFields() { return m_pRecordBatchDef->field_size(); }
         bool isLastChunk() { return m_pQueryResult->is_last_chunk(); }
 
-        std::vector<Drill::FieldMetadata*>& getColumnDefs(){ return m_fieldDefs;}
+        boost::shared_ptr<std::vector<Drill::FieldMetadata*> > getColumnDefs(){ return m_fieldDefs;}
 
-        // 
+        //
         // build the record batch: i.e. fill up the value vectors from the buffer.
-        // On fetching the data from the server, the caller creates a RecordBatch 
-        // object then calls build() to build the value vectors.The caller saves the 
-        // Record Batch and is responsible for freeing both the RecordBatch and the 
+        // On fetching the data from the server, the caller creates a RecordBatch
+        // object then calls build() to build the value vectors.The caller saves the
+        // Record Batch and is responsible for freeing both the RecordBatch and the
         // raw buffer memory
         //
         ret_t build();
@@ -843,7 +838,7 @@ class DECLSPEC_DRILL_CLIENT RecordBatch{
         void print(std::ostream& s, size_t num);
 
         const ValueVectorBase * getVector(size_t index){
-            return m_fields[index]->getVector(); 
+            return m_fields[index]->getVector();
         }
 
         void schemaChanged(bool b){
@@ -858,9 +853,10 @@ class DECLSPEC_DRILL_CLIENT RecordBatch{
     private:
         const exec::user::QueryResult* m_pQueryResult;
         const exec::shared::RecordBatchDef* m_pRecordBatchDef;
+        ByteBuf_t m_allocatedBuffer;
         ByteBuf_t m_buffer;
         //build the current schema out of the field metadata
-        std::vector<Drill::FieldMetadata*> m_fieldDefs;
+        FieldDefPtr m_fieldDefs;
         std::vector<FieldBatch*> m_fields;
         size_t m_numFields;
         size_t m_numRecords;


[29/32] DRILL-1024: Move hive storage code out of 'exec/java-exec' into 'contrib/storage-hive' module.

Posted by ja...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/980dc87b/contrib/storage-hive/core/src/main/java/org/apache/drill/exec/store/hive/HiveRecordReader.java
----------------------------------------------------------------------
diff --git a/contrib/storage-hive/core/src/main/java/org/apache/drill/exec/store/hive/HiveRecordReader.java b/contrib/storage-hive/core/src/main/java/org/apache/drill/exec/store/hive/HiveRecordReader.java
new file mode 100644
index 0000000..c062f8c
--- /dev/null
+++ b/contrib/storage-hive/core/src/main/java/org/apache/drill/exec/store/hive/HiveRecordReader.java
@@ -0,0 +1,542 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.drill.exec.store.hive;
+
+import java.io.IOException;
+import java.sql.Timestamp;
+import java.sql.Date;
+import java.util.List;
+import java.util.Map;
+import java.util.Properties;
+
+import org.apache.drill.common.exceptions.DrillRuntimeException;
+import org.apache.drill.common.exceptions.ExecutionSetupException;
+import org.apache.drill.common.expression.SchemaPath;
+import org.apache.drill.common.types.TypeProtos;
+import org.apache.drill.common.types.TypeProtos.MajorType;
+import org.apache.drill.common.types.TypeProtos.MinorType;
+import org.apache.drill.common.types.Types;
+import org.apache.drill.exec.exception.SchemaChangeException;
+import org.apache.drill.exec.expr.TypeHelper;
+import org.apache.drill.exec.ops.FragmentContext;
+import org.apache.drill.exec.physical.impl.OutputMutator;
+import org.apache.drill.exec.record.MaterializedField;
+import org.apache.drill.exec.store.RecordReader;
+import org.apache.drill.exec.vector.BigIntVector;
+import org.apache.drill.exec.vector.BitVector;
+import org.apache.drill.exec.vector.Float4Vector;
+import org.apache.drill.exec.vector.Float8Vector;
+import org.apache.drill.exec.vector.IntVector;
+import org.apache.drill.exec.vector.TimeStampVector;
+import org.apache.drill.exec.vector.DateVector;
+import org.apache.drill.exec.vector.SmallIntVector;
+import org.apache.drill.exec.vector.TinyIntVector;
+import org.apache.drill.exec.vector.ValueVector;
+import org.apache.drill.exec.vector.VarBinaryVector;
+import org.apache.drill.exec.vector.VarCharVector;
+import org.apache.drill.exec.vector.allocator.VectorAllocator;
+import org.apache.hadoop.hive.common.type.HiveDecimal;
+import org.apache.hadoop.hive.metastore.MetaStoreUtils;
+import org.apache.hadoop.hive.metastore.api.FieldSchema;
+import org.apache.hadoop.hive.metastore.api.Partition;
+import org.apache.hadoop.hive.metastore.api.Table;
+import org.apache.hadoop.hive.serde2.ColumnProjectionUtils;
+import org.apache.hadoop.hive.serde2.SerDe;
+import org.apache.hadoop.hive.serde2.SerDeException;
+import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
+import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector.Category;
+import org.apache.hadoop.hive.serde2.objectinspector.PrimitiveObjectInspector.PrimitiveCategory;
+import org.apache.hadoop.hive.serde2.objectinspector.StructObjectInspector;
+import org.apache.hadoop.hive.serde2.typeinfo.PrimitiveTypeInfo;
+import org.apache.hadoop.hive.serde2.typeinfo.StructTypeInfo;
+import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo;
+import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils;
+import org.apache.hadoop.io.Writable;
+import org.apache.hadoop.mapred.InputFormat;
+import org.apache.hadoop.mapred.InputSplit;
+import org.apache.hadoop.mapred.JobConf;
+import org.apache.hadoop.mapred.Reporter;
+
+import org.joda.time.DateTime;
+import org.joda.time.DateTimeZone;
+
+import com.google.common.collect.Lists;
+
+public class HiveRecordReader implements RecordReader {
+
+  protected Table table;
+  protected Partition partition;
+  protected InputSplit inputSplit;
+  protected FragmentContext context;
+  protected List<SchemaPath> projectedColumns;
+  protected List<String> selectedColumnNames;
+  protected List<TypeInfo> selectedColumnTypes = Lists.newArrayList();
+  protected List<ObjectInspector> selectedColumnObjInspectors = Lists.newArrayList();
+  protected List<HiveFieldConverter> selectedColumnFieldConverters = Lists.newArrayList();
+  protected List<String> selectedPartitionNames = Lists.newArrayList();
+  protected List<TypeInfo> selectedPartitionTypes = Lists.newArrayList();
+  protected List<Object> selectedPartitionValues = Lists.newArrayList();
+  protected List<String> tableColumns; // all columns in table (not including partition columns)
+  protected SerDe serde;
+  protected StructObjectInspector sInspector;
+  protected Object key, value;
+  protected org.apache.hadoop.mapred.RecordReader reader;
+  protected List<ValueVector> vectors = Lists.newArrayList();
+  protected List<ValueVector> pVectors = Lists.newArrayList();
+  protected Object redoRecord;
+  protected boolean empty;
+
+  protected static final int TARGET_RECORD_COUNT = 4000;
+  protected static final int FIELD_SIZE = 50;
+
+  public HiveRecordReader(Table table, Partition partition, InputSplit inputSplit, List<SchemaPath> projectedColumns,
+      FragmentContext context) throws ExecutionSetupException {
+    this.table = table;
+    this.partition = partition;
+    this.inputSplit = inputSplit;
+    this.context = context;
+    this.projectedColumns = projectedColumns;
+    this.empty = (inputSplit == null && partition == null);
+    init();
+  }
+
+  private void init() throws ExecutionSetupException {
+    Properties properties;
+    JobConf job = new JobConf();
+    if (partition != null) {
+      properties = MetaStoreUtils.getPartitionMetadata(partition, table);
+
+      // SerDe expects properties from Table, but above call doesn't add Table properties.
+      // Include Table properties in the final list in order not to break SerDes that depend on
+      // Table properties. For example AvroSerDe gets the schema from properties (passed as second argument)
+      for (Map.Entry<String, String> entry : table.getParameters().entrySet()) {
+        if (entry.getKey() != null && entry.getValue() != null) {
+          properties.put(entry.getKey(), entry.getValue());
+        }
+      }
+    } else {
+      properties = MetaStoreUtils.getTableMetadata(table);
+    }
+    for (Object obj : properties.keySet()) {
+      job.set((String) obj, (String) properties.get(obj));
+    }
+    InputFormat format;
+    String sLib = (partition == null) ? table.getSd().getSerdeInfo().getSerializationLib() : partition.getSd().getSerdeInfo().getSerializationLib();
+    String inputFormatName = (partition == null) ? table.getSd().getInputFormat() : partition.getSd().getInputFormat();
+    try {
+      format = (InputFormat) Class.forName(inputFormatName).getConstructor().newInstance();
+      Class c = Class.forName(sLib);
+      serde = (SerDe) c.getConstructor().newInstance();
+      serde.initialize(job, properties);
+    } catch (ReflectiveOperationException | SerDeException e) {
+      throw new ExecutionSetupException("Unable to instantiate InputFormat", e);
+    }
+    job.setInputFormat(format.getClass());
+
+    List<FieldSchema> partitionKeys = table.getPartitionKeys();
+    List<String> partitionNames = Lists.newArrayList();
+    for (FieldSchema field : partitionKeys) {
+      partitionNames.add(field.getName());
+    }
+
+    try {
+      ObjectInspector oi = serde.getObjectInspector();
+      if (oi.getCategory() != ObjectInspector.Category.STRUCT) {
+        throw new UnsupportedOperationException(String.format("%s category not supported", oi.getCategory()));
+      }
+      sInspector = (StructObjectInspector) oi;
+      StructTypeInfo sTypeInfo = (StructTypeInfo) TypeInfoUtils.getTypeInfoFromObjectInspector(sInspector);
+      if (projectedColumns == null) {
+        selectedColumnNames = sTypeInfo.getAllStructFieldNames();
+        tableColumns = selectedColumnNames;
+      } else {
+        tableColumns = sTypeInfo.getAllStructFieldNames();
+        List<Integer> columnIds = Lists.newArrayList();
+        selectedColumnNames = Lists.newArrayList();
+        for (SchemaPath field : projectedColumns) {
+          String columnName = field.getRootSegment().getPath(); //TODO?
+          if (!tableColumns.contains(columnName)) {
+            if (partitionNames.contains(columnName)) {
+              selectedPartitionNames.add(columnName);
+            } else {
+              throw new ExecutionSetupException(String.format("Column %s does not exist", columnName));
+            }
+          } else {
+            columnIds.add(tableColumns.indexOf(columnName));
+            selectedColumnNames.add(columnName);
+          }
+        }
+        ColumnProjectionUtils.appendReadColumnIDs(job, columnIds);
+        ColumnProjectionUtils.appendReadColumnNames(job, selectedColumnNames);
+      }
+
+      for (String columnName : selectedColumnNames) {
+        ObjectInspector fieldOI = sInspector.getStructFieldRef(columnName).getFieldObjectInspector();
+        TypeInfo typeInfo = TypeInfoUtils.getTypeInfoFromTypeString(fieldOI.getTypeName());
+
+        selectedColumnObjInspectors.add(fieldOI);
+        selectedColumnTypes.add(typeInfo);
+        selectedColumnFieldConverters.add(HiveFieldConverter.create(typeInfo));
+      }
+
+      if (projectedColumns == null) {
+        selectedPartitionNames = partitionNames;
+      }
+
+      for (int i = 0; i < table.getPartitionKeys().size(); i++) {
+        FieldSchema field = table.getPartitionKeys().get(i);
+        if (selectedPartitionNames.contains(field.getName())) {
+          TypeInfo pType = TypeInfoUtils.getTypeInfoFromTypeString(field.getType());
+          selectedPartitionTypes.add(pType);
+
+          if (partition != null) {
+            selectedPartitionValues.add(convertPartitionType(pType, partition.getValues().get(i)));
+          }
+        }
+      }
+    } catch (Exception e) {
+      throw new ExecutionSetupException("Failure while initializing HiveRecordReader: " + e.getMessage(), e);
+    }
+
+    if (!empty) {
+      try {
+        reader = format.getRecordReader(inputSplit, job, Reporter.NULL);
+      } catch (IOException e) {
+        throw new ExecutionSetupException("Failed to get o.a.hadoop.mapred.RecordReader from Hive InputFormat", e);
+      }
+      key = reader.createKey();
+      value = reader.createValue();
+    }
+  }
+
+  @Override
+  public void setup(OutputMutator output) throws ExecutionSetupException {
+    try {
+      for (int i = 0; i < selectedColumnNames.size(); i++) {
+        MajorType type = Types.optional(getMinorTypeFromHiveTypeInfo(selectedColumnTypes.get(i)));
+        MaterializedField field = MaterializedField.create(SchemaPath.getSimplePath(selectedColumnNames.get(i)), type);
+        Class vvClass = TypeHelper.getValueVectorClass(type.getMinorType(), type.getMode());
+        vectors.add(output.addField(field, vvClass));
+      }
+
+      for (int i = 0; i < selectedPartitionNames.size(); i++) {
+        MajorType type = Types.required(getMinorTypeFromHiveTypeInfo(selectedPartitionTypes.get(i)));
+        MaterializedField field = MaterializedField.create(SchemaPath.getSimplePath(selectedPartitionNames.get(i)), type);
+        Class vvClass = TypeHelper.getValueVectorClass(field.getType().getMinorType(), field.getDataMode());
+        pVectors.add(output.addField(field, vvClass));
+      }
+    } catch(SchemaChangeException e) {
+      throw new ExecutionSetupException(e);
+    }
+  }
+
+  @Override
+  public int next() {
+    if (empty) {
+      return 0;
+    }
+
+    for (ValueVector vv : vectors) {
+      VectorAllocator.getAllocator(vv, FIELD_SIZE).alloc(TARGET_RECORD_COUNT);
+    }
+
+    try {
+      int recordCount = 0;
+
+      if (redoRecord != null) {
+        // Try writing the record that didn't fit into the last RecordBatch
+        Object deSerializedValue = serde.deserialize((Writable) redoRecord);
+        boolean status = readHiveRecordAndInsertIntoRecordBatch(deSerializedValue, recordCount);
+        if (!status) {
+          throw new DrillRuntimeException("Current record is too big to fit into allocated ValueVector buffer");
+        }
+        redoRecord = null;
+        recordCount++;
+      }
+
+      while (recordCount < TARGET_RECORD_COUNT && reader.next(key, value)) {
+        Object deSerializedValue = serde.deserialize((Writable) value);
+        boolean status = readHiveRecordAndInsertIntoRecordBatch(deSerializedValue, recordCount);
+        if (!status) {
+          redoRecord = value;
+          setValueCountAndPopulatePartitionVectors(recordCount);
+          return recordCount;
+        }
+        recordCount++;
+      }
+
+      setValueCountAndPopulatePartitionVectors(recordCount);
+      return recordCount;
+    } catch (IOException | SerDeException e) {
+      throw new DrillRuntimeException(e);
+    }
+  }
+
+  private boolean readHiveRecordAndInsertIntoRecordBatch(Object deSerializedValue, int outputRecordIndex) {
+    boolean success;
+    for (int i = 0; i < selectedColumnNames.size(); i++) {
+      String columnName = selectedColumnNames.get(i);
+      Object hiveValue = sInspector.getStructFieldData(deSerializedValue, sInspector.getStructFieldRef(columnName));
+
+      if (hiveValue != null) {
+        success = selectedColumnFieldConverters.get(i).setSafeValue(selectedColumnObjInspectors.get(i), hiveValue,
+            vectors.get(i), outputRecordIndex);
+
+        if (!success) {
+          return false;
+        }
+      }
+    }
+
+    return true;
+  }
+
+  private void setValueCountAndPopulatePartitionVectors(int recordCount) {
+    for (ValueVector v : vectors) {
+      v.getMutator().setValueCount(recordCount);
+    }
+
+    if (partition != null) {
+      populatePartitionVectors(recordCount);
+    }
+  }
+
+  @Override
+  public void cleanup() {
+  }
+
+  public static MinorType getMinorTypeFromHivePrimitiveTypeInfo(PrimitiveTypeInfo primitiveTypeInfo) {
+    switch(primitiveTypeInfo.getPrimitiveCategory()) {
+      case BINARY:
+        return TypeProtos.MinorType.VARBINARY;
+      case BOOLEAN:
+        return TypeProtos.MinorType.BIT;
+      case BYTE:
+        return TypeProtos.MinorType.TINYINT;
+      case DECIMAL:
+        return TypeProtos.MinorType.VARCHAR;
+      case DOUBLE:
+        return TypeProtos.MinorType.FLOAT8;
+      case FLOAT:
+        return TypeProtos.MinorType.FLOAT4;
+      case INT:
+        return TypeProtos.MinorType.INT;
+      case LONG:
+        return TypeProtos.MinorType.BIGINT;
+      case SHORT:
+        return TypeProtos.MinorType.SMALLINT;
+      case STRING:
+      case VARCHAR:
+        return TypeProtos.MinorType.VARCHAR;
+      case TIMESTAMP:
+        return TypeProtos.MinorType.TIMESTAMP;
+      case DATE:
+        return TypeProtos.MinorType.DATE;
+    }
+
+    throwUnsupportedHiveDataTypeError(primitiveTypeInfo.getPrimitiveCategory().toString());
+    return null;
+  }
+
+  public static MinorType getMinorTypeFromHiveTypeInfo(TypeInfo typeInfo) {
+    switch (typeInfo.getCategory()) {
+      case PRIMITIVE:
+        return getMinorTypeFromHivePrimitiveTypeInfo(((PrimitiveTypeInfo) typeInfo));
+
+      case LIST:
+      case MAP:
+      case STRUCT:
+      case UNION:
+      default:
+        throwUnsupportedHiveDataTypeError(typeInfo.getCategory().toString());
+    }
+
+    return null;
+  }
+
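+  // Partition-key values are constant for the whole split, so each selected
+  // partition column is filled by repeating the converted value recordCount times.
+  // Variable-width types size their buffers from the value's byte length; all
+  // other types use a default width of 50 bytes.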
+  protected void populatePartitionVectors(int recordCount) {
+    for (int i = 0; i < pVectors.size(); i++) {
+      int size = 50;
+      ValueVector vector = pVectors.get(i);
+      Object val = selectedPartitionValues.get(i);
+      PrimitiveCategory pCat = ((PrimitiveTypeInfo)selectedPartitionTypes.get(i)).getPrimitiveCategory();
+      if (pCat == PrimitiveCategory.BINARY || pCat == PrimitiveCategory.STRING || pCat == PrimitiveCategory.VARCHAR) {
+        size = ((byte[]) selectedPartitionValues.get(i)).length;
+      }
+
+      VectorAllocator.getAllocator(vector, size).alloc(recordCount);
+
+      switch(pCat) {
+        case BINARY: {
+          VarBinaryVector v = (VarBinaryVector) vector;
+          byte[] value = (byte[]) val;
+          for (int j = 0; j < recordCount; j++) {
+            v.getMutator().setSafe(j, value);
+          }
+          break;
+        }
+        case BOOLEAN: {
+          BitVector v = (BitVector) vector;
+          Boolean value = (Boolean) val;
+          for (int j = 0; j < recordCount; j++) {
+            v.getMutator().set(j, value ? 1 : 0);
+          }
+          break;
+        }
+        case BYTE: {
+          TinyIntVector v = (TinyIntVector) vector;
+          byte value = (byte) val;
+          for (int j = 0; j < recordCount; j++) {
+            v.getMutator().setSafe(j, value);
+          }
+          break;
+        }
+        case DOUBLE: {
+          Float8Vector v = (Float8Vector) vector;
+          double value = (double) val;
+          for (int j = 0; j < recordCount; j++) {
+            v.getMutator().setSafe(j, value);
+          }
+          break;
+        }
+        case FLOAT: {
+          Float4Vector v = (Float4Vector) vector;
+          float value = (float) val;
+          for (int j = 0; j < recordCount; j++) {
+            v.getMutator().setSafe(j, value);
+          }
+          break;
+        }
+        case INT: {
+          IntVector v = (IntVector) vector;
+          int value = (int) val;
+          for (int j = 0; j < recordCount; j++) {
+            v.getMutator().setSafe(j, value);
+          }
+          break;
+        }
+        case LONG: {
+          BigIntVector v = (BigIntVector) vector;
+          long value = (long) val;
+          for (int j = 0; j < recordCount; j++) {
+            v.getMutator().setSafe(j, value);
+          }
+          break;
+        }
+        case SHORT: {
+          SmallIntVector v = (SmallIntVector) vector;
+          short value = (short) val;
+          for (int j = 0; j < recordCount; j++) {
+            v.getMutator().setSafe(j, value);
+          }
+          break;
+        }
+        case VARCHAR:
+        case STRING: {
+          VarCharVector v = (VarCharVector) vector;
+          byte[] value = (byte[]) val;
+          for (int j = 0; j < recordCount; j++) {
+            v.getMutator().setSafe(j, value);
+          }
+          break;
+        }
+        case TIMESTAMP: {
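+          // withZoneRetainFields keeps the local date-time fields but re-labels the
+          // zone as UTC before taking epoch millis for the vector.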
+          TimeStampVector v = (TimeStampVector) vector;
+          DateTime ts = new DateTime(((Timestamp) val).getTime()).withZoneRetainFields(DateTimeZone.UTC);
+          long value = ts.getMillis();
+          for (int j = 0; j < recordCount; j++) {
+            v.getMutator().setSafe(j, value);
+          }
+          break;
+        }
+        case DATE: {
+          DateVector v = (DateVector) vector;
+          DateTime date = new DateTime(((Date)val).getTime()).withZoneRetainFields(DateTimeZone.UTC);
+          long value = date.getMillis();
+          for (int j = 0; j < recordCount; j++) {
+            v.getMutator().setSafe(j, value);
+          }
+          break;
+        }
+        case DECIMAL: {
+          VarCharVector v = (VarCharVector) vector;
+          byte[] value = ((HiveDecimal) val).toString().getBytes();
+          for (int j = 0; j < recordCount; j++) {
+            v.getMutator().setSafe(j, value);
+          }
+          break;
+        }
+        default:
+          throwUnsupportedHiveDataTypeError(pCat.toString());
+      }
+      vector.getMutator().setValueCount(recordCount);
+    }
+  }
+
+  /** Partition value is received in string format. Convert it into appropriate object based on the type. */
+  private Object convertPartitionType(TypeInfo typeInfo, String value) {
+    if (typeInfo.getCategory() != Category.PRIMITIVE) {
+      // In Hive only primitive types are allowed as partition column types.
+      throw new DrillRuntimeException("Non-Primitive types are not allowed as partition column type in Hive, " +
+          "but received one: " + typeInfo.getCategory());
+    }
+
+    PrimitiveCategory pCat = ((PrimitiveTypeInfo) typeInfo).getPrimitiveCategory();
+    switch (pCat) {
+      case BINARY:
+        return value.getBytes();
+      case BOOLEAN:
+        return Boolean.parseBoolean(value);
+      case BYTE:
+        return Byte.parseByte(value);
+      case DECIMAL:
+        return new HiveDecimal(value);
+      case DOUBLE:
+        return Double.parseDouble(value);
+      case FLOAT:
+        return Float.parseFloat(value);
+      case INT:
+        return Integer.parseInt(value);
+      case LONG:
+        return Long.parseLong(value);
+      case SHORT:
+        return Short.parseShort(value);
+      case STRING:
+      case VARCHAR:
+        return value.getBytes();
+      case TIMESTAMP:
+        return Timestamp.valueOf(value);
+      case DATE:
+        return Date.valueOf(value);
+    }
+
+    throwUnsupportedHiveDataTypeError(pCat.toString());
+    return null;
+  }
+
+  public static void throwUnsupportedHiveDataTypeError(String unsupportedType) {
+    StringBuilder errMsg = new StringBuilder();
+    errMsg.append(String.format("Unsupported Hive data type %s. ", unsupportedType));
+    errMsg.append(System.getProperty("line.separator"));
+    errMsg.append("Following Hive data types are supported in Drill for querying: ");
+    errMsg.append(
+        "BOOLEAN, BYTE, SHORT, INT, LONG, FLOAT, DOUBLE, DATE, TIMESTAMP, BINARY, DECIMAL, STRING, and VARCHAR");
+
+    throw new RuntimeException(errMsg.toString());
+  }
+}

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/980dc87b/contrib/storage-hive/core/src/main/java/org/apache/drill/exec/store/hive/HiveScan.java
----------------------------------------------------------------------
diff --git a/contrib/storage-hive/core/src/main/java/org/apache/drill/exec/store/hive/HiveScan.java b/contrib/storage-hive/core/src/main/java/org/apache/drill/exec/store/hive/HiveScan.java
new file mode 100644
index 0000000..2f217d9
--- /dev/null
+++ b/contrib/storage-hive/core/src/main/java/org/apache/drill/exec/store/hive/HiveScan.java
@@ -0,0 +1,296 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.drill.exec.store.hive;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Properties;
+
+import org.apache.commons.codec.binary.Base64;
+import org.apache.drill.common.exceptions.DrillRuntimeException;
+import org.apache.drill.common.exceptions.ExecutionSetupException;
+import org.apache.drill.common.expression.SchemaPath;
+import org.apache.drill.exec.physical.EndpointAffinity;
+import org.apache.drill.exec.physical.base.AbstractGroupScan;
+import org.apache.drill.exec.physical.base.GroupScan;
+import org.apache.drill.exec.physical.base.PhysicalOperator;
+import org.apache.drill.exec.physical.base.ScanStats;
+import org.apache.drill.exec.physical.base.SubScan;
+import org.apache.drill.exec.physical.base.ScanStats.GroupScanProperty;
+import org.apache.drill.exec.proto.CoordinationProtos;
+import org.apache.drill.exec.proto.CoordinationProtos.DrillbitEndpoint;
+import org.apache.drill.exec.store.StoragePluginRegistry;
+import org.apache.drill.exec.store.schedule.CompleteFileWork;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hive.metastore.MetaStoreUtils;
+import org.apache.hadoop.hive.metastore.api.Partition;
+import org.apache.hadoop.hive.metastore.api.Table;
+import org.apache.hadoop.mapred.FileInputFormat;
+import org.apache.hadoop.mapred.InputFormat;
+import org.apache.hadoop.mapred.InputSplit;
+import org.apache.hadoop.mapred.JobConf;
+
+import com.fasterxml.jackson.annotation.JacksonInject;
+import com.fasterxml.jackson.annotation.JsonCreator;
+import com.fasterxml.jackson.annotation.JsonIgnore;
+import com.fasterxml.jackson.annotation.JsonProperty;
+import com.fasterxml.jackson.annotation.JsonTypeName;
+import com.google.common.base.Preconditions;
+import com.google.common.collect.Lists;
+import com.google.common.io.ByteArrayDataOutput;
+import com.google.common.io.ByteStreams;
+
+@JsonTypeName("hive-scan")
+public class HiveScan extends AbstractGroupScan {
+  static final org.slf4j.Logger logger = org.slf4j.LoggerFactory.getLogger(HiveScan.class);
+
+  @JsonProperty("hive-table")
+  public HiveReadEntry hiveReadEntry;
+  @JsonIgnore
+  private final Table table;
+  @JsonIgnore
+  private List<InputSplit> inputSplits = Lists.newArrayList();
+  @JsonIgnore
+  public HiveStoragePlugin storagePlugin;
+  @JsonProperty("storage-plugin")
+  public String storagePluginName;
+
+  @JsonIgnore
+  public List<Partition> partitions;
+  @JsonIgnore
+  private final Collection<DrillbitEndpoint> endpoints;
+
+  @JsonProperty("columns")
+  public List<SchemaPath> columns;
+
+  @JsonIgnore
+  List<List<InputSplit>> mappings;
+
+  @JsonIgnore
+  Map<InputSplit, Partition> partitionMap = new HashMap();
+
+  @JsonCreator
+  public HiveScan(@JsonProperty("hive-table") HiveReadEntry hiveReadEntry, @JsonProperty("storage-plugin") String storagePluginName,
+      @JsonProperty("columns") List<SchemaPath> columns,
+      @JacksonInject StoragePluginRegistry pluginRegistry) throws ExecutionSetupException {
+    this.hiveReadEntry = hiveReadEntry;
+    this.table = hiveReadEntry.getTable();
+    this.storagePluginName = storagePluginName;
+    this.storagePlugin = (HiveStoragePlugin) pluginRegistry.getPlugin(storagePluginName);
+    this.columns = columns;
+    this.partitions = hiveReadEntry.getPartitions();
+    getSplits();
+    endpoints = storagePlugin.getContext().getBits();
+  }
+
+  public HiveScan(HiveReadEntry hiveReadEntry, HiveStoragePlugin storagePlugin, List<SchemaPath> columns) throws ExecutionSetupException {
+    this.table = hiveReadEntry.getTable();
+    this.hiveReadEntry = hiveReadEntry;
+    this.columns = columns;
+    this.partitions = hiveReadEntry.getPartitions();
+    // Assign the plugin field so copies made via the copy constructor keep a usable reference.
+    this.storagePlugin = storagePlugin;
+    getSplits();
+    endpoints = storagePlugin.getContext().getBits();
+    this.storagePluginName = storagePlugin.getName();
+  }
+
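+  // Copy constructor backing clone() and getNewWithChildren(); reuses the already
+  // computed splits and partition map instead of recomputing them.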
+  private HiveScan(HiveScan that) {
+    this.columns = that.columns;
+    this.endpoints = that.endpoints;
+    this.hiveReadEntry = that.hiveReadEntry;
+    this.inputSplits = that.inputSplits;
+    this.mappings = that.mappings;
+    this.partitionMap = that.partitionMap;
+    this.partitions = that.partitions;
+    this.storagePlugin = that.storagePlugin;
+    this.storagePluginName = that.storagePluginName;
+    this.table = that.table;
+  }
+
+  public List<SchemaPath> getColumns() {
+    return columns;
+  }
+
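+  // Builds the list of Hadoop InputSplits: for an unpartitioned table, from the
+  // table's storage descriptor; for a partitioned table, one InputFormat pass per
+  // partition. partitionMap records which partition (if any) owns each split.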
+  private void getSplits() throws ExecutionSetupException {
+    try {
+      if (partitions == null || partitions.size() == 0) {
+        Properties properties = MetaStoreUtils.getTableMetadata(table);
+        JobConf job = new JobConf();
+        for (Object obj : properties.keySet()) {
+          job.set((String) obj, (String) properties.get(obj));
+        }
+        InputFormat<?, ?> format = (InputFormat<?, ?>) Class.forName(table.getSd().getInputFormat()).getConstructor().newInstance();
+        job.setInputFormat(format.getClass());
+        Path path = new Path(table.getSd().getLocation());
+        FileInputFormat.addInputPath(job, path);
+        format = job.getInputFormat();
+        for (InputSplit split : format.getSplits(job, 1)) {
+          inputSplits.add(split);
+        }
+        for (InputSplit split : inputSplits) {
+          partitionMap.put(split, null);
+        }
+      } else {
+        for (Partition partition : partitions) {
+          Properties properties = MetaStoreUtils.getPartitionMetadata(partition, table);
+          JobConf job = new JobConf();
+          for (Object obj : properties.keySet()) {
+            job.set((String) obj, (String) properties.get(obj));
+          }
+          InputFormat<?, ?> format = (InputFormat<?, ?>) Class.forName(partition.getSd().getInputFormat()).getConstructor().newInstance();
+          job.setInputFormat(format.getClass());
+          FileInputFormat.addInputPath(job, new Path(partition.getSd().getLocation()));
+          format = job.getInputFormat();
+          InputSplit[] splits = format.getSplits(job, 1);
+          for (InputSplit split : splits) {
+            inputSplits.add(split);
+            partitionMap.put(split, partition);
+          }
+        }
+      }
+    } catch (ReflectiveOperationException | IOException e) {
+      throw new ExecutionSetupException(e);
+    }
+  }
+
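+  // Distributes the input splits across the assigned endpoints round-robin.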
+  @Override
+  public void applyAssignments(List<CoordinationProtos.DrillbitEndpoint> endpoints) {
+    mappings = Lists.newArrayList();
+    for (int i = 0; i < endpoints.size(); i++) {
+      mappings.add(new ArrayList<InputSplit>());
+    }
+    int count = endpoints.size();
+    for (int i = 0; i < inputSplits.size(); i++) {
+      mappings.get(i % count).add(inputSplits.get(i));
+    }
+  }
+
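+  // InputSplit is a Hadoop Writable, not directly JSON-serializable; its serialized
+  // form is Base64-encoded so it can travel inside the physical plan.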
+  public static String serializeInputSplit(InputSplit split) throws IOException {
+    ByteArrayDataOutput byteArrayOutputStream = ByteStreams.newDataOutput();
+    split.write(byteArrayOutputStream);
+    String encoded = Base64.encodeBase64String(byteArrayOutputStream.toByteArray());
+    logger.debug("Encoded split string for split {} : {}", split, encoded);
+    return encoded;
+  }
+
+  @Override
+  public SubScan getSpecificScan(int minorFragmentId) throws ExecutionSetupException {
+    try {
+      List<InputSplit> splits = mappings.get(minorFragmentId);
+      List<Partition> parts = Lists.newArrayList();
+      List<String> encodedInputSplits = Lists.newArrayList();
+      List<String> splitTypes = Lists.newArrayList();
+      for (InputSplit split : splits) {
+        parts.add(partitionMap.get(split));
+        encodedInputSplits.add(serializeInputSplit(split));
+        splitTypes.add(split.getClass().getCanonicalName());
+      }
+      if (parts.contains(null)) {
+        parts = null;
+      }
+      return new HiveSubScan(encodedInputSplits, hiveReadEntry, splitTypes, columns);
+    } catch (IOException | ReflectiveOperationException e) {
+      throw new ExecutionSetupException(e);
+    }
+  }
+
+  @Override
+  public int getMaxParallelizationWidth() {
+    return inputSplits.size();
+  }
+
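+  // Affinity for an endpoint is the fraction of total input bytes whose splits
+  // report that endpoint's address as a location.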
+  @Override
+  public List<EndpointAffinity> getOperatorAffinity() {
+    Map<String, DrillbitEndpoint> endpointMap = new HashMap<>();
+    for (DrillbitEndpoint endpoint : endpoints) {
+      endpointMap.put(endpoint.getAddress(), endpoint);
+      logger.debug("endpoing address: {}", endpoint.getAddress());
+    }
+    Map<DrillbitEndpoint, EndpointAffinity> affinityMap = new HashMap<>();
+    try {
+      long totalSize = 0;
+      for (InputSplit split : inputSplits) {
+        totalSize += Math.max(1, split.getLength());
+      }
+      for (InputSplit split : inputSplits) {
+        float affinity = ((float) Math.max(1, split.getLength())) / totalSize;
+        for (String loc : split.getLocations()) {
+          logger.debug("split location: {}", loc);
+          DrillbitEndpoint endpoint = endpointMap.get(loc);
+          if (endpoint != null) {
+            if (affinityMap.containsKey(endpoint)) {
+              affinityMap.get(endpoint).addAffinity(affinity);
+            } else {
+              affinityMap.put(endpoint, new EndpointAffinity(endpoint, affinity));
+            }
+          }
+        }
+      }
+    } catch (IOException e) {
+      throw new DrillRuntimeException(e);
+    }
+    for (DrillbitEndpoint ep : affinityMap.keySet()) {
+      Preconditions.checkNotNull(ep);
+    }
+    for (EndpointAffinity a : affinityMap.values()) {
+      Preconditions.checkNotNull(a.getEndpoint());
+    }
+    return Lists.newArrayList(affinityMap.values());
+  }
+
+  @Override
+  public ScanStats getScanStats() {
+    try {
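+      // No exact statistics are available here; estimate the row count by assuming
+      // roughly 1KB of input data per row.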
+      long data = 0;
+      for (InputSplit split : inputSplits) {
+        data += split.getLength();
+      }
+
+      long estRowCount = data / 1024;
+      return new ScanStats(GroupScanProperty.NO_EXACT_ROW_COUNT, estRowCount, 1, data);
+    } catch (IOException e) {
+      throw new DrillRuntimeException(e);
+    }
+  }
+
+  @Override
+  public PhysicalOperator getNewWithChildren(List<PhysicalOperator> children) throws ExecutionSetupException {
+    return new HiveScan(this);
+  }
+
+  @Override
+  public String getDigest() {
+    return toString();
+  }
+
+  @Override
+  public String toString() {
+    return "HiveScan [table=" + table
+        + ", inputSplits=" + inputSplits
+        + ", columns=" + columns + "]";
+  }
+
+  @Override
+  public GroupScan clone(List<SchemaPath> columns) {
+    HiveScan newScan = new HiveScan(this);
+    newScan.columns = columns;
+    return newScan;
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/980dc87b/contrib/storage-hive/core/src/main/java/org/apache/drill/exec/store/hive/HiveScanBatchCreator.java
----------------------------------------------------------------------
diff --git a/contrib/storage-hive/core/src/main/java/org/apache/drill/exec/store/hive/HiveScanBatchCreator.java b/contrib/storage-hive/core/src/main/java/org/apache/drill/exec/store/hive/HiveScanBatchCreator.java
new file mode 100644
index 0000000..6e540ad
--- /dev/null
+++ b/contrib/storage-hive/core/src/main/java/org/apache/drill/exec/store/hive/HiveScanBatchCreator.java
@@ -0,0 +1,72 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.drill.exec.store.hive;
+
+import com.google.common.collect.Lists;
+import org.apache.drill.common.exceptions.ExecutionSetupException;
+import org.apache.drill.exec.ops.FragmentContext;
+import org.apache.drill.exec.physical.impl.BatchCreator;
+import org.apache.drill.exec.physical.impl.ScanBatch;
+import org.apache.drill.exec.record.RecordBatch;
+import org.apache.drill.exec.store.RecordReader;
+import org.apache.hadoop.hive.metastore.api.Partition;
+import org.apache.hadoop.hive.metastore.api.Table;
+import org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe;
+import org.apache.hadoop.mapred.InputSplit;
+import org.apache.hadoop.mapred.TextInputFormat;
+
+import java.util.List;
+
+public class HiveScanBatchCreator implements BatchCreator<HiveSubScan> {
+
+  @Override
+  public RecordBatch getBatch(FragmentContext context, HiveSubScan config, List<RecordBatch> children) throws ExecutionSetupException {
+    List<RecordReader> readers = Lists.newArrayList();
+    Table table = config.getTable();
+    List<InputSplit> splits = config.getInputSplits();
+    List<Partition> partitions = config.getPartitions();
+    boolean hasPartitions = (partitions != null && partitions.size() > 0);
+    int i = 0;
+
+    // Native hive text record reader doesn't handle all types currently. For now use HiveRecordReader which uses
+    // Hive InputFormat and SerDe classes to read the data.
+    //if (table.getSd().getInputFormat().equals(TextInputFormat.class.getCanonicalName()) &&
+    //        table.getSd().getSerdeInfo().getSerializationLib().equals(LazySimpleSerDe.class.getCanonicalName()) &&
+    //        config.getColumns() != null) {
+    //  for (InputSplit split : splits) {
+    //    readers.add(new HiveTextRecordReader(table,
+    //        (hasPartitions ? partitions.get(i++) : null),
+    //        split, config.getColumns(), context));
+    //  }
+    //} else {
+      for (InputSplit split : splits) {
+        readers.add(new HiveRecordReader(table,
+            (hasPartitions ? partitions.get(i++) : null),
+            split, config.getColumns(), context));
+      }
+    //}
+
+    // If there are no readers created (which is possible when the table is empty), create an empty RecordReader to
+    // output the schema
+    if (readers.size() == 0) {
+      readers.add(new HiveRecordReader(table, null, null, config.getColumns(), context));
+    }
+
+    return new ScanBatch(config, context, readers.iterator());
+  }
+}

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/980dc87b/contrib/storage-hive/core/src/main/java/org/apache/drill/exec/store/hive/HiveStoragePlugin.java
----------------------------------------------------------------------
diff --git a/contrib/storage-hive/core/src/main/java/org/apache/drill/exec/store/hive/HiveStoragePlugin.java b/contrib/storage-hive/core/src/main/java/org/apache/drill/exec/store/hive/HiveStoragePlugin.java
new file mode 100644
index 0000000..c5a6e2c
--- /dev/null
+++ b/contrib/storage-hive/core/src/main/java/org/apache/drill/exec/store/hive/HiveStoragePlugin.java
@@ -0,0 +1,87 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.drill.exec.store.hive;
+
+import java.io.IOException;
+import java.util.List;
+
+import net.hydromatic.optiq.Schema;
+import net.hydromatic.optiq.Schema.TableType;
+import net.hydromatic.optiq.SchemaPlus;
+
+import org.apache.drill.common.JSONOptions;
+import org.apache.drill.common.exceptions.ExecutionSetupException;
+import org.apache.drill.common.expression.SchemaPath;
+import org.apache.drill.exec.rpc.user.UserSession;
+import org.apache.drill.exec.server.DrillbitContext;
+import org.apache.drill.exec.store.AbstractStoragePlugin;
+import org.apache.drill.exec.store.hive.schema.HiveSchemaFactory;
+import org.apache.hadoop.hive.conf.HiveConf;
+
+import com.fasterxml.jackson.core.type.TypeReference;
+import com.fasterxml.jackson.databind.ObjectMapper;
+
+public class HiveStoragePlugin extends AbstractStoragePlugin {
+
+  static final org.slf4j.Logger logger = org.slf4j.LoggerFactory.getLogger(HiveStoragePlugin.class);
+
+  private final HiveStoragePluginConfig config;
+  private final HiveSchemaFactory schemaFactory;
+  private final DrillbitContext context;
+  private final String name;
+
+  public HiveStoragePlugin(HiveStoragePluginConfig config, DrillbitContext context, String name) throws ExecutionSetupException {
+    this.config = config;
+    this.context = context;
+    this.schemaFactory = new HiveSchemaFactory(this, name, config.getHiveConf());
+    this.name = name;
+  }
+
+  public HiveStoragePluginConfig getConfig() {
+    return config;
+  }
+
+  public String getName(){
+    return name;
+  }
+
+  public DrillbitContext getContext() {
+    return context;
+  }
+
+  @Override
+  public HiveScan getPhysicalScan(JSONOptions selection, List<SchemaPath> columns) throws IOException {
+    HiveReadEntry hiveReadEntry = selection.getListWith(new ObjectMapper(), new TypeReference<HiveReadEntry>(){});
+    try {
+      if (hiveReadEntry.getJdbcTableType() == TableType.VIEW) {
+        throw new UnsupportedOperationException("Querying Hive views from Drill is not supported in current version.");
+      }
+
+      return new HiveScan(hiveReadEntry, this, null);
+    } catch (ExecutionSetupException e) {
+      throw new IOException(e);
+    }
+  }
+
+  @Override
+  public void registerSchemas(UserSession session, SchemaPlus parent) {
+    schemaFactory.registerSchemas(session, parent);
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/980dc87b/contrib/storage-hive/core/src/main/java/org/apache/drill/exec/store/hive/HiveStoragePluginConfig.java
----------------------------------------------------------------------
diff --git a/contrib/storage-hive/core/src/main/java/org/apache/drill/exec/store/hive/HiveStoragePluginConfig.java b/contrib/storage-hive/core/src/main/java/org/apache/drill/exec/store/hive/HiveStoragePluginConfig.java
new file mode 100644
index 0000000..cbd7906
--- /dev/null
+++ b/contrib/storage-hive/core/src/main/java/org/apache/drill/exec/store/hive/HiveStoragePluginConfig.java
@@ -0,0 +1,74 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.drill.exec.store.hive;
+
+import com.fasterxml.jackson.annotation.JsonCreator;
+import com.fasterxml.jackson.annotation.JsonIgnore;
+import com.fasterxml.jackson.annotation.JsonProperty;
+import com.fasterxml.jackson.annotation.JsonTypeName;
+
+import org.apache.drill.common.logical.StoragePluginConfigBase;
+import org.apache.hadoop.hive.conf.HiveConf;
+
+import java.util.Map;
+
+@JsonTypeName(HiveStoragePluginConfig.NAME)
+public class HiveStoragePluginConfig extends StoragePluginConfigBase {
+  @JsonProperty
+  public Map<String, String> configProps;
+  @JsonIgnore
+  private HiveConf hiveConf;
+
+  public static final String NAME = "hive";
+
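+  // Lazily builds the HiveConf, overlaying any user-supplied configuration
+  // properties from the plugin definition.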
+  @JsonIgnore
+  public HiveConf getHiveConf() {
+    if (hiveConf == null) {
+      hiveConf = new HiveConf();
+      if (configProps != null) {
+        for (Map.Entry<String, String> entry : configProps.entrySet()) {
+          hiveConf.set(entry.getKey(), entry.getValue());
+        }
+      }
+    }
+
+    return hiveConf;
+  }
+
+  @JsonCreator
+  public HiveStoragePluginConfig(@JsonProperty("config") Map<String, String> props) {
+    this.configProps = props;
+  }
+
+  @Override
+  public int hashCode() {
+    return configProps != null ? configProps.hashCode() : 0;
+  }
+
+  @Override
+  public boolean equals(Object o) {
+    if (this == o) {
+      return true;
+    }
+    if (o == null || getClass() != o.getClass()) {
+      return false;
+    }
+
+    HiveStoragePluginConfig that = (HiveStoragePluginConfig) o;
+    return configProps != null ? configProps.equals(that.configProps) : that.configProps == null;
+  }
+}

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/980dc87b/contrib/storage-hive/core/src/main/java/org/apache/drill/exec/store/hive/HiveSubScan.java
----------------------------------------------------------------------
diff --git a/contrib/storage-hive/core/src/main/java/org/apache/drill/exec/store/hive/HiveSubScan.java b/contrib/storage-hive/core/src/main/java/org/apache/drill/exec/store/hive/HiveSubScan.java
new file mode 100644
index 0000000..c0e479a
--- /dev/null
+++ b/contrib/storage-hive/core/src/main/java/org/apache/drill/exec/store/hive/HiveSubScan.java
@@ -0,0 +1,145 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.drill.exec.store.hive;
+
+import java.io.IOException;
+import java.util.Iterator;
+import java.util.List;
+
+import org.apache.commons.codec.binary.Base64;
+import org.apache.drill.common.exceptions.ExecutionSetupException;
+import org.apache.drill.common.expression.SchemaPath;
+import org.apache.drill.exec.physical.base.AbstractBase;
+import org.apache.drill.exec.physical.base.PhysicalOperator;
+import org.apache.drill.exec.physical.base.PhysicalVisitor;
+import org.apache.drill.exec.physical.base.SubScan;
+import org.apache.drill.exec.proto.UserBitShared.CoreOperatorType;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hive.metastore.api.Partition;
+import org.apache.hadoop.hive.metastore.api.Table;
+import org.apache.hadoop.mapred.FileSplit;
+import org.apache.hadoop.mapred.InputSplit;
+
+import com.fasterxml.jackson.annotation.JsonCreator;
+import com.fasterxml.jackson.annotation.JsonIgnore;
+import com.fasterxml.jackson.annotation.JsonProperty;
+import com.fasterxml.jackson.annotation.JsonTypeName;
+import com.google.common.collect.Iterators;
+import com.google.common.collect.Lists;
+import com.google.common.io.ByteArrayDataInput;
+import com.google.common.io.ByteStreams;
+
+@JsonTypeName("hive-sub-scan")
+public class HiveSubScan extends AbstractBase implements SubScan {
+
+  private List<String> splits;
+
+  private HiveReadEntry hiveReadEntry;
+
+  private List<String> splitClasses;
+
+  private List<SchemaPath> columns;
+
+  @JsonIgnore
+  private List<InputSplit> inputSplits = Lists.newArrayList();
+  @JsonIgnore
+  private Table table;
+  @JsonIgnore
+  private List<Partition> partitions;
+
+  @JsonCreator
+  public HiveSubScan(@JsonProperty("splits") List<String> splits,
+                     @JsonProperty("hiveReadEntry") HiveReadEntry hiveReadEntry,
+                     @JsonProperty("splitClasses") List<String> splitClasses,
+                     @JsonProperty("columns") List<SchemaPath> columns) throws IOException, ReflectiveOperationException {
+    this.hiveReadEntry = hiveReadEntry;
+    this.table = hiveReadEntry.getTable();
+    this.partitions = hiveReadEntry.getPartitions();
+    this.splits = splits;
+    this.splitClasses = splitClasses;
+    this.columns = columns;
+
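+    // Re-hydrate the Hadoop InputSplits from their Base64-encoded serialized form.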
+    for (int i = 0; i < splits.size(); i++) {
+      inputSplits.add(deserializeInputSplit(splits.get(i), splitClasses.get(i)));
+    }
+  }
+
+  public List<String> getSplits() {
+    return splits;
+  }
+
+  public Table getTable() {
+    return table;
+  }
+
+  public List<Partition> getPartitions() {
+    return partitions;
+  }
+
+  public List<String> getSplitClasses() {
+    return splitClasses;
+  }
+
+  public List<SchemaPath> getColumns() {
+    return columns;
+  }
+
+  public List<InputSplit> getInputSplits() {
+    return inputSplits;
+  }
+
+  public HiveReadEntry getHiveReadEntry() {
+    return hiveReadEntry;
+  }
+
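+  // FileSplit is special-cased and constructed explicitly; any other split class
+  // is instantiated reflectively before readFields() restores its state.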
+  public static InputSplit deserializeInputSplit(String base64, String className) throws IOException, ReflectiveOperationException{
+    InputSplit split;
+    if (Class.forName(className) == FileSplit.class) {
+      split = new FileSplit((Path) null, 0, 0, (String[])null);
+    } else {
+      split = (InputSplit) Class.forName(className).getConstructor().newInstance();
+    }
+    ByteArrayDataInput byteArrayDataInput = ByteStreams.newDataInput(Base64.decodeBase64(base64));
+    split.readFields(byteArrayDataInput);
+    return split;
+  }
+
+  @Override
+  public <T, X, E extends Throwable> T accept(PhysicalVisitor<T, X, E> physicalVisitor, X value) throws E {
+    return physicalVisitor.visitSubScan(this, value);
+  }
+
+  @Override
+  public PhysicalOperator getNewWithChildren(List<PhysicalOperator> children) throws ExecutionSetupException {
+    try {
+      return new HiveSubScan(splits, hiveReadEntry, splitClasses, columns);
+    } catch (IOException | ReflectiveOperationException e) {
+      throw new ExecutionSetupException(e);
+    }
+  }
+
+  @Override
+  public Iterator<PhysicalOperator> iterator() {
+    return Iterators.emptyIterator();
+  }
+
+  @Override
+  public int getOperatorType() {
+    return CoreOperatorType.HIVE_SUB_SCAN_VALUE;
+  }
+}

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/980dc87b/contrib/storage-hive/core/src/main/java/org/apache/drill/exec/store/hive/HiveTable.java
----------------------------------------------------------------------
diff --git a/contrib/storage-hive/core/src/main/java/org/apache/drill/exec/store/hive/HiveTable.java b/contrib/storage-hive/core/src/main/java/org/apache/drill/exec/store/hive/HiveTable.java
new file mode 100644
index 0000000..50c81e9
--- /dev/null
+++ b/contrib/storage-hive/core/src/main/java/org/apache/drill/exec/store/hive/HiveTable.java
@@ -0,0 +1,331 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.drill.exec.store.hive;
+
+import java.util.List;
+import java.util.Map;
+
+import org.apache.hadoop.hive.metastore.api.FieldSchema;
+import org.apache.hadoop.hive.metastore.api.Order;
+import org.apache.hadoop.hive.metastore.api.Partition;
+import org.apache.hadoop.hive.metastore.api.SerDeInfo;
+import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
+import org.apache.hadoop.hive.metastore.api.Table;
+
+import com.fasterxml.jackson.annotation.JsonCreator;
+import com.fasterxml.jackson.annotation.JsonIgnore;
+import com.fasterxml.jackson.annotation.JsonProperty;
+import com.fasterxml.jackson.annotation.JsonTypeName;
+import com.google.common.collect.Lists;
+
+@JsonTypeName("table")
+public class HiveTable {
+
+  @JsonIgnore
+  private Table table;
+
+  @JsonProperty
+  public String tableName;
+  @JsonProperty
+  public String dbName;
+  @JsonProperty
+  public String owner;
+  @JsonProperty
+  public int createTime;
+  @JsonProperty
+  public int lastAccessTime;
+  @JsonProperty
+  public int retention;
+  @JsonProperty
+  public StorageDescriptorWrapper sd;
+  @JsonProperty
+  public List<FieldSchemaWrapper> partitionKeys;
+  @JsonProperty
+  public Map<String,String> parameters;
+  @JsonProperty
+  public String viewOriginalText;
+  @JsonProperty
+  public String viewExpandedText;
+  @JsonProperty
+  public String tableType;
+
+  @JsonCreator
+  public HiveTable(@JsonProperty("tableName") String tableName, @JsonProperty("dbName") String dbName, @JsonProperty("owner") String owner, @JsonProperty("createTime") int createTime,
+                   @JsonProperty("lastAccessTime") int lastAccessTime, @JsonProperty("retention") int retention, @JsonProperty("sd") StorageDescriptorWrapper sd,
+                   @JsonProperty("partitionKeys") List<FieldSchemaWrapper> partitionKeys, @JsonProperty("parameters") Map<String, String> parameters,
+                   @JsonProperty("viewOriginalText") String viewOriginalText, @JsonProperty("viewExpandedText") String viewExpandedText, @JsonProperty("tableType") String tableType
+                   ) {
+    this.tableName = tableName;
+    this.dbName = dbName;
+    this.owner = owner;
+    this.createTime = createTime;
+    this.lastAccessTime = lastAccessTime;
+    this.retention = retention;
+    this.sd = sd;
+    this.partitionKeys = partitionKeys;
+    this.parameters = parameters;
+    this.viewOriginalText = viewOriginalText;
+    this.viewExpandedText = viewExpandedText;
+    this.tableType = tableType;
+
+    List<FieldSchema> partitionKeysUnwrapped = Lists.newArrayList();
+    for (FieldSchemaWrapper w : partitionKeys) {
+      partitionKeysUnwrapped.add(w.getFieldSchema());
+    }
+    StorageDescriptor sdUnwrapped = sd.getSd();
+    this.table = new Table(tableName, dbName, owner, createTime, lastAccessTime, retention, sdUnwrapped, partitionKeysUnwrapped,
+            parameters, viewOriginalText, viewExpandedText, tableType);
+  }
+
+  public HiveTable(Table table) {
+    if (table == null) {
+      return;
+    }
+    this.table = table;
+    this.tableName = table.getTableName();
+    this.dbName = table.getDbName();
+    this.owner = table.getOwner();
+    this.createTime = table.getCreateTime();
+    this.lastAccessTime = table.getLastAccessTime();
+    this.retention = table.getRetention();
+    this.sd = new StorageDescriptorWrapper(table.getSd());
+    this.partitionKeys = Lists.newArrayList();
+    for (FieldSchema f : table.getPartitionKeys()) {
+      this.partitionKeys.add(new FieldSchemaWrapper(f));
+    }
+    this.parameters = table.getParameters();
+    this.viewOriginalText = table.getViewOriginalText();
+    this.viewExpandedText = table.getViewExpandedText();
+    this.tableType = table.getTableType();
+  }
+
+  @JsonIgnore
+  public Table getTable() {
+    return table;
+  }
+
+  public static class HivePartition {
+
+    @JsonIgnore
+    private Partition partition;
+
+    @JsonProperty
+    public List<String> values;
+    @JsonProperty
+    public String tableName;
+    @JsonProperty
+    public String dbName;
+    @JsonProperty
+    public int createTime;
+    @JsonProperty
+    public int lastAccessTime;
+    @JsonProperty
+    public StorageDescriptorWrapper sd;
+    @JsonProperty
+    public Map<String,String> parameters;
+
+    @JsonCreator
+    public HivePartition(@JsonProperty("values") List<String> values, @JsonProperty("tableName") String tableName, @JsonProperty("dbName") String dbName, @JsonProperty("createTime") int createTime,
+                     @JsonProperty("lastAccessTime") int lastAccessTime,  @JsonProperty("sd") StorageDescriptorWrapper sd,
+                     @JsonProperty("parameters") Map<String, String> parameters
+    ) {
+      this.values = values;
+      this.tableName = tableName;
+      this.dbName = dbName;
+      this.createTime = createTime;
+      this.lastAccessTime = lastAccessTime;
+      this.sd = sd;
+      this.parameters = parameters;
+
+      StorageDescriptor sdUnwrapped = sd.getSd();
+      this.partition = new org.apache.hadoop.hive.metastore.api.Partition(values, tableName, dbName, createTime, lastAccessTime, sdUnwrapped, parameters);
+    }
+
+    public HivePartition(Partition partition) {
+      if (partition == null) {
+        return;
+      }
+      this.partition = partition;
+      this.values = partition.getValues();
+      this.tableName = partition.getTableName();
+      this.dbName = partition.getDbName();
+      this.createTime = partition.getCreateTime();
+      this.lastAccessTime = partition.getLastAccessTime();
+      this.sd = new StorageDescriptorWrapper(partition.getSd());
+      this.parameters = partition.getParameters();
+    }
+
+    @JsonIgnore
+    public Partition getPartition() {
+      return partition;
+    }
+  }
+
+  public static class StorageDescriptorWrapper {
+    @JsonIgnore
+    private StorageDescriptor sd;
+    @JsonProperty
+    public List<FieldSchemaWrapper> cols;
+    @JsonProperty
+    public String location;
+    @JsonProperty
+    public String inputFormat;
+    @JsonProperty
+    public String outputFormat;
+    @JsonProperty
+    public boolean compressed;
+    @JsonProperty
+    public int numBuckets;
+    @JsonProperty
+    public SerDeInfoWrapper serDeInfo;
+//    @JsonProperty
+//    public List<String> bucketCols;
+    @JsonProperty
+    public List<OrderWrapper> sortCols;
+    @JsonProperty
+    public Map<String,String> parameters;
+
+    @JsonCreator
+    public StorageDescriptorWrapper(@JsonProperty("cols") List<FieldSchemaWrapper> cols, @JsonProperty("location") String location, @JsonProperty("inputFormat") String inputFormat,
+                                    @JsonProperty("outputFormat") String outputFormat, @JsonProperty("compressed") boolean compressed, @JsonProperty("numBuckets") int numBuckets,
+                                    @JsonProperty("serDeInfo") SerDeInfoWrapper serDeInfo,  @JsonProperty("sortCols") List<OrderWrapper> sortCols,
+                                    @JsonProperty("parameters") Map<String,String> parameters) {
+      this.cols = cols;
+      this.location = location;
+      this.inputFormat = inputFormat;
+      this.outputFormat = outputFormat;
+      this.compressed = compressed;
+      this.numBuckets = numBuckets;
+      this.serDeInfo = serDeInfo;
+//      this.bucketCols = bucketCols;
+      this.sortCols = sortCols;
+      this.parameters = parameters;
+      List<FieldSchema> colsUnwrapped = Lists.newArrayList();
+      for (FieldSchemaWrapper w : cols) {
+        colsUnwrapped.add(w.getFieldSchema());
+      }
+      SerDeInfo serDeInfoUnwrapped = serDeInfo.getSerDeInfo();
+      List<Order> sortColsUnwrapped = Lists.newArrayList();
+      for (OrderWrapper w : sortCols) {
+        sortColsUnwrapped.add(w.getOrder());
+      }
+//      this.sd = new StorageDescriptor(colsUnwrapped, location, inputFormat, outputFormat, compressed, numBuckets, serDeInfoUnwrapped,
+//              bucketCols, sortColsUnwrapped, parameters);
+      this.sd = new StorageDescriptor(colsUnwrapped, location, inputFormat, outputFormat, compressed, numBuckets, serDeInfoUnwrapped,
+              null, sortColsUnwrapped, parameters);
+    }
+
+    public StorageDescriptorWrapper(StorageDescriptor sd) {
+      this.sd = sd;
+      this.cols = Lists.newArrayList();
+      for (FieldSchema f : sd.getCols()) {
+        this.cols.add(new FieldSchemaWrapper(f));
+      }
+      this.location = sd.getLocation();
+      this.inputFormat = sd.getInputFormat();
+      this.outputFormat = sd.getOutputFormat();
+      this.compressed = sd.isCompressed();
+      this.numBuckets = sd.getNumBuckets();
+      this.serDeInfo = new SerDeInfoWrapper(sd.getSerdeInfo());
+//      this.bucketCols = sd.getBucketCols();
+      this.sortCols = Lists.newArrayList();
+      for (Order o : sd.getSortCols()) {
+        this.sortCols.add(new OrderWrapper(o));
+      }
+      this.parameters = sd.getParameters();
+    }
+
+    @JsonIgnore
+    public StorageDescriptor getSd() {
+      return sd;
+    }
+
+  }
+
+  public static class SerDeInfoWrapper {
+    @JsonIgnore
+    private SerDeInfo serDeInfo;
+    @JsonProperty
+    public String name;
+    @JsonProperty
+    public String serializationLib;
+    @JsonProperty
+    public Map<String,String> parameters;
+
+    @JsonCreator
+    public SerDeInfoWrapper(@JsonProperty("name") String name, @JsonProperty("serializationLib") String serializationLib, @JsonProperty("parameters") Map<String, String> parameters) {
+      this.name = name;
+      this.serializationLib = serializationLib;
+      this.parameters = parameters;
+      this.serDeInfo = new SerDeInfo(name, serializationLib, parameters);
+    }
+
+    public SerDeInfoWrapper(SerDeInfo serDeInfo) {
+      this.serDeInfo = serDeInfo;
+      this.name = serDeInfo.getName();
+      this.serializationLib = serDeInfo.getSerializationLib();
+      this.parameters = serDeInfo.getParameters();
+    }
+
+    @JsonIgnore
+    public SerDeInfo getSerDeInfo() {
+      return serDeInfo;
+    }
+  }
+
+  public static class FieldSchemaWrapper {
+    @JsonIgnore
+    private FieldSchema fieldSchema;
+    @JsonProperty
+    public String name;
+    @JsonProperty
+    public String type;
+    @JsonProperty
+    public String comment;
+
+    @JsonCreator
+    public FieldSchemaWrapper(@JsonProperty("name") String name, @JsonProperty("type") String type, @JsonProperty("comment") String comment) {
+      this.name = name;
+      this.type = type;
+      this.comment = comment;
+      this.fieldSchema = new FieldSchema(name, type, comment);
+    }
+
+    public FieldSchemaWrapper(FieldSchema fieldSchema) {
+      this.fieldSchema = fieldSchema;
+      this.name = fieldSchema.getName();
+      this.type = fieldSchema.getType();
+      this.comment = fieldSchema.getComment();
+    }
+
+    @JsonIgnore
+    public FieldSchema getFieldSchema() {
+      return fieldSchema;
+    }
+  }
+
+  public static class OrderWrapper {
+    @JsonIgnore
+    private Order ord;
+    @JsonProperty
+    public String col;
+    @JsonProperty
+    public int order;
+
+    @JsonCreator
+    public OrderWrapper(@JsonProperty("col") String col, @JsonProperty("order") int order) {
+      this.col = col;
+      this.order = order;
+    }
+
+    public OrderWrapper(Order ord) {
+      this.ord = ord;
+      this.col = ord.getCol();
+      this.order = ord.getOrder();
+    }
+
+    @JsonIgnore
+    public Order getOrder() {
+      return ord;
+    }
+  }
+}

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/980dc87b/contrib/storage-hive/core/src/main/java/org/apache/drill/exec/store/hive/HiveTextRecordReader.java
----------------------------------------------------------------------
diff --git a/contrib/storage-hive/core/src/main/java/org/apache/drill/exec/store/hive/HiveTextRecordReader.java b/contrib/storage-hive/core/src/main/java/org/apache/drill/exec/store/hive/HiveTextRecordReader.java
new file mode 100644
index 0000000..116603c
--- /dev/null
+++ b/contrib/storage-hive/core/src/main/java/org/apache/drill/exec/store/hive/HiveTextRecordReader.java
@@ -0,0 +1,176 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.drill.exec.store.hive;
+
+import java.io.IOException;
+import java.util.List;
+
+import org.apache.drill.common.exceptions.DrillRuntimeException;
+import org.apache.drill.common.exceptions.ExecutionSetupException;
+import org.apache.drill.common.expression.SchemaPath;
+import org.apache.drill.exec.ops.FragmentContext;
+import org.apache.drill.exec.vector.NullableBigIntVector;
+import org.apache.drill.exec.vector.NullableIntVector;
+import org.apache.drill.exec.vector.NullableVarCharVector;
+import org.apache.drill.exec.vector.ValueVector;
+import org.apache.drill.exec.vector.allocator.VectorAllocator;
+import org.apache.hadoop.hive.metastore.api.Partition;
+import org.apache.hadoop.hive.metastore.api.Table;
+import org.apache.hadoop.hive.serde2.objectinspector.PrimitiveObjectInspector;
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.mapred.InputSplit;
+
+import com.google.common.collect.Lists;
+
+/**
+ * Note: Native hive text record reader is not complete in implementation. For now use
+ * {@link org.apache.drill.exec.store.hive.HiveRecordReader}.
+ */
+public class HiveTextRecordReader extends HiveRecordReader {
+
+  public final byte delimiter;
+  public final List<Integer> columnIds;
+  private final int numCols;
+
+  public HiveTextRecordReader(Table table, Partition partition, InputSplit inputSplit, List<SchemaPath> projectedColumns, FragmentContext context) throws ExecutionSetupException {
+    super(table, partition, inputSplit, projectedColumns, context);
+    String d = table.getSd().getSerdeInfo().getParameters().get("field.delim");
+    if (d != null) {
+      delimiter = d.getBytes()[0];
+    } else {
+      delimiter = (byte) 1;
+    }
+    assert delimiter > 0;
+    List<Integer> ids = Lists.newArrayList();
+    for (int i = 0; i < tableColumns.size(); i++) {
+      if (selectedColumnNames.contains(tableColumns.get(i))) {
+        ids.add(i);
+      }
+    }
+    columnIds = ids;
+    numCols = tableColumns.size();
+  }
+
+  public boolean setValue(PrimitiveObjectInspector.PrimitiveCategory pCat, ValueVector vv, int index, byte[] bytes, int start) {
+    switch(pCat) {
+      case BINARY:
+        throw new UnsupportedOperationException();
+      case BOOLEAN:
+        throw new UnsupportedOperationException();
+      case BYTE:
+        throw new UnsupportedOperationException();
+      case DECIMAL:
+        throw new UnsupportedOperationException();
+      case DOUBLE:
+        throw new UnsupportedOperationException();
+      case FLOAT:
+        throw new UnsupportedOperationException();
+      case INT: {
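+        // Parse ASCII decimal digits in place ('0' == 48), stopping at the field
+        // delimiter; note this does not handle a leading minus sign.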
+        int value = 0;
+        byte b;
+        for (int i = start; (b = bytes[i]) != delimiter; i++) {
+          value = (value * 10) + b - 48;
+        }
+        return ((NullableIntVector) vv).getMutator().setSafe(index, value);
+      }
+      case LONG: {
+        long value = 0;
+        byte b;
+        for (int i = start; (b = bytes[i]) != delimiter; i++) {
+          value = (value * 10) + b - 48;
+        }
+        return ((NullableBigIntVector) vv).getMutator().setSafe(index, value);
+      }
+      case SHORT:
+        throw new UnsupportedOperationException();
+      case STRING: {
+        int end = start;
+        for (int i = start; i < bytes.length; i++) {
+          if (bytes[i] == delimiter) {
+            end = i;
+            break;
+          }
+          end = bytes.length;
+        }
+        return ((NullableVarCharVector) vv).getMutator().setSafe(index, bytes, start, end - start);
+      }
+      case TIMESTAMP:
+        throw new UnsupportedOperationException();
+
+      default:
+        throw new UnsupportedOperationException("Could not determine type");
+    }
+  }
+
+  @Override
+  public int next() {
+    for (ValueVector vv : vectors) {
+      VectorAllocator.getAllocator(vv, 50).alloc(TARGET_RECORD_COUNT);
+    }
+    try {
+      int recordCount = 0;
+      if (redoRecord != null) {
+        // Re-process the record that did not fit into the previous batch. Read the
+        // bytes from redoRecord itself, not from the still-empty current value.
+        int length = ((Text) redoRecord).getLength();
+        byte[] bytes = ((Text) redoRecord).getBytes();
+        int[] delimPositions = new int[numCols + 1];
+        delimPositions[0] = -1;
+        int p = 1;
+        for (int i = 0; i < length; i++) {
+          if (bytes[i] == delimiter) {
+            delimPositions[p++] = i;
+          }
+        }
+        for (int id : columnIds) {
+          boolean success = false; // setValue(primitiveCategories.get(id), vectors.get(id), recordCount, bytes, delimPositions[id] + 1);
+          if (!success) {
+            throw new DrillRuntimeException(String.format("Failed to write value for column %s", selectedColumnNames.get(id)));
+          }
+        }
+        redoRecord = null;
+        recordCount++;
+      }
+      while (recordCount < TARGET_RECORD_COUNT && reader.next(key, value)) {
+        int length = ((Text) value).getLength();
+        byte[] bytes = ((Text) value).getBytes();
+        int[] delimPositions = new int[numCols + 1];
+        delimPositions[0] = -1;
+        int p = 1;
+        for (int i = 0; i < length; i++) {
+          if (bytes[i] == delimiter) {
+            delimPositions[p++] = i;
+          }
+        }
+        for (int i = 0; i < columnIds.size(); i++) {
+          int id = columnIds.get(i);
+          boolean success = false; // setValue(primitiveCategories.get(i), vectors.get(i), recordCount, bytes, delimPositions[id] + 1);
+          if (!success) {
+            redoRecord = value;
+            if (partition != null) {
+              populatePartitionVectors(recordCount);
+            }
+            return recordCount;
+          }
+        }
+        recordCount++;
+      }
+      if (partition != null) {
+        populatePartitionVectors(recordCount);
+      }
+      return recordCount;
+    } catch (IOException e) {
+      throw new DrillRuntimeException(e);
+    }
+  }
+}

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/980dc87b/contrib/storage-hive/core/src/main/java/org/apache/drill/exec/store/hive/schema/DrillHiveTable.java
----------------------------------------------------------------------
diff --git a/contrib/storage-hive/core/src/main/java/org/apache/drill/exec/store/hive/schema/DrillHiveTable.java b/contrib/storage-hive/core/src/main/java/org/apache/drill/exec/store/hive/schema/DrillHiveTable.java
new file mode 100644
index 0000000..949fa06
--- /dev/null
+++ b/contrib/storage-hive/core/src/main/java/org/apache/drill/exec/store/hive/schema/DrillHiveTable.java
@@ -0,0 +1,181 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.drill.exec.store.hive.schema;
+
+import java.nio.charset.Charset;
+import java.util.ArrayList;
+import java.util.List;
+
+import com.google.common.collect.Lists;
+import org.apache.drill.exec.planner.logical.DrillTable;
+import org.apache.drill.exec.store.hive.HiveReadEntry;
+import org.apache.drill.exec.store.hive.HiveStoragePlugin;
+import org.apache.hadoop.hive.metastore.api.FieldSchema;
+import org.apache.hadoop.hive.ql.metadata.Table;
+import org.apache.hadoop.hive.serde2.typeinfo.ListTypeInfo;
+import org.apache.hadoop.hive.serde2.typeinfo.MapTypeInfo;
+import org.apache.hadoop.hive.serde2.typeinfo.PrimitiveTypeInfo;
+import org.apache.hadoop.hive.serde2.typeinfo.StructTypeInfo;
+import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo;
+import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils;
+import org.eigenbase.reltype.RelDataType;
+import org.eigenbase.reltype.RelDataTypeFactory;
+import org.eigenbase.sql.SqlCollation;
+import org.eigenbase.sql.type.SqlTypeName;
+
+public class DrillHiveTable extends DrillTable {
+  static final org.slf4j.Logger logger = org.slf4j.LoggerFactory.getLogger(DrillHiveTable.class);
+  
+  protected final Table hiveTable;
+  
+  public DrillHiveTable(String storageEngineName, HiveStoragePlugin plugin, HiveReadEntry readEntry) {
+    super(storageEngineName, plugin, readEntry);
+    this.hiveTable = new Table(readEntry.getTable());
+  }
+
+  @Override
+  public RelDataType getRowType(RelDataTypeFactory typeFactory) {
+    List<RelDataType> typeList = Lists.newArrayList();
+    List<String> fieldNameList = Lists.newArrayList();
+
+    List<FieldSchema> hiveFields = hiveTable.getCols();
+    for(FieldSchema hiveField : hiveFields) {
+      fieldNameList.add(hiveField.getName());
+      typeList.add(getNullableRelDataTypeFromHiveType(
+          typeFactory, TypeInfoUtils.getTypeInfoFromTypeString(hiveField.getType())));
+    }
+
+    for (FieldSchema field : hiveTable.getPartitionKeys()) {
+      fieldNameList.add(field.getName());
+      typeList.add(getNullableRelDataTypeFromHiveType(
+          typeFactory, TypeInfoUtils.getTypeInfoFromTypeString(field.getType())));
+    }
+
+    return typeFactory.createStructType(typeList, fieldNameList);
+  }
+
+  private RelDataType getNullableRelDataTypeFromHiveType(RelDataTypeFactory typeFactory, TypeInfo typeInfo) {
+    RelDataType relDataType = getRelDataTypeFromHiveType(typeFactory, typeInfo);
+    return typeFactory.createTypeWithNullability(relDataType, true);
+  }
+
+  private RelDataType getRelDataTypeFromHivePrimitiveType(RelDataTypeFactory typeFactory, PrimitiveTypeInfo pTypeInfo) {
+    switch(pTypeInfo.getPrimitiveCategory()) {
+      case BOOLEAN:
+        return typeFactory.createSqlType(SqlTypeName.BOOLEAN);
+
+      case BYTE:
+        return typeFactory.createSqlType(SqlTypeName.TINYINT);
+
+      case SHORT:
+        return typeFactory.createSqlType(SqlTypeName.SMALLINT);
+
+      case INT:
+        return typeFactory.createSqlType(SqlTypeName.INTEGER);
+
+      case LONG:
+        return typeFactory.createSqlType(SqlTypeName.BIGINT);
+
+      case FLOAT:
+        return typeFactory.createSqlType(SqlTypeName.FLOAT);
+
+      case DOUBLE:
+        return typeFactory.createSqlType(SqlTypeName.DOUBLE);
+
+      case DATE:
+        return typeFactory.createSqlType(SqlTypeName.DATE);
+
+      case TIMESTAMP:
+        return typeFactory.createSqlType(SqlTypeName.TIMESTAMP);
+
+      case BINARY:
+        return typeFactory.createSqlType(SqlTypeName.BINARY);
+
+      case DECIMAL:
+        final int precision = 38; // Hive 0.12 uses a standard precision of 38 for DECIMAL
+        return typeFactory.createSqlType(SqlTypeName.DECIMAL, precision);
+
+      case STRING:
+      case VARCHAR: {
+        int maxLen = TypeInfoUtils.getCharacterLengthForType(pTypeInfo);
+        return typeFactory.createTypeWithCharsetAndCollation(
+          typeFactory.createSqlType(SqlTypeName.VARCHAR, maxLen), /*input type*/
+          Charset.forName("ISO-8859-1"), /*unicode char set*/
+          SqlCollation.IMPLICIT /* TODO: need to decide if implicit is the correct one */
+        );
+      }
+
+      case UNKNOWN:
+      case VOID:
+      default:
+        throwUnsupportedHiveDataTypeError(pTypeInfo.getPrimitiveCategory().toString());
+    }
+
+    return null;
+  }
+
+  private RelDataType getRelDataTypeFromHiveType(RelDataTypeFactory typeFactory, TypeInfo typeInfo) {
+    switch(typeInfo.getCategory()) {
+      case PRIMITIVE:
+        return getRelDataTypeFromHivePrimitiveType(typeFactory, ((PrimitiveTypeInfo) typeInfo));
+
+      case LIST: {
+        ListTypeInfo listTypeInfo = (ListTypeInfo)typeInfo;
+        RelDataType listElemTypeInfo = getRelDataTypeFromHiveType(typeFactory, listTypeInfo.getListElementTypeInfo());
+        return typeFactory.createArrayType(listElemTypeInfo, -1);
+      }
+
+      case MAP: {
+        MapTypeInfo mapTypeInfo = (MapTypeInfo)typeInfo;
+        RelDataType keyType = getRelDataTypeFromHiveType(typeFactory, mapTypeInfo.getMapKeyTypeInfo());
+        RelDataType valueType = getRelDataTypeFromHiveType(typeFactory, mapTypeInfo.getMapValueTypeInfo());
+        return typeFactory.createMapType(keyType, valueType);
+      }
+
+      case STRUCT: {
+        StructTypeInfo structTypeInfo = (StructTypeInfo)typeInfo;
+        ArrayList<String> fieldNames = structTypeInfo.getAllStructFieldNames();
+        ArrayList<TypeInfo> fieldHiveTypeInfoList = structTypeInfo.getAllStructFieldTypeInfos();
+        List<RelDataType> fieldRelDataTypeList = Lists.newArrayList();
+        for(TypeInfo fieldHiveType : fieldHiveTypeInfoList) {
+          fieldRelDataTypeList.add(getRelDataTypeFromHiveType(typeFactory, fieldHiveType));
+        }
+        return typeFactory.createStructType(fieldRelDataTypeList, fieldNames);
+      }
+
+      case UNION:
+        logger.warn("There is no UNION data type in SQL. Converting it to Sql type OTHER to avoid " +
+            "breaking INFORMATION_SCHEMA queries");
+        return typeFactory.createSqlType(SqlTypeName.OTHER);
+    }
+
+    throwUnsupportedHiveDataTypeError(typeInfo.getCategory().toString());
+    return null;
+  }
+
+  private void throwUnsupportedHiveDataTypeError(String hiveType) {
+    StringBuilder errMsg = new StringBuilder();
+    errMsg.append(String.format("Unsupported Hive data type %s. ", hiveType));
+    errMsg.append(System.getProperty("line.separator"));
+    errMsg.append("Following Hive data types are supported in Drill INFORMATION_SCHEMA: ");
+    errMsg.append("BOOLEAN, BYTE, SHORT, INT, LONG, FLOAT, DOUBLE, DATE, TIMESTAMP, BINARY, DECIMAL, STRING, " +
+        "VARCHAR, LIST, MAP, STRUCT and UNION");
+
+    throw new RuntimeException(errMsg.toString());
+  }
+}
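
getRelDataTypeFromHiveType above is a structural recursion: primitive types map directly, while LIST, MAP and STRUCT map by first converting their element, key/value and field types. A toy sketch of the same pattern (the TypeNode classes are hypothetical stand-ins for Hive's TypeInfo hierarchy, not Drill or Hive code):

    abstract class TypeNode {}

    class PrimitiveNode extends TypeNode {
      final String sqlName;
      PrimitiveNode(String sqlName) { this.sqlName = sqlName; }
    }

    class ListNode extends TypeNode {
      final TypeNode element;
      ListNode(TypeNode element) { this.element = element; }
    }

    class MapNode extends TypeNode {
      final TypeNode key, value;
      MapNode(TypeNode key, TypeNode value) { this.key = key; this.value = value; }
    }

    public class TypeMappingSketch {
      // Leaves map directly; containers map by recursing on their children,
      // mirroring the shape of getRelDataTypeFromHiveType.
      static String toSql(TypeNode t) {
        if (t instanceof PrimitiveNode) {
          return ((PrimitiveNode) t).sqlName;
        } else if (t instanceof ListNode) {
          return toSql(((ListNode) t).element) + " ARRAY";
        } else if (t instanceof MapNode) {
          MapNode m = (MapNode) t;
          return "MAP<" + toSql(m.key) + ", " + toSql(m.value) + ">";
        }
        throw new IllegalArgumentException("Unsupported node: " + t);
      }

      public static void main(String[] args) {
        TypeNode t = new MapNode(new PrimitiveNode("VARCHAR"),
            new ListNode(new PrimitiveNode("BIGINT")));
        System.out.println(toSql(t));  // MAP<VARCHAR, BIGINT ARRAY>
      }
    }
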

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/980dc87b/contrib/storage-hive/core/src/main/java/org/apache/drill/exec/store/hive/schema/DrillHiveViewTable.java
----------------------------------------------------------------------
diff --git a/contrib/storage-hive/core/src/main/java/org/apache/drill/exec/store/hive/schema/DrillHiveViewTable.java b/contrib/storage-hive/core/src/main/java/org/apache/drill/exec/store/hive/schema/DrillHiveViewTable.java
new file mode 100644
index 0000000..b575972
--- /dev/null
+++ b/contrib/storage-hive/core/src/main/java/org/apache/drill/exec/store/hive/schema/DrillHiveViewTable.java
@@ -0,0 +1,40 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.drill.exec.store.hive.schema;
+
+import net.hydromatic.optiq.Schema.TableType;
+import org.apache.drill.exec.planner.logical.DrillViewInfoProvider;
+import org.apache.drill.exec.store.hive.HiveReadEntry;
+import org.apache.drill.exec.store.hive.HiveStoragePlugin;
+
+public class DrillHiveViewTable extends DrillHiveTable implements DrillViewInfoProvider {
+
+  public DrillHiveViewTable(String storageEngineName, HiveStoragePlugin plugin, HiveReadEntry readEntry) {
+    super(storageEngineName, plugin, readEntry);
+  }
+
+  @Override
+  public TableType getJdbcTableType() {
+    return TableType.VIEW;
+  }
+
+  @Override
+  public String getViewSql() {
+    return hiveTable.getViewExpandedText();
+  }
+}

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/980dc87b/contrib/storage-hive/core/src/main/java/org/apache/drill/exec/store/hive/schema/HiveDatabaseSchema.java
----------------------------------------------------------------------
diff --git a/contrib/storage-hive/core/src/main/java/org/apache/drill/exec/store/hive/schema/HiveDatabaseSchema.java b/contrib/storage-hive/core/src/main/java/org/apache/drill/exec/store/hive/schema/HiveDatabaseSchema.java
new file mode 100644
index 0000000..0df2374
--- /dev/null
+++ b/contrib/storage-hive/core/src/main/java/org/apache/drill/exec/store/hive/schema/HiveDatabaseSchema.java
@@ -0,0 +1,63 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.drill.exec.store.hive.schema;
+
+import java.util.List;
+import java.util.Set;
+
+import net.hydromatic.optiq.Table;
+
+import org.apache.drill.exec.planner.logical.DrillTable;
+import org.apache.drill.exec.store.AbstractSchema;
+import org.apache.drill.exec.store.dfs.FileSystemConfig;
+import org.apache.drill.exec.store.hive.HiveStoragePluginConfig;
+import org.apache.drill.exec.store.hive.schema.HiveSchemaFactory.HiveSchema;
+
+import com.google.common.collect.Sets;
+
+public class HiveDatabaseSchema extends AbstractSchema {
+  static final org.slf4j.Logger logger = org.slf4j.LoggerFactory.getLogger(HiveDatabaseSchema.class);
+
+  private final HiveSchema hiveSchema;
+  private final Set<String> tables;
+
+  public HiveDatabaseSchema( //
+      List<String> tableList, //
+      HiveSchema hiveSchema, //
+      String name) {
+    super(hiveSchema.getSchemaPath(), name);
+    this.hiveSchema = hiveSchema;
+    this.tables = Sets.newHashSet(tableList);
+  }
+
+  @Override
+  public Table getTable(String tableName) {
+    return hiveSchema.getDrillTable(this.name, tableName);
+  }
+
+  @Override
+  public Set<String> getTableNames() {
+    return tables;
+  }
+
+  @Override
+  public String getTypeName() {
+    return HiveStoragePluginConfig.NAME;
+  }
+
+}


[11/32] DRILL-875: Fixes for DRILL-707, DRILL-780, DRILL-835 (Schema change), DRILL-852, DRILL-876, DRILL-877, DRILL-878, DRILL-890

Posted by ja...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/aaa4db74/contrib/native/client/src/protobuf/UserBitShared.pb.h
----------------------------------------------------------------------
diff --git a/contrib/native/client/src/protobuf/UserBitShared.pb.h b/contrib/native/client/src/protobuf/UserBitShared.pb.h
index fb78904..ece6b7b 100644
--- a/contrib/native/client/src/protobuf/UserBitShared.pb.h
+++ b/contrib/native/client/src/protobuf/UserBitShared.pb.h
@@ -45,6 +45,12 @@ class ParsingError;
 class RecordBatchDef;
 class NamePart;
 class SerializedField;
+class QueryProfile;
+class MajorFragmentProfile;
+class MinorFragmentProfile;
+class OperatorProfile;
+class StreamProfile;
+class MetricValue;
 
 enum NamePart_Type {
   NamePart_Type_NAME = 0,
@@ -85,6 +91,97 @@ inline bool RpcChannel_Parse(
   return ::google::protobuf::internal::ParseNamedEnum<RpcChannel>(
     RpcChannel_descriptor(), name, value);
 }
+enum QueryType {
+  SQL = 1,
+  LOGICAL = 2,
+  PHYSICAL = 3
+};
+bool QueryType_IsValid(int value);
+const QueryType QueryType_MIN = SQL;
+const QueryType QueryType_MAX = PHYSICAL;
+const int QueryType_ARRAYSIZE = QueryType_MAX + 1;
+
+const ::google::protobuf::EnumDescriptor* QueryType_descriptor();
+inline const ::std::string& QueryType_Name(QueryType value) {
+  return ::google::protobuf::internal::NameOfEnum(
+    QueryType_descriptor(), value);
+}
+inline bool QueryType_Parse(
+    const ::std::string& name, QueryType* value) {
+  return ::google::protobuf::internal::ParseNamedEnum<QueryType>(
+    QueryType_descriptor(), name, value);
+}
+enum FragmentState {
+  SENDING = 0,
+  AWAITING_ALLOCATION = 1,
+  RUNNING = 2,
+  FINISHED = 3,
+  CANCELLED = 4,
+  FAILED = 5
+};
+bool FragmentState_IsValid(int value);
+const FragmentState FragmentState_MIN = SENDING;
+const FragmentState FragmentState_MAX = FAILED;
+const int FragmentState_ARRAYSIZE = FragmentState_MAX + 1;
+
+const ::google::protobuf::EnumDescriptor* FragmentState_descriptor();
+inline const ::std::string& FragmentState_Name(FragmentState value) {
+  return ::google::protobuf::internal::NameOfEnum(
+    FragmentState_descriptor(), value);
+}
+inline bool FragmentState_Parse(
+    const ::std::string& name, FragmentState* value) {
+  return ::google::protobuf::internal::ParseNamedEnum<FragmentState>(
+    FragmentState_descriptor(), name, value);
+}
+enum CoreOperatorType {
+  SINGLE_SENDER = 0,
+  BROADCAST_SENDER = 1,
+  FILTER = 2,
+  HASH_AGGREGATE = 3,
+  HASH_JOIN = 4,
+  MERGE_JOIN = 5,
+  HASH_PARTITION_SENDER = 6,
+  LIMIT = 7,
+  MERGING_RECEIVER = 8,
+  ORDERED_PARTITION_SENDER = 9,
+  PROJECT = 10,
+  RANDOM_RECEIVER = 11,
+  RANGE_SENDER = 12,
+  SCREEN = 13,
+  SELECTION_VECTOR_REMOVER = 14,
+  STREAMING_AGGREGATE = 15,
+  TOP_N_SORT = 16,
+  EXTERNAL_SORT = 17,
+  TRACE = 18,
+  UNION = 19,
+  OLD_SORT = 20,
+  PARQUET_ROW_GROUP_SCAN = 21,
+  HIVE_SUB_SCAN = 22,
+  SYSTEM_TABLE_SCAN = 23,
+  MOCK_SUB_SCAN = 24,
+  PARQUET_WRITER = 25,
+  DIRECT_SUB_SCAN = 26,
+  TEXT_WRITER = 27,
+  TEXT_SUB_SCAN = 28,
+  JSON_SUB_SCAN = 29,
+  INFO_SCHEMA_SUB_SCAN = 30
+};
+bool CoreOperatorType_IsValid(int value);
+const CoreOperatorType CoreOperatorType_MIN = SINGLE_SENDER;
+const CoreOperatorType CoreOperatorType_MAX = INFO_SCHEMA_SUB_SCAN;
+const int CoreOperatorType_ARRAYSIZE = CoreOperatorType_MAX + 1;
+
+const ::google::protobuf::EnumDescriptor* CoreOperatorType_descriptor();
+inline const ::std::string& CoreOperatorType_Name(CoreOperatorType value) {
+  return ::google::protobuf::internal::NameOfEnum(
+    CoreOperatorType_descriptor(), value);
+}
+inline bool CoreOperatorType_Parse(
+    const ::std::string& name, CoreOperatorType* value) {
+  return ::google::protobuf::internal::ParseNamedEnum<CoreOperatorType>(
+    CoreOperatorType_descriptor(), name, value);
+}
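
On the Java side generated from the same .proto, the equivalents of the QueryType_Name/QueryType_Parse helpers above are the standard protobuf-java enum methods. A sketch, assuming the generated org.apache.drill.exec.proto.UserBitShared classes are on the classpath:

    import org.apache.drill.exec.proto.UserBitShared.QueryType;

    public class EnumRoundTrip {
      public static void main(String[] args) {
        QueryType t = QueryType.PHYSICAL;
        System.out.println(t.name());                  // "PHYSICAL"
        System.out.println(t.getNumber());             // 3
        System.out.println(QueryType.valueOf("SQL"));  // SQL
        System.out.println(QueryType.valueOf(2));      // LOGICAL
      }
    }
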
 // ===================================================================
 
 class UserCredentials : public ::google::protobuf::Message {
@@ -900,851 +997,2491 @@ class SerializedField : public ::google::protobuf::Message {
   void InitAsDefaultInstance();
   static SerializedField* default_instance_;
 };
-// ===================================================================
-
+// -------------------------------------------------------------------
 
-// ===================================================================
+class QueryProfile : public ::google::protobuf::Message {
+ public:
+  QueryProfile();
+  virtual ~QueryProfile();
 
-// UserCredentials
+  QueryProfile(const QueryProfile& from);
 
-// optional string user_name = 1;
-inline bool UserCredentials::has_user_name() const {
-  return (_has_bits_[0] & 0x00000001u) != 0;
-}
-inline void UserCredentials::set_has_user_name() {
-  _has_bits_[0] |= 0x00000001u;
-}
-inline void UserCredentials::clear_has_user_name() {
-  _has_bits_[0] &= ~0x00000001u;
-}
-inline void UserCredentials::clear_user_name() {
-  if (user_name_ != &::google::protobuf::internal::kEmptyString) {
-    user_name_->clear();
-  }
-  clear_has_user_name();
-}
-inline const ::std::string& UserCredentials::user_name() const {
-  return *user_name_;
-}
-inline void UserCredentials::set_user_name(const ::std::string& value) {
-  set_has_user_name();
-  if (user_name_ == &::google::protobuf::internal::kEmptyString) {
-    user_name_ = new ::std::string;
-  }
-  user_name_->assign(value);
-}
-inline void UserCredentials::set_user_name(const char* value) {
-  set_has_user_name();
-  if (user_name_ == &::google::protobuf::internal::kEmptyString) {
-    user_name_ = new ::std::string;
+  inline QueryProfile& operator=(const QueryProfile& from) {
+    CopyFrom(from);
+    return *this;
   }
-  user_name_->assign(value);
-}
-inline void UserCredentials::set_user_name(const char* value, size_t size) {
-  set_has_user_name();
-  if (user_name_ == &::google::protobuf::internal::kEmptyString) {
-    user_name_ = new ::std::string;
+
+  inline const ::google::protobuf::UnknownFieldSet& unknown_fields() const {
+    return _unknown_fields_;
   }
-  user_name_->assign(reinterpret_cast<const char*>(value), size);
-}
-inline ::std::string* UserCredentials::mutable_user_name() {
-  set_has_user_name();
-  if (user_name_ == &::google::protobuf::internal::kEmptyString) {
-    user_name_ = new ::std::string;
+
+  inline ::google::protobuf::UnknownFieldSet* mutable_unknown_fields() {
+    return &_unknown_fields_;
   }
-  return user_name_;
-}
-inline ::std::string* UserCredentials::release_user_name() {
-  clear_has_user_name();
-  if (user_name_ == &::google::protobuf::internal::kEmptyString) {
-    return NULL;
-  } else {
-    ::std::string* temp = user_name_;
-    user_name_ = const_cast< ::std::string*>(&::google::protobuf::internal::kEmptyString);
-    return temp;
+
+  static const ::google::protobuf::Descriptor* descriptor();
+  static const QueryProfile& default_instance();
+
+  void Swap(QueryProfile* other);
+
+  // implements Message ----------------------------------------------
+
+  QueryProfile* New() const;
+  void CopyFrom(const ::google::protobuf::Message& from);
+  void MergeFrom(const ::google::protobuf::Message& from);
+  void CopyFrom(const QueryProfile& from);
+  void MergeFrom(const QueryProfile& from);
+  void Clear();
+  bool IsInitialized() const;
+
+  int ByteSize() const;
+  bool MergePartialFromCodedStream(
+      ::google::protobuf::io::CodedInputStream* input);
+  void SerializeWithCachedSizes(
+      ::google::protobuf::io::CodedOutputStream* output) const;
+  ::google::protobuf::uint8* SerializeWithCachedSizesToArray(::google::protobuf::uint8* output) const;
+  int GetCachedSize() const { return _cached_size_; }
+  private:
+  void SharedCtor();
+  void SharedDtor();
+  void SetCachedSize(int size) const;
+  public:
+
+  ::google::protobuf::Metadata GetMetadata() const;
+
+  // nested types ----------------------------------------------------
+
+  // accessors -------------------------------------------------------
+
+  // optional .exec.shared.QueryId id = 1;
+  inline bool has_id() const;
+  inline void clear_id();
+  static const int kIdFieldNumber = 1;
+  inline const ::exec::shared::QueryId& id() const;
+  inline ::exec::shared::QueryId* mutable_id();
+  inline ::exec::shared::QueryId* release_id();
+  inline void set_allocated_id(::exec::shared::QueryId* id);
+
+  // optional .exec.shared.QueryType type = 2;
+  inline bool has_type() const;
+  inline void clear_type();
+  static const int kTypeFieldNumber = 2;
+  inline ::exec::shared::QueryType type() const;
+  inline void set_type(::exec::shared::QueryType value);
+
+  // optional int64 start = 3;
+  inline bool has_start() const;
+  inline void clear_start();
+  static const int kStartFieldNumber = 3;
+  inline ::google::protobuf::int64 start() const;
+  inline void set_start(::google::protobuf::int64 value);
+
+  // optional int64 end = 4;
+  inline bool has_end() const;
+  inline void clear_end();
+  static const int kEndFieldNumber = 4;
+  inline ::google::protobuf::int64 end() const;
+  inline void set_end(::google::protobuf::int64 value);
+
+  // optional string query = 5;
+  inline bool has_query() const;
+  inline void clear_query();
+  static const int kQueryFieldNumber = 5;
+  inline const ::std::string& query() const;
+  inline void set_query(const ::std::string& value);
+  inline void set_query(const char* value);
+  inline void set_query(const char* value, size_t size);
+  inline ::std::string* mutable_query();
+  inline ::std::string* release_query();
+  inline void set_allocated_query(::std::string* query);
+
+  // optional string plan = 6;
+  inline bool has_plan() const;
+  inline void clear_plan();
+  static const int kPlanFieldNumber = 6;
+  inline const ::std::string& plan() const;
+  inline void set_plan(const ::std::string& value);
+  inline void set_plan(const char* value);
+  inline void set_plan(const char* value, size_t size);
+  inline ::std::string* mutable_plan();
+  inline ::std::string* release_plan();
+  inline void set_allocated_plan(::std::string* plan);
+
+  // repeated .exec.shared.MajorFragmentProfile fragment_profile = 7;
+  inline int fragment_profile_size() const;
+  inline void clear_fragment_profile();
+  static const int kFragmentProfileFieldNumber = 7;
+  inline const ::exec::shared::MajorFragmentProfile& fragment_profile(int index) const;
+  inline ::exec::shared::MajorFragmentProfile* mutable_fragment_profile(int index);
+  inline ::exec::shared::MajorFragmentProfile* add_fragment_profile();
+  inline const ::google::protobuf::RepeatedPtrField< ::exec::shared::MajorFragmentProfile >&
+      fragment_profile() const;
+  inline ::google::protobuf::RepeatedPtrField< ::exec::shared::MajorFragmentProfile >*
+      mutable_fragment_profile();
+
+  // @@protoc_insertion_point(class_scope:exec.shared.QueryProfile)
+ private:
+  inline void set_has_id();
+  inline void clear_has_id();
+  inline void set_has_type();
+  inline void clear_has_type();
+  inline void set_has_start();
+  inline void clear_has_start();
+  inline void set_has_end();
+  inline void clear_has_end();
+  inline void set_has_query();
+  inline void clear_has_query();
+  inline void set_has_plan();
+  inline void clear_has_plan();
+
+  ::google::protobuf::UnknownFieldSet _unknown_fields_;
+
+  ::exec::shared::QueryId* id_;
+  ::google::protobuf::int64 start_;
+  ::google::protobuf::int64 end_;
+  ::std::string* query_;
+  ::std::string* plan_;
+  ::google::protobuf::RepeatedPtrField< ::exec::shared::MajorFragmentProfile > fragment_profile_;
+  int type_;
+
+  mutable int _cached_size_;
+  ::google::protobuf::uint32 _has_bits_[(7 + 31) / 32];
+
+  friend void  protobuf_AddDesc_UserBitShared_2eproto();
+  friend void protobuf_AssignDesc_UserBitShared_2eproto();
+  friend void protobuf_ShutdownFile_UserBitShared_2eproto();
+
+  void InitAsDefaultInstance();
+  static QueryProfile* default_instance_;
+};
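
A QueryProfile with the fields declared above is typically populated on the Java side through the generated builder. A minimal sketch with illustrative values (again assuming the generated org.apache.drill.exec.proto.UserBitShared classes):

    import org.apache.drill.exec.proto.UserBitShared.MajorFragmentProfile;
    import org.apache.drill.exec.proto.UserBitShared.QueryProfile;
    import org.apache.drill.exec.proto.UserBitShared.QueryType;

    public class ProfileSketch {
      public static void main(String[] args) {
        // Field numbers follow the message above: type = 2, start = 3,
        // end = 4, query = 5, fragment_profile = 7.
        QueryProfile profile = QueryProfile.newBuilder()
            .setType(QueryType.SQL)
            .setStart(System.currentTimeMillis())
            .setEnd(System.currentTimeMillis())
            .setQuery("SELECT 1")
            .addFragmentProfile(MajorFragmentProfile.newBuilder()
                .setMajorFragmentId(0)
                .build())
            .build();
        System.out.println(profile.getFragmentProfileCount());  // 1
      }
    }
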
+// -------------------------------------------------------------------
+
+class MajorFragmentProfile : public ::google::protobuf::Message {
+ public:
+  MajorFragmentProfile();
+  virtual ~MajorFragmentProfile();
+
+  MajorFragmentProfile(const MajorFragmentProfile& from);
+
+  inline MajorFragmentProfile& operator=(const MajorFragmentProfile& from) {
+    CopyFrom(from);
+    return *this;
   }
-}
-inline void UserCredentials::set_allocated_user_name(::std::string* user_name) {
-  if (user_name_ != &::google::protobuf::internal::kEmptyString) {
-    delete user_name_;
+
+  inline const ::google::protobuf::UnknownFieldSet& unknown_fields() const {
+    return _unknown_fields_;
   }
-  if (user_name) {
-    set_has_user_name();
-    user_name_ = user_name;
-  } else {
-    clear_has_user_name();
-    user_name_ = const_cast< ::std::string*>(&::google::protobuf::internal::kEmptyString);
+
+  inline ::google::protobuf::UnknownFieldSet* mutable_unknown_fields() {
+    return &_unknown_fields_;
   }
-}
 
-// -------------------------------------------------------------------
+  static const ::google::protobuf::Descriptor* descriptor();
+  static const MajorFragmentProfile& default_instance();
 
-// QueryId
+  void Swap(MajorFragmentProfile* other);
 
-// optional sfixed64 part1 = 1;
-inline bool QueryId::has_part1() const {
-  return (_has_bits_[0] & 0x00000001u) != 0;
-}
-inline void QueryId::set_has_part1() {
-  _has_bits_[0] |= 0x00000001u;
-}
-inline void QueryId::clear_has_part1() {
-  _has_bits_[0] &= ~0x00000001u;
-}
-inline void QueryId::clear_part1() {
-  part1_ = GOOGLE_LONGLONG(0);
-  clear_has_part1();
-}
-inline ::google::protobuf::int64 QueryId::part1() const {
-  return part1_;
-}
-inline void QueryId::set_part1(::google::protobuf::int64 value) {
-  set_has_part1();
-  part1_ = value;
-}
+  // implements Message ----------------------------------------------
 
-// optional sfixed64 part2 = 2;
-inline bool QueryId::has_part2() const {
-  return (_has_bits_[0] & 0x00000002u) != 0;
-}
-inline void QueryId::set_has_part2() {
-  _has_bits_[0] |= 0x00000002u;
-}
-inline void QueryId::clear_has_part2() {
-  _has_bits_[0] &= ~0x00000002u;
-}
-inline void QueryId::clear_part2() {
-  part2_ = GOOGLE_LONGLONG(0);
-  clear_has_part2();
-}
-inline ::google::protobuf::int64 QueryId::part2() const {
-  return part2_;
-}
-inline void QueryId::set_part2(::google::protobuf::int64 value) {
-  set_has_part2();
-  part2_ = value;
-}
+  MajorFragmentProfile* New() const;
+  void CopyFrom(const ::google::protobuf::Message& from);
+  void MergeFrom(const ::google::protobuf::Message& from);
+  void CopyFrom(const MajorFragmentProfile& from);
+  void MergeFrom(const MajorFragmentProfile& from);
+  void Clear();
+  bool IsInitialized() const;
+
+  int ByteSize() const;
+  bool MergePartialFromCodedStream(
+      ::google::protobuf::io::CodedInputStream* input);
+  void SerializeWithCachedSizes(
+      ::google::protobuf::io::CodedOutputStream* output) const;
+  ::google::protobuf::uint8* SerializeWithCachedSizesToArray(::google::protobuf::uint8* output) const;
+  int GetCachedSize() const { return _cached_size_; }
+  private:
+  void SharedCtor();
+  void SharedDtor();
+  void SetCachedSize(int size) const;
+  public:
+
+  ::google::protobuf::Metadata GetMetadata() const;
+
+  // nested types ----------------------------------------------------
+
+  // accessors -------------------------------------------------------
+
+  // optional int32 major_fragment_id = 1;
+  inline bool has_major_fragment_id() const;
+  inline void clear_major_fragment_id();
+  static const int kMajorFragmentIdFieldNumber = 1;
+  inline ::google::protobuf::int32 major_fragment_id() const;
+  inline void set_major_fragment_id(::google::protobuf::int32 value);
+
+  // repeated .exec.shared.MinorFragmentProfile minor_fragment_profile = 2;
+  inline int minor_fragment_profile_size() const;
+  inline void clear_minor_fragment_profile();
+  static const int kMinorFragmentProfileFieldNumber = 2;
+  inline const ::exec::shared::MinorFragmentProfile& minor_fragment_profile(int index) const;
+  inline ::exec::shared::MinorFragmentProfile* mutable_minor_fragment_profile(int index);
+  inline ::exec::shared::MinorFragmentProfile* add_minor_fragment_profile();
+  inline const ::google::protobuf::RepeatedPtrField< ::exec::shared::MinorFragmentProfile >&
+      minor_fragment_profile() const;
+  inline ::google::protobuf::RepeatedPtrField< ::exec::shared::MinorFragmentProfile >*
+      mutable_minor_fragment_profile();
+
+  // @@protoc_insertion_point(class_scope:exec.shared.MajorFragmentProfile)
+ private:
+  inline void set_has_major_fragment_id();
+  inline void clear_has_major_fragment_id();
+
+  ::google::protobuf::UnknownFieldSet _unknown_fields_;
 
+  ::google::protobuf::RepeatedPtrField< ::exec::shared::MinorFragmentProfile > minor_fragment_profile_;
+  ::google::protobuf::int32 major_fragment_id_;
+
+  mutable int _cached_size_;
+  ::google::protobuf::uint32 _has_bits_[(2 + 31) / 32];
+
+  friend void  protobuf_AddDesc_UserBitShared_2eproto();
+  friend void protobuf_AssignDesc_UserBitShared_2eproto();
+  friend void protobuf_ShutdownFile_UserBitShared_2eproto();
+
+  void InitAsDefaultInstance();
+  static MajorFragmentProfile* default_instance_;
+};
 // -------------------------------------------------------------------
 
-// DrillPBError
+class MinorFragmentProfile : public ::google::protobuf::Message {
+ public:
+  MinorFragmentProfile();
+  virtual ~MinorFragmentProfile();
 
-// optional string error_id = 1;
-inline bool DrillPBError::has_error_id() const {
-  return (_has_bits_[0] & 0x00000001u) != 0;
-}
-inline void DrillPBError::set_has_error_id() {
-  _has_bits_[0] |= 0x00000001u;
-}
-inline void DrillPBError::clear_has_error_id() {
-  _has_bits_[0] &= ~0x00000001u;
-}
-inline void DrillPBError::clear_error_id() {
-  if (error_id_ != &::google::protobuf::internal::kEmptyString) {
-    error_id_->clear();
+  MinorFragmentProfile(const MinorFragmentProfile& from);
+
+  inline MinorFragmentProfile& operator=(const MinorFragmentProfile& from) {
+    CopyFrom(from);
+    return *this;
   }
-  clear_has_error_id();
-}
-inline const ::std::string& DrillPBError::error_id() const {
-  return *error_id_;
+
+  inline const ::google::protobuf::UnknownFieldSet& unknown_fields() const {
+    return _unknown_fields_;
+  }
+
+  inline ::google::protobuf::UnknownFieldSet* mutable_unknown_fields() {
+    return &_unknown_fields_;
+  }
+
+  static const ::google::protobuf::Descriptor* descriptor();
+  static const MinorFragmentProfile& default_instance();
+
+  void Swap(MinorFragmentProfile* other);
+
+  // implements Message ----------------------------------------------
+
+  MinorFragmentProfile* New() const;
+  void CopyFrom(const ::google::protobuf::Message& from);
+  void MergeFrom(const ::google::protobuf::Message& from);
+  void CopyFrom(const MinorFragmentProfile& from);
+  void MergeFrom(const MinorFragmentProfile& from);
+  void Clear();
+  bool IsInitialized() const;
+
+  int ByteSize() const;
+  bool MergePartialFromCodedStream(
+      ::google::protobuf::io::CodedInputStream* input);
+  void SerializeWithCachedSizes(
+      ::google::protobuf::io::CodedOutputStream* output) const;
+  ::google::protobuf::uint8* SerializeWithCachedSizesToArray(::google::protobuf::uint8* output) const;
+  int GetCachedSize() const { return _cached_size_; }
+  private:
+  void SharedCtor();
+  void SharedDtor();
+  void SetCachedSize(int size) const;
+  public:
+
+  ::google::protobuf::Metadata GetMetadata() const;
+
+  // nested types ----------------------------------------------------
+
+  // accessors -------------------------------------------------------
+
+  // optional .exec.shared.FragmentState state = 1;
+  inline bool has_state() const;
+  inline void clear_state();
+  static const int kStateFieldNumber = 1;
+  inline ::exec::shared::FragmentState state() const;
+  inline void set_state(::exec::shared::FragmentState value);
+
+  // optional .exec.shared.DrillPBError error = 2;
+  inline bool has_error() const;
+  inline void clear_error();
+  static const int kErrorFieldNumber = 2;
+  inline const ::exec::shared::DrillPBError& error() const;
+  inline ::exec::shared::DrillPBError* mutable_error();
+  inline ::exec::shared::DrillPBError* release_error();
+  inline void set_allocated_error(::exec::shared::DrillPBError* error);
+
+  // optional int32 minor_fragment_id = 3;
+  inline bool has_minor_fragment_id() const;
+  inline void clear_minor_fragment_id();
+  static const int kMinorFragmentIdFieldNumber = 3;
+  inline ::google::protobuf::int32 minor_fragment_id() const;
+  inline void set_minor_fragment_id(::google::protobuf::int32 value);
+
+  // repeated .exec.shared.OperatorProfile operator_profile = 4;
+  inline int operator_profile_size() const;
+  inline void clear_operator_profile();
+  static const int kOperatorProfileFieldNumber = 4;
+  inline const ::exec::shared::OperatorProfile& operator_profile(int index) const;
+  inline ::exec::shared::OperatorProfile* mutable_operator_profile(int index);
+  inline ::exec::shared::OperatorProfile* add_operator_profile();
+  inline const ::google::protobuf::RepeatedPtrField< ::exec::shared::OperatorProfile >&
+      operator_profile() const;
+  inline ::google::protobuf::RepeatedPtrField< ::exec::shared::OperatorProfile >*
+      mutable_operator_profile();
+
+  // optional int64 start_time = 5;
+  inline bool has_start_time() const;
+  inline void clear_start_time();
+  static const int kStartTimeFieldNumber = 5;
+  inline ::google::protobuf::int64 start_time() const;
+  inline void set_start_time(::google::protobuf::int64 value);
+
+  // optional int64 end_time = 6;
+  inline bool has_end_time() const;
+  inline void clear_end_time();
+  static const int kEndTimeFieldNumber = 6;
+  inline ::google::protobuf::int64 end_time() const;
+  inline void set_end_time(::google::protobuf::int64 value);
+
+  // optional int64 memory_used = 7;
+  inline bool has_memory_used() const;
+  inline void clear_memory_used();
+  static const int kMemoryUsedFieldNumber = 7;
+  inline ::google::protobuf::int64 memory_used() const;
+  inline void set_memory_used(::google::protobuf::int64 value);
+
+  // optional int64 max_memory_used = 8;
+  inline bool has_max_memory_used() const;
+  inline void clear_max_memory_used();
+  static const int kMaxMemoryUsedFieldNumber = 8;
+  inline ::google::protobuf::int64 max_memory_used() const;
+  inline void set_max_memory_used(::google::protobuf::int64 value);
+
+  // optional .exec.DrillbitEndpoint endpoint = 9;
+  inline bool has_endpoint() const;
+  inline void clear_endpoint();
+  static const int kEndpointFieldNumber = 9;
+  inline const ::exec::DrillbitEndpoint& endpoint() const;
+  inline ::exec::DrillbitEndpoint* mutable_endpoint();
+  inline ::exec::DrillbitEndpoint* release_endpoint();
+  inline void set_allocated_endpoint(::exec::DrillbitEndpoint* endpoint);
+
+  // @@protoc_insertion_point(class_scope:exec.shared.MinorFragmentProfile)
+ private:
+  inline void set_has_state();
+  inline void clear_has_state();
+  inline void set_has_error();
+  inline void clear_has_error();
+  inline void set_has_minor_fragment_id();
+  inline void clear_has_minor_fragment_id();
+  inline void set_has_start_time();
+  inline void clear_has_start_time();
+  inline void set_has_end_time();
+  inline void clear_has_end_time();
+  inline void set_has_memory_used();
+  inline void clear_has_memory_used();
+  inline void set_has_max_memory_used();
+  inline void clear_has_max_memory_used();
+  inline void set_has_endpoint();
+  inline void clear_has_endpoint();
+
+  ::google::protobuf::UnknownFieldSet _unknown_fields_;
+
+  ::exec::shared::DrillPBError* error_;
+  int state_;
+  ::google::protobuf::int32 minor_fragment_id_;
+  ::google::protobuf::RepeatedPtrField< ::exec::shared::OperatorProfile > operator_profile_;
+  ::google::protobuf::int64 start_time_;
+  ::google::protobuf::int64 end_time_;
+  ::google::protobuf::int64 memory_used_;
+  ::google::protobuf::int64 max_memory_used_;
+  ::exec::DrillbitEndpoint* endpoint_;
+
+  mutable int _cached_size_;
+  ::google::protobuf::uint32 _has_bits_[(9 + 31) / 32];
+
+  friend void  protobuf_AddDesc_UserBitShared_2eproto();
+  friend void protobuf_AssignDesc_UserBitShared_2eproto();
+  friend void protobuf_ShutdownFile_UserBitShared_2eproto();
+
+  void InitAsDefaultInstance();
+  static MinorFragmentProfile* default_instance_;
+};
+// -------------------------------------------------------------------
+
+class OperatorProfile : public ::google::protobuf::Message {
+ public:
+  OperatorProfile();
+  virtual ~OperatorProfile();
+
+  OperatorProfile(const OperatorProfile& from);
+
+  inline OperatorProfile& operator=(const OperatorProfile& from) {
+    CopyFrom(from);
+    return *this;
+  }
+
+  inline const ::google::protobuf::UnknownFieldSet& unknown_fields() const {
+    return _unknown_fields_;
+  }
+
+  inline ::google::protobuf::UnknownFieldSet* mutable_unknown_fields() {
+    return &_unknown_fields_;
+  }
+
+  static const ::google::protobuf::Descriptor* descriptor();
+  static const OperatorProfile& default_instance();
+
+  void Swap(OperatorProfile* other);
+
+  // implements Message ----------------------------------------------
+
+  OperatorProfile* New() const;
+  void CopyFrom(const ::google::protobuf::Message& from);
+  void MergeFrom(const ::google::protobuf::Message& from);
+  void CopyFrom(const OperatorProfile& from);
+  void MergeFrom(const OperatorProfile& from);
+  void Clear();
+  bool IsInitialized() const;
+
+  int ByteSize() const;
+  bool MergePartialFromCodedStream(
+      ::google::protobuf::io::CodedInputStream* input);
+  void SerializeWithCachedSizes(
+      ::google::protobuf::io::CodedOutputStream* output) const;
+  ::google::protobuf::uint8* SerializeWithCachedSizesToArray(::google::protobuf::uint8* output) const;
+  int GetCachedSize() const { return _cached_size_; }
+  private:
+  void SharedCtor();
+  void SharedDtor();
+  void SetCachedSize(int size) const;
+  public:
+
+  ::google::protobuf::Metadata GetMetadata() const;
+
+  // nested types ----------------------------------------------------
+
+  // accessors -------------------------------------------------------
+
+  // repeated .exec.shared.StreamProfile input_profile = 1;
+  inline int input_profile_size() const;
+  inline void clear_input_profile();
+  static const int kInputProfileFieldNumber = 1;
+  inline const ::exec::shared::StreamProfile& input_profile(int index) const;
+  inline ::exec::shared::StreamProfile* mutable_input_profile(int index);
+  inline ::exec::shared::StreamProfile* add_input_profile();
+  inline const ::google::protobuf::RepeatedPtrField< ::exec::shared::StreamProfile >&
+      input_profile() const;
+  inline ::google::protobuf::RepeatedPtrField< ::exec::shared::StreamProfile >*
+      mutable_input_profile();
+
+  // optional int32 operator_id = 3;
+  inline bool has_operator_id() const;
+  inline void clear_operator_id();
+  static const int kOperatorIdFieldNumber = 3;
+  inline ::google::protobuf::int32 operator_id() const;
+  inline void set_operator_id(::google::protobuf::int32 value);
+
+  // optional int32 operator_type = 4;
+  inline bool has_operator_type() const;
+  inline void clear_operator_type();
+  static const int kOperatorTypeFieldNumber = 4;
+  inline ::google::protobuf::int32 operator_type() const;
+  inline void set_operator_type(::google::protobuf::int32 value);
+
+  // optional int64 setup_nanos = 5;
+  inline bool has_setup_nanos() const;
+  inline void clear_setup_nanos();
+  static const int kSetupNanosFieldNumber = 5;
+  inline ::google::protobuf::int64 setup_nanos() const;
+  inline void set_setup_nanos(::google::protobuf::int64 value);
+
+  // optional int64 process_nanos = 6;
+  inline bool has_process_nanos() const;
+  inline void clear_process_nanos();
+  static const int kProcessNanosFieldNumber = 6;
+  inline ::google::protobuf::int64 process_nanos() const;
+  inline void set_process_nanos(::google::protobuf::int64 value);
+
+  // optional int64 local_memory_allocated = 7;
+  inline bool has_local_memory_allocated() const;
+  inline void clear_local_memory_allocated();
+  static const int kLocalMemoryAllocatedFieldNumber = 7;
+  inline ::google::protobuf::int64 local_memory_allocated() const;
+  inline void set_local_memory_allocated(::google::protobuf::int64 value);
+
+  // repeated .exec.shared.MetricValue metric = 8;
+  inline int metric_size() const;
+  inline void clear_metric();
+  static const int kMetricFieldNumber = 8;
+  inline const ::exec::shared::MetricValue& metric(int index) const;
+  inline ::exec::shared::MetricValue* mutable_metric(int index);
+  inline ::exec::shared::MetricValue* add_metric();
+  inline const ::google::protobuf::RepeatedPtrField< ::exec::shared::MetricValue >&
+      metric() const;
+  inline ::google::protobuf::RepeatedPtrField< ::exec::shared::MetricValue >*
+      mutable_metric();
+
+  // @@protoc_insertion_point(class_scope:exec.shared.OperatorProfile)
+ private:
+  inline void set_has_operator_id();
+  inline void clear_has_operator_id();
+  inline void set_has_operator_type();
+  inline void clear_has_operator_type();
+  inline void set_has_setup_nanos();
+  inline void clear_has_setup_nanos();
+  inline void set_has_process_nanos();
+  inline void clear_has_process_nanos();
+  inline void set_has_local_memory_allocated();
+  inline void clear_has_local_memory_allocated();
+
+  ::google::protobuf::UnknownFieldSet _unknown_fields_;
+
+  ::google::protobuf::RepeatedPtrField< ::exec::shared::StreamProfile > input_profile_;
+  ::google::protobuf::int32 operator_id_;
+  ::google::protobuf::int32 operator_type_;
+  ::google::protobuf::int64 setup_nanos_;
+  ::google::protobuf::int64 process_nanos_;
+  ::google::protobuf::int64 local_memory_allocated_;
+  ::google::protobuf::RepeatedPtrField< ::exec::shared::MetricValue > metric_;
+
+  mutable int _cached_size_;
+  ::google::protobuf::uint32 _has_bits_[(7 + 31) / 32];
+
+  friend void  protobuf_AddDesc_UserBitShared_2eproto();
+  friend void protobuf_AssignDesc_UserBitShared_2eproto();
+  friend void protobuf_ShutdownFile_UserBitShared_2eproto();
+
+  void InitAsDefaultInstance();
+  static OperatorProfile* default_instance_;
+};
+// -------------------------------------------------------------------
+
+class StreamProfile : public ::google::protobuf::Message {
+ public:
+  StreamProfile();
+  virtual ~StreamProfile();
+
+  StreamProfile(const StreamProfile& from);
+
+  inline StreamProfile& operator=(const StreamProfile& from) {
+    CopyFrom(from);
+    return *this;
+  }
+
+  inline const ::google::protobuf::UnknownFieldSet& unknown_fields() const {
+    return _unknown_fields_;
+  }
+
+  inline ::google::protobuf::UnknownFieldSet* mutable_unknown_fields() {
+    return &_unknown_fields_;
+  }
+
+  static const ::google::protobuf::Descriptor* descriptor();
+  static const StreamProfile& default_instance();
+
+  void Swap(StreamProfile* other);
+
+  // implements Message ----------------------------------------------
+
+  StreamProfile* New() const;
+  void CopyFrom(const ::google::protobuf::Message& from);
+  void MergeFrom(const ::google::protobuf::Message& from);
+  void CopyFrom(const StreamProfile& from);
+  void MergeFrom(const StreamProfile& from);
+  void Clear();
+  bool IsInitialized() const;
+
+  int ByteSize() const;
+  bool MergePartialFromCodedStream(
+      ::google::protobuf::io::CodedInputStream* input);
+  void SerializeWithCachedSizes(
+      ::google::protobuf::io::CodedOutputStream* output) const;
+  ::google::protobuf::uint8* SerializeWithCachedSizesToArray(::google::protobuf::uint8* output) const;
+  int GetCachedSize() const { return _cached_size_; }
+  private:
+  void SharedCtor();
+  void SharedDtor();
+  void SetCachedSize(int size) const;
+  public:
+
+  ::google::protobuf::Metadata GetMetadata() const;
+
+  // nested types ----------------------------------------------------
+
+  // accessors -------------------------------------------------------
+
+  // optional int64 records = 1;
+  inline bool has_records() const;
+  inline void clear_records();
+  static const int kRecordsFieldNumber = 1;
+  inline ::google::protobuf::int64 records() const;
+  inline void set_records(::google::protobuf::int64 value);
+
+  // optional int64 batches = 2;
+  inline bool has_batches() const;
+  inline void clear_batches();
+  static const int kBatchesFieldNumber = 2;
+  inline ::google::protobuf::int64 batches() const;
+  inline void set_batches(::google::protobuf::int64 value);
+
+  // optional int64 schemas = 3;
+  inline bool has_schemas() const;
+  inline void clear_schemas();
+  static const int kSchemasFieldNumber = 3;
+  inline ::google::protobuf::int64 schemas() const;
+  inline void set_schemas(::google::protobuf::int64 value);
+
+  // @@protoc_insertion_point(class_scope:exec.shared.StreamProfile)
+ private:
+  inline void set_has_records();
+  inline void clear_has_records();
+  inline void set_has_batches();
+  inline void clear_has_batches();
+  inline void set_has_schemas();
+  inline void clear_has_schemas();
+
+  ::google::protobuf::UnknownFieldSet _unknown_fields_;
+
+  ::google::protobuf::int64 records_;
+  ::google::protobuf::int64 batches_;
+  ::google::protobuf::int64 schemas_;
+
+  mutable int _cached_size_;
+  ::google::protobuf::uint32 _has_bits_[(3 + 31) / 32];
+
+  friend void  protobuf_AddDesc_UserBitShared_2eproto();
+  friend void protobuf_AssignDesc_UserBitShared_2eproto();
+  friend void protobuf_ShutdownFile_UserBitShared_2eproto();
+
+  void InitAsDefaultInstance();
+  static StreamProfile* default_instance_;
+};
+// -------------------------------------------------------------------
+
+class MetricValue : public ::google::protobuf::Message {
+ public:
+  MetricValue();
+  virtual ~MetricValue();
+
+  MetricValue(const MetricValue& from);
+
+  inline MetricValue& operator=(const MetricValue& from) {
+    CopyFrom(from);
+    return *this;
+  }
+
+  inline const ::google::protobuf::UnknownFieldSet& unknown_fields() const {
+    return _unknown_fields_;
+  }
+
+  inline ::google::protobuf::UnknownFieldSet* mutable_unknown_fields() {
+    return &_unknown_fields_;
+  }
+
+  static const ::google::protobuf::Descriptor* descriptor();
+  static const MetricValue& default_instance();
+
+  void Swap(MetricValue* other);
+
+  // implements Message ----------------------------------------------
+
+  MetricValue* New() const;
+  void CopyFrom(const ::google::protobuf::Message& from);
+  void MergeFrom(const ::google::protobuf::Message& from);
+  void CopyFrom(const MetricValue& from);
+  void MergeFrom(const MetricValue& from);
+  void Clear();
+  bool IsInitialized() const;
+
+  int ByteSize() const;
+  bool MergePartialFromCodedStream(
+      ::google::protobuf::io::CodedInputStream* input);
+  void SerializeWithCachedSizes(
+      ::google::protobuf::io::CodedOutputStream* output) const;
+  ::google::protobuf::uint8* SerializeWithCachedSizesToArray(::google::protobuf::uint8* output) const;
+  int GetCachedSize() const { return _cached_size_; }
+  private:
+  void SharedCtor();
+  void SharedDtor();
+  void SetCachedSize(int size) const;
+  public:
+
+  ::google::protobuf::Metadata GetMetadata() const;
+
+  // nested types ----------------------------------------------------
+
+  // accessors -------------------------------------------------------
+
+  // optional int32 metric_id = 1;
+  inline bool has_metric_id() const;
+  inline void clear_metric_id();
+  static const int kMetricIdFieldNumber = 1;
+  inline ::google::protobuf::int32 metric_id() const;
+  inline void set_metric_id(::google::protobuf::int32 value);
+
+  // optional int64 long_value = 2;
+  inline bool has_long_value() const;
+  inline void clear_long_value();
+  static const int kLongValueFieldNumber = 2;
+  inline ::google::protobuf::int64 long_value() const;
+  inline void set_long_value(::google::protobuf::int64 value);
+
+  // optional double double_value = 3;
+  inline bool has_double_value() const;
+  inline void clear_double_value();
+  static const int kDoubleValueFieldNumber = 3;
+  inline double double_value() const;
+  inline void set_double_value(double value);
+
+  // @@protoc_insertion_point(class_scope:exec.shared.MetricValue)
+ private:
+  inline void set_has_metric_id();
+  inline void clear_has_metric_id();
+  inline void set_has_long_value();
+  inline void clear_has_long_value();
+  inline void set_has_double_value();
+  inline void clear_has_double_value();
+
+  ::google::protobuf::UnknownFieldSet _unknown_fields_;
+
+  ::google::protobuf::int64 long_value_;
+  double double_value_;
+  ::google::protobuf::int32 metric_id_;
+
+  mutable int _cached_size_;
+  ::google::protobuf::uint32 _has_bits_[(3 + 31) / 32];
+
+  friend void  protobuf_AddDesc_UserBitShared_2eproto();
+  friend void protobuf_AssignDesc_UserBitShared_2eproto();
+  friend void protobuf_ShutdownFile_UserBitShared_2eproto();
+
+  void InitAsDefaultInstance();
+  static MetricValue* default_instance_;
+};
+// ===================================================================
+
+
+// ===================================================================
+
+// UserCredentials
+
+// optional string user_name = 1;
+inline bool UserCredentials::has_user_name() const {
+  return (_has_bits_[0] & 0x00000001u) != 0;
+}
+inline void UserCredentials::set_has_user_name() {
+  _has_bits_[0] |= 0x00000001u;
+}
+inline void UserCredentials::clear_has_user_name() {
+  _has_bits_[0] &= ~0x00000001u;
+}
+inline void UserCredentials::clear_user_name() {
+  if (user_name_ != &::google::protobuf::internal::kEmptyString) {
+    user_name_->clear();
+  }
+  clear_has_user_name();
+}
+inline const ::std::string& UserCredentials::user_name() const {
+  return *user_name_;
+}
+inline void UserCredentials::set_user_name(const ::std::string& value) {
+  set_has_user_name();
+  if (user_name_ == &::google::protobuf::internal::kEmptyString) {
+    user_name_ = new ::std::string;
+  }
+  user_name_->assign(value);
+}
+inline void UserCredentials::set_user_name(const char* value) {
+  set_has_user_name();
+  if (user_name_ == &::google::protobuf::internal::kEmptyString) {
+    user_name_ = new ::std::string;
+  }
+  user_name_->assign(value);
+}
+inline void UserCredentials::set_user_name(const char* value, size_t size) {
+  set_has_user_name();
+  if (user_name_ == &::google::protobuf::internal::kEmptyString) {
+    user_name_ = new ::std::string;
+  }
+  user_name_->assign(reinterpret_cast<const char*>(value), size);
+}
+inline ::std::string* UserCredentials::mutable_user_name() {
+  set_has_user_name();
+  if (user_name_ == &::google::protobuf::internal::kEmptyString) {
+    user_name_ = new ::std::string;
+  }
+  return user_name_;
+}
+inline ::std::string* UserCredentials::release_user_name() {
+  clear_has_user_name();
+  if (user_name_ == &::google::protobuf::internal::kEmptyString) {
+    return NULL;
+  } else {
+    ::std::string* temp = user_name_;
+    user_name_ = const_cast< ::std::string*>(&::google::protobuf::internal::kEmptyString);
+    return temp;
+  }
+}
+inline void UserCredentials::set_allocated_user_name(::std::string* user_name) {
+  if (user_name_ != &::google::protobuf::internal::kEmptyString) {
+    delete user_name_;
+  }
+  if (user_name) {
+    set_has_user_name();
+    user_name_ = user_name;
+  } else {
+    clear_has_user_name();
+    user_name_ = const_cast< ::std::string*>(&::google::protobuf::internal::kEmptyString);
+  }
+}
+
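
The _has_bits_ arithmetic above is proto2's standard presence tracking: each optional field owns one bit in a packed 32-bit word, and the has_/set_has_/clear_has_ accessors mask that bit. A small Java sketch of the same bookkeeping (hypothetical class with a single field):

    public class HasBitsSketch {
      // One word tracks up to 32 optional fields; field k owns bit (1 << k).
      private int hasBits;
      private String userName;

      boolean hasUserName()   { return (hasBits & 0x00000001) != 0; }
      void setHasUserName()   { hasBits |= 0x00000001; }
      void clearHasUserName() { hasBits &= ~0x00000001; }

      void setUserName(String v) {
        setHasUserName();
        userName = v;
      }

      void clearUserName() {
        userName = null;  // the C++ code reuses a shared empty-string sentinel
        clearHasUserName();
      }

      public static void main(String[] args) {
        HasBitsSketch m = new HasBitsSketch();
        System.out.println(m.hasUserName());  // false
        m.setUserName("drill");
        System.out.println(m.hasUserName());  // true
      }
    }
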
+// -------------------------------------------------------------------
+
+// QueryId
+
+// optional sfixed64 part1 = 1;
+inline bool QueryId::has_part1() const {
+  return (_has_bits_[0] & 0x00000001u) != 0;
+}
+inline void QueryId::set_has_part1() {
+  _has_bits_[0] |= 0x00000001u;
+}
+inline void QueryId::clear_has_part1() {
+  _has_bits_[0] &= ~0x00000001u;
+}
+inline void QueryId::clear_part1() {
+  part1_ = GOOGLE_LONGLONG(0);
+  clear_has_part1();
+}
+inline ::google::protobuf::int64 QueryId::part1() const {
+  return part1_;
+}
+inline void QueryId::set_part1(::google::protobuf::int64 value) {
+  set_has_part1();
+  part1_ = value;
+}
+
+// optional sfixed64 part2 = 2;
+inline bool QueryId::has_part2() const {
+  return (_has_bits_[0] & 0x00000002u) != 0;
+}
+inline void QueryId::set_has_part2() {
+  _has_bits_[0] |= 0x00000002u;
+}
+inline void QueryId::clear_has_part2() {
+  _has_bits_[0] &= ~0x00000002u;
+}
+inline void QueryId::clear_part2() {
+  part2_ = GOOGLE_LONGLONG(0);
+  clear_has_part2();
+}
+inline ::google::protobuf::int64 QueryId::part2() const {
+  return part2_;
+}
+inline void QueryId::set_part2(::google::protobuf::int64 value) {
+  set_has_part2();
+  part2_ = value;
+}
+
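
part1 and part2 together carry a 128-bit query identifier. As an illustration (not Drill's own formatter), java.util.UUID can hold the two signed 64-bit halves losslessly and render them:

    import java.util.UUID;

    public class QueryIdSketch {
      public static void main(String[] args) {
        // The two sfixed64 halves declared above; values are illustrative.
        long part1 = 0x1122334455667788L;
        long part2 = 0x99aabbccddeeff00L;
        System.out.println(new UUID(part1, part2));
        // -> 11223344-5566-7788-99aa-bbccddeeff00
      }
    }
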
+// -------------------------------------------------------------------
+
+// DrillPBError
+
+// optional string error_id = 1;
+inline bool DrillPBError::has_error_id() const {
+  return (_has_bits_[0] & 0x00000001u) != 0;
+}
+inline void DrillPBError::set_has_error_id() {
+  _has_bits_[0] |= 0x00000001u;
+}
+inline void DrillPBError::clear_has_error_id() {
+  _has_bits_[0] &= ~0x00000001u;
+}
+inline void DrillPBError::clear_error_id() {
+  if (error_id_ != &::google::protobuf::internal::kEmptyString) {
+    error_id_->clear();
+  }
+  clear_has_error_id();
+}
+inline const ::std::string& DrillPBError::error_id() const {
+  return *error_id_;
+}
+inline void DrillPBError::set_error_id(const ::std::string& value) {
+  set_has_error_id();
+  if (error_id_ == &::google::protobuf::internal::kEmptyString) {
+    error_id_ = new ::std::string;
+  }
+  error_id_->assign(value);
+}
+inline void DrillPBError::set_error_id(const char* value) {
+  set_has_error_id();
+  if (error_id_ == &::google::protobuf::internal::kEmptyString) {
+    error_id_ = new ::std::string;
+  }
+  error_id_->assign(value);
+}
+inline void DrillPBError::set_error_id(const char* value, size_t size) {
+  set_has_error_id();
+  if (error_id_ == &::google::protobuf::internal::kEmptyString) {
+    error_id_ = new ::std::string;
+  }
+  error_id_->assign(reinterpret_cast<const char*>(value), size);
+}
+inline ::std::string* DrillPBError::mutable_error_id() {
+  set_has_error_id();
+  if (error_id_ == &::google::protobuf::internal::kEmptyString) {
+    error_id_ = new ::std::string;
+  }
+  return error_id_;
+}
+inline ::std::string* DrillPBError::release_error_id() {
+  clear_has_error_id();
+  if (error_id_ == &::google::protobuf::internal::kEmptyString) {
+    return NULL;
+  } else {
+    ::std::string* temp = error_id_;
+    error_id_ = const_cast< ::std::string*>(&::google::protobuf::internal::kEmptyString);
+    return temp;
+  }
+}
+inline void DrillPBError::set_allocated_error_id(::std::string* error_id) {
+  if (error_id_ != &::google::protobuf::internal::kEmptyString) {
+    delete error_id_;
+  }
+  if (error_id) {
+    set_has_error_id();
+    error_id_ = error_id;
+  } else {
+    clear_has_error_id();
+    error_id_ = const_cast< ::std::string*>(&::google::protobuf::internal::kEmptyString);
+  }
+}
+
+// optional .exec.DrillbitEndpoint endpoint = 2;
+inline bool DrillPBError::has_endpoint() const {
+  return (_has_bits_[0] & 0x00000002u) != 0;
+}
+inline void DrillPBError::set_has_endpoint() {
+  _has_bits_[0] |= 0x00000002u;
+}
+inline void DrillPBError::clear_has_endpoint() {
+  _has_bits_[0] &= ~0x00000002u;
+}
+inline void DrillPBError::clear_endpoint() {
+  if (endpoint_ != NULL) endpoint_->::exec::DrillbitEndpoint::Clear();
+  clear_has_endpoint();
+}
+inline const ::exec::DrillbitEndpoint& DrillPBError::endpoint() const {
+  return endpoint_ != NULL ? *endpoint_ : *default_instance_->endpoint_;
+}
+inline ::exec::DrillbitEndpoint* DrillPBError::mutable_endpoint() {
+  set_has_endpoint();
+  if (endpoint_ == NULL) endpoint_ = new ::exec::DrillbitEndpoint;
+  return endpoint_;
+}
+inline ::exec::DrillbitEndpoint* DrillPBError::release_endpoint() {
+  clear_has_endpoint();
+  ::exec::DrillbitEndpoint* temp = endpoint_;
+  endpoint_ = NULL;
+  return temp;
+}
+inline void DrillPBError::set_allocated_endpoint(::exec::DrillbitEndpoint* endpoint) {
+  delete endpoint_;
+  endpoint_ = endpoint;
+  if (endpoint) {
+    set_has_endpoint();
+  } else {
+    clear_has_endpoint();
+  }
+}
+
+// optional int32 error_type = 3;
+inline bool DrillPBError::has_error_type() const {
+  return (_has_bits_[0] & 0x00000004u) != 0;
+}
+inline void DrillPBError::set_has_error_type() {
+  _has_bits_[0] |= 0x00000004u;
+}
+inline void DrillPBError::clear_has_error_type() {
+  _has_bits_[0] &= ~0x00000004u;
+}
+inline void DrillPBError::clear_error_type() {
+  error_type_ = 0;
+  clear_has_error_type();
+}
+inline ::google::protobuf::int32 DrillPBError::error_type() const {
+  return error_type_;
+}
+inline void DrillPBError::set_error_type(::google::protobuf::int32 value) {
+  set_has_error_type();
+  error_type_ = value;
+}
+
+// optional string message = 4;
+inline bool DrillPBError::has_message() const {
+  return (_has_bits_[0] & 0x00000008u) != 0;
+}
+inline void DrillPBError::set_has_message() {
+  _has_bits_[0] |= 0x00000008u;
+}
+inline void DrillPBError::clear_has_message() {
+  _has_bits_[0] &= ~0x00000008u;
+}
+inline void DrillPBError::clear_message() {
+  if (message_ != &::google::protobuf::internal::kEmptyString) {
+    message_->clear();
+  }
+  clear_has_message();
+}
+inline const ::std::string& DrillPBError::message() const {
+  return *message_;
+}
+inline void DrillPBError::set_message(const ::std::string& value) {
+  set_has_message();
+  if (message_ == &::google::protobuf::internal::kEmptyString) {
+    message_ = new ::std::string;
+  }
+  message_->assign(value);
+}
+inline void DrillPBError::set_message(const char* value) {
+  set_has_message();
+  if (message_ == &::google::protobuf::internal::kEmptyString) {
+    message_ = new ::std::string;
+  }
+  message_->assign(value);
+}
+inline void DrillPBError::set_message(const char* value, size_t size) {
+  set_has_message();
+  if (message_ == &::google::protobuf::internal::kEmptyString) {
+    message_ = new ::std::string;
+  }
+  message_->assign(reinterpret_cast<const char*>(value), size);
+}
+inline ::std::string* DrillPBError::mutable_message() {
+  set_has_message();
+  if (message_ == &::google::protobuf::internal::kEmptyString) {
+    message_ = new ::std::string;
+  }
+  return message_;
+}
+inline ::std::string* DrillPBError::release_message() {
+  clear_has_message();
+  if (message_ == &::google::protobuf::internal::kEmptyString) {
+    return NULL;
+  } else {
+    ::std::string* temp = message_;
+    message_ = const_cast< ::std::string*>(&::google::protobuf::internal::kEmptyString);
+    return temp;
+  }
+}
+inline void DrillPBError::set_allocated_message(::std::string* message) {
+  if (message_ != &::google::protobuf::internal::kEmptyString) {
+    delete message_;
+  }
+  if (message) {
+    set_has_message();
+    message_ = message;
+  } else {
+    clear_has_message();
+    message_ = const_cast< ::std::string*>(&::google::protobuf::internal::kEmptyString);
+  }
+}
+
+// repeated .exec.shared.ParsingError parsing_error = 5;
+inline int DrillPBError::parsing_error_size() const {
+  return parsing_error_.size();
+}
+inline void DrillPBError::clear_parsing_error() {
+  parsing_error_.Clear();
+}
+inline const ::exec::shared::ParsingError& DrillPBError::parsing_error(int index) const {
+  return parsing_error_.Get(index);
+}
+inline ::exec::shared::ParsingError* DrillPBError::mutable_parsing_error(int index) {
+  return parsing_error_.Mutable(index);
+}
+inline ::exec::shared::ParsingError* DrillPBError::add_parsing_error() {
+  return parsing_error_.Add();
+}
+inline const ::google::protobuf::RepeatedPtrField< ::exec::shared::ParsingError >&
+DrillPBError::parsing_error() const {
+  return parsing_error_;
+}
+inline ::google::protobuf::RepeatedPtrField< ::exec::shared::ParsingError >*
+DrillPBError::mutable_parsing_error() {
+  return &parsing_error_;
+}
+
+// -------------------------------------------------------------------
+
+// ParsingError
+
+// optional int32 start_column = 2;
+inline bool ParsingError::has_start_column() const {
+  return (_has_bits_[0] & 0x00000001u) != 0;
+}
+inline void ParsingError::set_has_start_column() {
+  _has_bits_[0] |= 0x00000001u;
+}
+inline void ParsingError::clear_has_start_column() {
+  _has_bits_[0] &= ~0x00000001u;
+}
+inline void ParsingError::clear_start_column() {
+  start_column_ = 0;
+  clear_has_start_column();
+}
+inline ::google::protobuf::int32 ParsingError::start_column() const {
+  return start_column_;
+}
+inline void ParsingError::set_start_column(::google::protobuf::int32 value) {
+  set_has_start_column();
+  start_column_ = value;
+}
+
+// optional int32 start_row = 3;
+inline bool ParsingError::has_start_row() const {
+  return (_has_bits_[0] & 0x00000002u) != 0;
+}
+inline void ParsingError::set_has_start_row() {
+  _has_bits_[0] |= 0x00000002u;
+}
+inline void ParsingError::clear_has_start_row() {
+  _has_bits_[0] &= ~0x00000002u;
+}
+inline void ParsingError::clear_start_row() {
+  start_row_ = 0;
+  clear_has_start_row();
+}
+inline ::google::protobuf::int32 ParsingError::start_row() const {
+  return start_row_;
+}
+inline void ParsingError::set_start_row(::google::protobuf::int32 value) {
+  set_has_start_row();
+  start_row_ = value;
+}
+
+// optional int32 end_column = 4;
+inline bool ParsingError::has_end_column() const {
+  return (_has_bits_[0] & 0x00000004u) != 0;
+}
+inline void ParsingError::set_has_end_column() {
+  _has_bits_[0] |= 0x00000004u;
+}
+inline void ParsingError::clear_has_end_column() {
+  _has_bits_[0] &= ~0x00000004u;
+}
+inline void ParsingError::clear_end_column() {
+  end_column_ = 0;
+  clear_has_end_column();
+}
+inline ::google::protobuf::int32 ParsingError::end_column() const {
+  return end_column_;
+}
+inline void ParsingError::set_end_column(::google::protobuf::int32 value) {
+  set_has_end_column();
+  end_column_ = value;
+}
+
+// optional int32 end_row = 5;
+inline bool ParsingError::has_end_row() const {
+  return (_has_bits_[0] & 0x00000008u) != 0;
+}
+inline void ParsingError::set_has_end_row() {
+  _has_bits_[0] |= 0x00000008u;
+}
+inline void ParsingError::clear_has_end_row() {
+  _has_bits_[0] &= ~0x00000008u;
+}
+inline void ParsingError::clear_end_row() {
+  end_row_ = 0;
+  clear_has_end_row();
+}
+inline ::google::protobuf::int32 ParsingError::end_row() const {
+  return end_row_;
+}
+inline void ParsingError::set_end_row(::google::protobuf::int32 value) {
+  set_has_end_row();
+  end_row_ = value;
+}
+
+// -------------------------------------------------------------------
+
+// RecordBatchDef
+
+// optional int32 record_count = 1;
+inline bool RecordBatchDef::has_record_count() const {
+  return (_has_bits_[0] & 0x00000001u) != 0;
+}
+inline void RecordBatchDef::set_has_record_count() {
+  _has_bits_[0] |= 0x00000001u;
+}
+inline void RecordBatchDef::clear_has_record_count() {
+  _has_bits_[0] &= ~0x00000001u;
+}
+inline void RecordBatchDef::clear_record_count() {
+  record_count_ = 0;
+  clear_has_record_count();
+}
+inline ::google::protobuf::int32 RecordBatchDef::record_count() const {
+  return record_count_;
+}
+inline void RecordBatchDef::set_record_count(::google::protobuf::int32 value) {
+  set_has_record_count();
+  record_count_ = value;
 }
-inline void DrillPBError::set_error_id(const ::std::string& value) {
-  set_has_error_id();
-  if (error_id_ == &::google::protobuf::internal::kEmptyString) {
-    error_id_ = new ::std::string;
+
+// repeated .exec.shared.SerializedField field = 2;
+inline int RecordBatchDef::field_size() const {
+  return field_.size();
+}
+inline void RecordBatchDef::clear_field() {
+  field_.Clear();
+}
+inline const ::exec::shared::SerializedField& RecordBatchDef::field(int index) const {
+  return field_.Get(index);
+}
+inline ::exec::shared::SerializedField* RecordBatchDef::mutable_field(int index) {
+  return field_.Mutable(index);
+}
+inline ::exec::shared::SerializedField* RecordBatchDef::add_field() {
+  return field_.Add();
+}
+inline const ::google::protobuf::RepeatedPtrField< ::exec::shared::SerializedField >&
+RecordBatchDef::field() const {
+  return field_;
+}
+inline ::google::protobuf::RepeatedPtrField< ::exec::shared::SerializedField >*
+RecordBatchDef::mutable_field() {
+  return &field_;
+}
+
+// optional bool is_selection_vector_2 = 3;
+inline bool RecordBatchDef::has_is_selection_vector_2() const {
+  return (_has_bits_[0] & 0x00000004u) != 0;
+}
+inline void RecordBatchDef::set_has_is_selection_vector_2() {
+  _has_bits_[0] |= 0x00000004u;
+}
+inline void RecordBatchDef::clear_has_is_selection_vector_2() {
+  _has_bits_[0] &= ~0x00000004u;
+}
+inline void RecordBatchDef::clear_is_selection_vector_2() {
+  is_selection_vector_2_ = false;
+  clear_has_is_selection_vector_2();
+}
+inline bool RecordBatchDef::is_selection_vector_2() const {
+  return is_selection_vector_2_;
+}
+inline void RecordBatchDef::set_is_selection_vector_2(bool value) {
+  set_has_is_selection_vector_2();
+  is_selection_vector_2_ = value;
+}
+
+// -------------------------------------------------------------------
+
+// NamePart
+
+// optional .exec.shared.NamePart.Type type = 1;
+inline bool NamePart::has_type() const {
+  return (_has_bits_[0] & 0x00000001u) != 0;
+}
+inline void NamePart::set_has_type() {
+  _has_bits_[0] |= 0x00000001u;
+}
+inline void NamePart::clear_has_type() {
+  _has_bits_[0] &= ~0x00000001u;
+}
+inline void NamePart::clear_type() {
+  type_ = 0;
+  clear_has_type();
+}
+inline ::exec::shared::NamePart_Type NamePart::type() const {
+  return static_cast< ::exec::shared::NamePart_Type >(type_);
+}
+inline void NamePart::set_type(::exec::shared::NamePart_Type value) {
+  assert(::exec::shared::NamePart_Type_IsValid(value));
+  set_has_type();
+  type_ = value;
+}
+
+// optional string name = 2;
+inline bool NamePart::has_name() const {
+  return (_has_bits_[0] & 0x00000002u) != 0;
+}
+inline void NamePart::set_has_name() {
+  _has_bits_[0] |= 0x00000002u;
+}
+inline void NamePart::clear_has_name() {
+  _has_bits_[0] &= ~0x00000002u;
+}
+inline void NamePart::clear_name() {
+  if (name_ != &::google::protobuf::internal::kEmptyString) {
+    name_->clear();
   }
-  error_id_->assign(value);
+  clear_has_name();
+}
+inline const ::std::string& NamePart::name() const {
+  return *name_;
+}
+inline void NamePart::set_name(const ::std::string& value) {
+  set_has_name();
+  if (name_ == &::google::protobuf::internal::kEmptyString) {
+    name_ = new ::std::string;
+  }
+  name_->assign(value);
+}
+inline void NamePart::set_name(const char* value) {
+  set_has_name();
+  if (name_ == &::google::protobuf::internal::kEmptyString) {
+    name_ = new ::std::string;
+  }
+  name_->assign(value);
+}
+inline void NamePart::set_name(const char* value, size_t size) {
+  set_has_name();
+  if (name_ == &::google::protobuf::internal::kEmptyString) {
+    name_ = new ::std::string;
+  }
+  name_->assign(reinterpret_cast<const char*>(value), size);
+}
+inline ::std::string* NamePart::mutable_name() {
+  set_has_name();
+  if (name_ == &::google::protobuf::internal::kEmptyString) {
+    name_ = new ::std::string;
+  }
+  return name_;
+}
+inline ::std::string* NamePart::release_name() {
+  clear_has_name();
+  if (name_ == &::google::protobuf::internal::kEmptyString) {
+    return NULL;
+  } else {
+    ::std::string* temp = name_;
+    name_ = const_cast< ::std::string*>(&::google::protobuf::internal::kEmptyString);
+    return temp;
+  }
+}
+inline void NamePart::set_allocated_name(::std::string* name) {
+  if (name_ != &::google::protobuf::internal::kEmptyString) {
+    delete name_;
+  }
+  if (name) {
+    set_has_name();
+    name_ = name;
+  } else {
+    clear_has_name();
+    name_ = const_cast< ::std::string*>(&::google::protobuf::internal::kEmptyString);
+  }
+}
+
+// optional .exec.shared.NamePart child = 3;
+inline bool NamePart::has_child() const {
+  return (_has_bits_[0] & 0x00000004u) != 0;
+}
+inline void NamePart::set_has_child() {
+  _has_bits_[0] |= 0x00000004u;
+}
+inline void NamePart::clear_has_child() {
+  _has_bits_[0] &= ~0x00000004u;
+}
+inline void NamePart::clear_child() {
+  if (child_ != NULL) child_->::exec::shared::NamePart::Clear();
+  clear_has_child();
+}
+inline const ::exec::shared::NamePart& NamePart::child() const {
+  return child_ != NULL ? *child_ : *default_instance_->child_;
+}
+inline ::exec::shared::NamePart* NamePart::mutable_child() {
+  set_has_child();
+  if (child_ == NULL) child_ = new ::exec::shared::NamePart;
+  return child_;
+}
+inline ::exec::shared::NamePart* NamePart::release_child() {
+  clear_has_child();
+  ::exec::shared::NamePart* temp = child_;
+  child_ = NULL;
+  return temp;
+}
+inline void NamePart::set_allocated_child(::exec::shared::NamePart* child) {
+  delete child_;
+  child_ = child;
+  if (child) {
+    set_has_child();
+  } else {
+    clear_has_child();
+  }
+}
+
+// -------------------------------------------------------------------
+
+// SerializedField
+
+// optional .common.MajorType major_type = 1;
+inline bool SerializedField::has_major_type() const {
+  return (_has_bits_[0] & 0x00000001u) != 0;
+}
+inline void SerializedField::set_has_major_type() {
+  _has_bits_[0] |= 0x00000001u;
+}
+inline void SerializedField::clear_has_major_type() {
+  _has_bits_[0] &= ~0x00000001u;
+}
+inline void SerializedField::clear_major_type() {
+  if (major_type_ != NULL) major_type_->::common::MajorType::Clear();
+  clear_has_major_type();
+}
+inline const ::common::MajorType& SerializedField::major_type() const {
+  return major_type_ != NULL ? *major_type_ : *default_instance_->major_type_;
+}
+inline ::common::MajorType* SerializedField::mutable_major_type() {
+  set_has_major_type();
+  if (major_type_ == NULL) major_type_ = new ::common::MajorType;
+  return major_type_;
+}
+inline ::common::MajorType* SerializedField::release_major_type() {
+  clear_has_major_type();
+  ::common::MajorType* temp = major_type_;
+  major_type_ = NULL;
+  return temp;
+}
+inline void SerializedField::set_allocated_major_type(::common::MajorType* major_type) {
+  delete major_type_;
+  major_type_ = major_type;
+  if (major_type) {
+    set_has_major_type();
+  } else {
+    clear_has_major_type();
+  }
+}
+
+// optional .exec.shared.NamePart name_part = 2;
+inline bool SerializedField::has_name_part() const {
+  return (_has_bits_[0] & 0x00000002u) != 0;
+}
+inline void SerializedField::set_has_name_part() {
+  _has_bits_[0] |= 0x00000002u;
+}
+inline void SerializedField::clear_has_name_part() {
+  _has_bits_[0] &= ~0x00000002u;
+}
+inline void SerializedField::clear_name_part() {
+  if (name_part_ != NULL) name_part_->::exec::shared::NamePart::Clear();
+  clear_has_name_part();
+}
+inline const ::exec::shared::NamePart& SerializedField::name_part() const {
+  return name_part_ != NULL ? *name_part_ : *default_instance_->name_part_;
+}
+inline ::exec::shared::NamePart* SerializedField::mutable_name_part() {
+  set_has_name_part();
+  if (name_part_ == NULL) name_part_ = new ::exec::shared::NamePart;
+  return name_part_;
+}
+inline ::exec::shared::NamePart* SerializedField::release_name_part() {
+  clear_has_name_part();
+  ::exec::shared::NamePart* temp = name_part_;
+  name_part_ = NULL;
+  return temp;
 }
-inline void DrillPBError::set_error_id(const char* value) {
-  set_has_error_id();
-  if (error_id_ == &::google::protobuf::internal::kEmptyString) {
-    error_id_ = new ::std::string;
+inline void SerializedField::set_allocated_name_part(::exec::shared::NamePart* name_part) {
+  delete name_part_;
+  name_part_ = name_part;
+  if (name_part) {
+    set_has_name_part();
+  } else {
+    clear_has_name_part();
   }
-  error_id_->assign(value);
 }
-inline void DrillPBError::set_error_id(const char* value, size_t size) {
-  set_has_error_id();
-  if (error_id_ == &::google::protobuf::internal::kEmptyString) {
-    error_id_ = new ::std::string;
-  }
-  error_id_->assign(reinterpret_cast<const char*>(value), size);
+
+// repeated .exec.shared.SerializedField child = 3;
+inline int SerializedField::child_size() const {
+  return child_.size();
 }
-inline ::std::string* DrillPBError::mutable_error_id() {
-  set_has_error_id();
-  if (error_id_ == &::google::protobuf::internal::kEmptyString) {
-    error_id_ = new ::std::string;
-  }
-  return error_id_;
+inline void SerializedField::clear_child() {
+  child_.Clear();
 }
-inline ::std::string* DrillPBError::release_error_id() {
-  clear_has_error_id();
-  if (error_id_ == &::google::protobuf::internal::kEmptyString) {
-    return NULL;
-  } else {
-    ::std::string* temp = error_id_;
-    error_id_ = const_cast< ::std::string*>(&::google::protobuf::internal::kEmptyString);
-    return temp;
-  }
+inline const ::exec::shared::SerializedField& SerializedField::child(int index) const {
+  return child_.Get(index);
 }
-inline void DrillPBError::set_allocated_error_id(::std::string* error_id) {
-  if (error_id_ != &::google::protobuf::internal::kEmptyString) {
-    delete error_id_;
-  }
-  if (error_id) {
-    set_has_error_id();
-    error_id_ = error_id;
-  } else {
-    clear_has_error_id();
-    error_id_ = const_cast< ::std::string*>(&::google::protobuf::internal::kEmptyString);
-  }
+inline ::exec::shared::SerializedField* SerializedField::mutable_child(int index) {
+  return child_.Mutable(index);
+}
+inline ::exec::shared::SerializedField* SerializedField::add_child() {
+  return child_.Add();
+}
+inline const ::google::protobuf::RepeatedPtrField< ::exec::shared::SerializedField >&
+SerializedField::child() const {
+  return child_;
+}
+inline ::google::protobuf::RepeatedPtrField< ::exec::shared::SerializedField >*
+SerializedField::mutable_child() {
+  return &child_;
 }
 
-// optional .exec.DrillbitEndpoint endpoint = 2;
-inline bool DrillPBError::has_endpoint() const {
-  return (_has_bits_[0] & 0x00000002u) != 0;
+// optional int32 value_count = 4;
+inline bool SerializedField::has_value_count() const {
+  return (_has_bits_[0] & 0x00000008u) != 0;
 }
-inline void DrillPBError::set_has_endpoint() {
-  _has_bits_[0] |= 0x00000002u;
+inline void SerializedField::set_has_value_count() {
+  _has_bits_[0] |= 0x00000008u;
 }
-inline void DrillPBError::clear_has_endpoint() {
-  _has_bits_[0] &= ~0x00000002u;
+inline void SerializedField::clear_has_value_count() {
+  _has_bits_[0] &= ~0x00000008u;
 }
-inline void DrillPBError::clear_endpoint() {
-  if (endpoint_ != NULL) endpoint_->::exec::DrillbitEndpoint::Clear();
-  clear_has_endpoint();
+inline void SerializedField::clear_value_count() {
+  value_count_ = 0;
+  clear_has_value_count();
 }
-inline const ::exec::DrillbitEndpoint& DrillPBError::endpoint() const {
-  return endpoint_ != NULL ? *endpoint_ : *default_instance_->endpoint_;
+inline ::google::protobuf::int32 SerializedField::value_count() const {
+  return value_count_;
 }
-inline ::exec::DrillbitEndpoint* DrillPBError::mutable_endpoint() {
-  set_has_endpoint();
-  if (endpoint_ == NULL) endpoint_ = new ::exec::DrillbitEndpoint;
-  return endpoint_;
+inline void SerializedField::set_value_count(::google::protobuf::int32 value) {
+  set_has_value_count();
+  value_count_ = value;
 }
-inline ::exec::DrillbitEndpoint* DrillPBError::release_endpoint() {
-  clear_has_endpoint();
-  ::exec::DrillbitEndpoint* temp = endpoint_;
-  endpoint_ = NULL;
+
+// optional int32 var_byte_length = 5;
+inline bool SerializedField::has_var_byte_length() const {
+  return (_has_bits_[0] & 0x00000010u) != 0;
+}
+inline void SerializedField::set_has_var_byte_length() {
+  _has_bits_[0] |= 0x00000010u;
+}
+inline void SerializedField::clear_has_var_byte_length() {
+  _has_bits_[0] &= ~0x00000010u;
+}
+inline void SerializedField::clear_var_byte_length() {
+  var_byte_length_ = 0;
+  clear_has_var_byte_length();
+}
+inline ::google::protobuf::int32 SerializedField::var_byte_length() const {
+  return var_byte_length_;
+}
+inline void SerializedField::set_var_byte_length(::google::protobuf::int32 value) {
+  set_has_var_byte_length();
+  var_byte_length_ = value;
+}
+
+// optional int32 group_count = 6;
+inline bool SerializedField::has_group_count() const {
+  return (_has_bits_[0] & 0x00000020u) != 0;
+}
+inline void SerializedField::set_has_group_count() {
+  _has_bits_[0] |= 0x00000020u;
+}
+inline void SerializedField::clear_has_group_count() {
+  _has_bits_[0] &= ~0x00000020u;
+}
+inline void SerializedField::clear_group_count() {
+  group_count_ = 0;
+  clear_has_group_count();
+}
+inline ::google::protobuf::int32 SerializedField::group_count() const {
+  return group_count_;
+}
+inline void SerializedField::set_group_count(::google::protobuf::int32 value) {
+  set_has_group_count();
+  group_count_ = value;
+}
+
+// optional int32 buffer_length = 7;
+inline bool SerializedField::has_buffer_length() const {
+  return (_has_bits_[0] & 0x00000040u) != 0;
+}
+inline void SerializedField::set_has_buffer_length() {
+  _has_bits_[0] |= 0x00000040u;
+}
+inline void SerializedField::clear_has_buffer_length() {
+  _has_bits_[0] &= ~0x00000040u;
+}
+inline void SerializedField::clear_buffer_length() {
+  buffer_length_ = 0;
+  clear_has_buffer_length();
+}
+inline ::google::protobuf::int32 SerializedField::buffer_length() const {
+  return buffer_length_;
+}
+inline void SerializedField::set_buffer_length(::google::protobuf::int32 value) {
+  set_has_buffer_length();
+  buffer_length_ = value;
+}
+
+// -------------------------------------------------------------------
+
+// QueryProfile
+
+// optional .exec.shared.QueryId id = 1;
+inline bool QueryProfile::has_id() const {
+  return (_has_bits_[0] & 0x00000001u) != 0;
+}
+inline void QueryProfile::set_has_id() {
+  _has_bits_[0] |= 0x00000001u;
+}
+inline void QueryProfile::clear_has_id() {
+  _has_bits_[0] &= ~0x00000001u;
+}
+inline void QueryProfile::clear_id() {
+  if (id_ != NULL) id_->::exec::shared::QueryId::Clear();
+  clear_has_id();
+}
+inline const ::exec::shared::QueryId& QueryProfile::id() const {
+  return id_ != NULL ? *id_ : *default_instance_->id_;
+}
+inline ::exec::shared::QueryId* QueryProfile::mutable_id() {
+  set_has_id();
+  if (id_ == NULL) id_ = new ::exec::shared::QueryId;
+  return id_;
+}
+inline ::exec::shared::QueryId* QueryProfile::release_id() {
+  clear_has_id();
+  ::exec::shared::QueryId* temp = id_;
+  id_ = NULL;
   return temp;
 }
-inline void DrillPBError::set_allocated_endpoint(::exec::DrillbitEndpoint* endpoint) {
-  delete endpoint_;
-  endpoint_ = endpoint;
-  if (endpoint) {
-    set_has_endpoint();
+inline void QueryProfile::set_allocated_id(::exec::shared::QueryId* id) {
+  delete id_;
+  id_ = id;
+  if (id) {
+    set_has_id();
   } else {
-    clear_has_endpoint();
+    clear_has_id();
   }
 }
 
-// optional int32 error_type = 3;
-inline bool DrillPBError::has_error_type() const {
+// optional .exec.shared.QueryType type = 2;
+inline bool QueryProfile::has_type() const {
+  return (_has_bits_[0] & 0x00000002u) != 0;
+}
+inline void QueryProfile::set_has_type() {
+  _has_bits_[0] |= 0x00000002u;
+}
+inline void QueryProfile::clear_has_type() {
+  _has_bits_[0] &= ~0x00000002u;
+}
+inline void QueryProfile::clear_type() {
+  type_ = 1;
+  clear_has_type();
+}
+inline ::exec::shared::QueryType QueryProfile::type() const {
+  return static_cast< ::exec::shared::QueryType >(type_);
+}
+inline void QueryProfile::set_type(::exec::shared::QueryType value) {
+  assert(::exec::shared::QueryType_IsValid(value));
+  set_has_type();
+  type_ = value;
+}
+
+// optional int64 start = 3;
+inline bool QueryProfile::has_start() const {
   return (_has_bits_[0] & 0x00000004u) != 0;
 }
-inline void DrillPBError::set_has_error_type() {
+inline void QueryProfile::set_has_start() {
   _has_bits_[0] |= 0x00000004u;
 }
-inline void DrillPBError::clear_has_error_type() {
+inline void QueryProfile::clear_has_start() {
   _has_bits_[0] &= ~0x00000004u;
 }
-inline void DrillPBError::clear_error_type() {
-  error_type_ = 0;
-  clear_has_error_type();
+inline void QueryProfile::clear_start() {
+  start_ = GOOGLE_LONGLONG(0);
+  clear_has_start();
 }
-inline ::google::protobuf::int32 DrillPBError::error_type() const {
-  return error_type_;
+inline ::google::protobuf::int64 QueryProfile::start() const {
+  return start_;
 }
-inline void DrillPBError::set_error_type(::google::protobuf::int32 value) {
-  set_has_error_type();
-  error_type_ = value;
+inline void QueryProfile::set_start(::google::protobuf::int64 value) {
+  set_has_start();
+  start_ = value;
 }
 
-// optional string message = 4;
-inline bool DrillPBError::has_message() const {
+// optional int64 end = 4;
+inline bool QueryProfile::has_end() const {
   return (_has_bits_[0] & 0x00000008u) != 0;
 }
-inline void DrillPBError::set_has_message() {
+inline void QueryProfile::set_has_end() {
   _has_bits_[0] |= 0x00000008u;
 }
-inline void DrillPBError::clear_has_message() {
+inline void QueryProfile::clear_has_end() {
   _has_bits_[0] &= ~0x00000008u;
 }
-inline void DrillPBError::clear_message() {
-  if (message_ != &::google::protobuf::internal::kEmptyString) {
-    message_->clear();
+inline void QueryProfile::clear_end() {
+  end_ = GOOGLE_LONGLONG(0);
+  clear_has_end();
+}
+inline ::google::protobuf::int64 QueryProfile::end() const {
+  return end_;
+}
+inline void QueryProfile::set_end(::google::protobuf::int64 value) {
+  set_has_end();
+  end_ = value;
+}
+
+// optional string query = 5;
+inline bool QueryProfile::has_query() const {
+  return (_has_bits_[0] & 0x00000010u) != 0;
+}
+inline void QueryProfile::set_has_query() {
+  _has_bits_[0] |= 0x00000010u;
+}
+inline void QueryProfile::clear_has_query() {
+  _has_bits_[0] &= ~0x00000010u;
+}
+inline void QueryProfile::clear_query() {
+  if (query_ != &::google::protobuf::internal::kEmptyString) {
+    query_->clear();
   }
-  clear_has_message();
+  clear_has_query();
+}
+inline const ::std::string& QueryProfile::query() const {
+  return *query_;
+}
+inline void QueryProfile::set_query(const ::std::string& value) {
+  set_has_query();
+  if (query_ == &::google::protobuf::internal::kEmptyString) {
+    query_ = new ::std::string;
+  }
+  query_->assign(value);
+}
+inline void QueryProfile::set_query(const char* value) {
+  set_has_query();
+  if (query_ == &::google::protobuf::internal::kEmptyString) {
+    query_ = new ::std::string;
+  }
+  query_->assign(value);
+}
+inline void QueryProfile::set_query(const char* value, size_t size) {
+  set_has_query();
+  if (query_ == &::google::protobuf::internal::kEmptyString) {
+    query_ = new ::std::string;
+  }
+  query_->assign(reinterpret_cast<const char*>(value), size);
+}
+inline ::std::string* QueryProfile::mutable_query() {
+  set_has_query();
+  if (query_ == &::google::protobuf::internal::kEmptyString) {
+    query_ = new ::std::string;
+  }
+  return query_;
+}
+inline ::std::string* QueryProfile::release_query() {
+  clear_has_query();
+  if (query_ == &::google::protobuf::internal::kEmptyString) {
+    return NULL;
+  } else {
+    ::std::string* temp = query_;
+    query_ = const_cast< ::std::string*>(&::google::protobuf::internal::kEmptyString);
+    return temp;
+  }
+}
+inline void QueryProfile::set_allocated_query(::std::string* query) {
+  if (query_ != &::google::protobuf::internal::kEmptyString) {
+    delete query_;
+  }
+  if (query) {
+    set_has_query();
+    query_ = query;
+  } else {
+    clear_has_query();
+    query_ = const_cast< ::std::string*>(&::google::protobuf::internal::kEmptyString);
+  }
+}
+
+// optional string plan = 6;
+inline bool QueryProfile::has_plan() const {
+  return (_has_bits_[0] & 0x00000020u) != 0;
+}
+inline void QueryProfile::set_has_plan() {
+  _has_bits_[0] |= 0x00000020u;
+}
+inline void QueryProfile::clear_has_plan() {
+  _has_bits_[0] &= ~0x00000020u;
+}
+inline void QueryProfile::clear_plan() {
+  if (plan_ != &::google::protobuf::internal::kEmptyString) {
+    plan_->clear();
+  }
+  clear_has_plan();
 }
-inline const ::std::string& DrillPBError::message() const {
-  return *message_;
+inline const ::std::string& QueryProfile::plan() const {
+  return *plan_;
 }
-inline void DrillPBError::set_message(const ::std::string& value) {
-  set_has_message();
-  if (message_ == &::google::protobuf::internal::kEmptyString) {
-    message_ = new ::std::string;
+inline void QueryProfile::set_plan(const ::std::string& value) {
+  set_has_plan();
+  if (plan_ == &::google::protobuf::internal::kEmptyString) {
+    plan_ = new ::std::string;
   }
-  message_->assign(value);
+  plan_->assign(value);
 }
-inline void DrillPBError::set_message(const char* value) {
-  set_has_message();
-  if (message_ == &::google::protobuf::internal::kEmptyString) {
-    message_ = new ::std::string;
+inline void QueryProfile::set_plan(const char* value) {
+  set_has_plan();
+  if (plan_ == &::google::protobuf::internal::kEmptyString) {
+    plan_ = new ::std::string;
   }
-  message_->assign(value);
+  plan_->assign(value);
 }
-inline void DrillPBError::set_message(const char* value, size_t size) {
-  set_has_message();
-  if (message_ == &::google::protobuf::internal::kEmptyString) {
-    message_ = new ::std::string;
+inline void QueryProfile::set_plan(const char* value, size_t size) {
+  set_has_plan();
+  if (plan_ == &::google::protobuf::internal::kEmptyString) {
+    plan_ = new ::std::string;
   }
-  message_->assign(reinterpret_cast<const char*>(value), size);
+  plan_->assign(reinterpret_cast<const char*>(value), size);
 }
-inline ::std::string* DrillPBError::mutable_message() {
-  set_has_message();
-  if (message_ == &::google::protobuf::internal::kEmptyString) {
-    message_ = new ::std::string;
+inline ::std::string* QueryProfile::mutable_plan() {
+  set_has_plan();
+  if (plan_ == &::google::protobuf::internal::kEmptyString) {
+    plan_ = new ::std::string;
   }
-  return message_;
+  return plan_;
 }
-inline ::std::string* DrillPBError::release_message() {
-  clear_has_message();
-  if (message_ == &::google::protobuf::internal::kEmptyString) {
+inline ::std::string* QueryProfile::release_plan() {
+  clear_has_plan();
+  if (plan_ == &::google::protobuf::internal::kEmptyString) {
     return NULL;
   } else {
-    ::std::string* temp = message_;
-    message_ = const_cast< ::std::string*>(&::google::protobuf::internal::kEmptyString);
+    ::std::string* temp = plan_;
+    plan_ = const_cast< ::std::string*>(&::google::protobuf::internal::kEmptyString);
     return temp;
   }
 }
-inline void DrillPBError::set_allocated_message(::std::string* message) {
-  if (message_ != &::google::protobuf::internal::kEmptyString) {
-    delete message_;
+inline void QueryProfile::set_allocated_plan(::std::string* plan) {
+  if (plan_ != &::google::protobuf::internal::kEmptyString) {
+    delete plan_;
   }
-  if (message) {
-    set_has_message();
-    message_ = message;
+  if (plan) {
+    set_has_plan();
+    plan_ = plan;
   } else {
-    clear_has_message();
-    message_ = const_cast< ::std::string*>(&::google::protobuf::internal::kEmptyString);
+    clear_has_plan();
+    plan_ = const_cast< ::std::string*>(&::google::protobuf::internal::kEmptyString);
   }
 }
 
-// repeated .exec.shared.ParsingError parsing_error = 5;
-inline int DrillPBError::parsing_error_size() const {
-  return parsing_error_.size();
+// repeated .exec.shared.MajorFragmentProfile fragment_profile = 7;
+inline int QueryProfile::fragment_profile_size() const {
+  return fragment_profile_.size();
 }
-inline void DrillPBError::clear_parsing_error() {
-  parsing_error_.Clear();
+inline void QueryProfile::clear_fragment_profile() {
+  fragment_profile_.Clear();
 }
-inline const ::exec::shared::ParsingError& DrillPBError::parsing_error(int index) const {
-  return parsing_error_.Get(index);
+inline const ::exec::shared::MajorFragmentProfile& QueryProfile::fragment_profile(int index) const {
+  return fragment_profile_.Get(index);
 }
-inline ::exec::shared::ParsingError* DrillPBError::mutable_parsing_error(int index) {
-  return parsing_error_.Mutable(index);
+inline ::exec::shared::MajorFragmentProfile* QueryProfile::mutable_fragment_profile(int index) {
+  return fragment_profile_.Mutable(index);
 }
-inline ::exec::shared::ParsingError* DrillPBError::add_parsing_error() {
-  return parsing_error_.Add();
+inline ::exec::shared::MajorFragmentProfile* QueryProfile::add_fragment_profile() {
+  return fragment_profile_.Add();
 }
-inline const ::google::protobuf::RepeatedPtrField< ::exec::shared::ParsingError >&
-DrillPBError::parsing_error() const {
-  return parsing_error_;
+inline const ::google::protobuf::RepeatedPtrField< ::exec::shared::MajorFragmentProfile >&
+QueryProfile::fragment_profile() const {
+  return fragment_profile_;
 }
-inline ::google::protobuf::RepeatedPtrField< ::exec::shared::ParsingError >*
-DrillPBError::mutable_parsing_error() {
-  return &parsing_error_;
+inline ::google::protobuf::RepeatedPtrField< ::exec::shared::MajorFragmentProfile >*
+QueryProfile::mutable_fragment_profile() {
+  return &fragment_profile_;
 }
 
 // -------------------------------------------------------------------
 
-// ParsingError
+// MajorFragmentProfile
 
-// optional int32 start_column = 2;
-inline bool ParsingError::has_start_column() const {
+// optional int32 major_fragment_id = 1;
+inline bool MajorFragmentProfile::has_major_fragment_id() const {
   return (_has_bits_[0] & 0x00000001u) != 0;
 }
-inline void ParsingError::set_has_start_column() {
+inline void MajorFragmentProfile::set_has_major_fragment_id() {
   _has_bits_[0] |= 0x00000001u;
 }
-inline void ParsingError::clear_has_start_column() {
+inline void MajorFragmentProfile::clear_has_major_fragment_id() {
   _has_bits_[0] &= ~0x00000001u;
 }
-inline void ParsingError::clear_start_column() {
-  start_column_ = 0;
-  clear_has_start_column();
+inline void MajorFragmentProfile::clear_major_fragment_id() {
+  major_fragment_id_ = 0;
+  clear_has_major_fragment_id();
 }
-inline ::google::protobuf::int32 ParsingError::start_column() const {
-  return start_column_;
+inline ::google::protobuf::int32 MajorFragmentProfile::major_fragment_id() const {
+  return major_fragment_id_;
 }
-inline void ParsingError::set_start_column(::google::protobuf::int32 value) {
-  set_has_start_column();
-  start_column_ = value;
+inline void MajorFragmentProfile::set_major_fragment_id(::google::protobuf::int32 value) {
+  set_has_major_fragment_id();
+  major_fragment_id_ = value;
 }
 
-// optional int32 start_row = 3;
-inline bool ParsingError::has_start_row() const {
-  return (_has_bits_[0] & 0x00000002u) != 0;
+// repeated .exec.shared.MinorFragmentProfile minor_fragment_profile = 2;
+inline int MajorFragmentProfile::minor_fragment_profile_size() const {
+  return minor_fragment_profile_.size();
 }
-inline void ParsingError::set_has_start_row() {
-  _has_bits_[0] |= 0x00000002u;
+inline void MajorFragmentProfile::clear_minor_fragment_profile() {
+  minor_fragment_profile_.Clear();
 }
-inline void ParsingError::clear_has_start_row() {
-  _has_bits_[0] &= ~0x00000002u;
+inline const ::exec::shared::MinorFragmentProfile& MajorFragmentProfile::minor_fragment_profile(int index) const {
+  return minor_fragment_profile_.Get(index);
 }
-inline void ParsingError::clear_start_row() {
-  start_row_ = 0;
-  clear_has_start_row();
+inline ::exec::shared::MinorFragmentProfile* MajorFragmentProfile::mutable_minor_fragment_profile(int index) {
+  return minor_fragment_profile_.Mutable(index);
 }
-inline ::google::protobuf::int32 ParsingError::start_row() const {
-  return start_row_;
+inline ::exec::shared::MinorFragmentProfile* MajorFragmentProfile::add_minor_fragment_profile() {
+  return minor_fragment_profile_.Add();
 }
-inline void ParsingError::set_start_row(::google::protobuf::int32 value) {
-  set_has_start_row();
-  start_row_ = value;
+inline const ::google::protobuf::RepeatedPtrField< ::exec::shared::MinorFragmentProfile >&
+MajorFragmentProfile::minor_fragment_profile() const {
+  return minor_fragment_profile_;
+}
+inline ::google::protobuf::RepeatedPtrField< ::exec::shared::MinorFragmentProfile >*
+MajorFragmentProfile::mutable_minor_fragment_profile() {
+  return &minor_fragment_profile_;
 }
 
-// optional int32 end_column = 4;
-inline bool ParsingError::has_end_column() const {
-  return (_has_bits_[0] & 0x00000004u) != 0;
+// -------------------------------------------------------------------
+
+// MinorFragmentProfile
+
+// optional .exec.shared.FragmentState state = 1;
+inline bool MinorFragmentProfile::has_state() const {
+  return (_has_bits_[0] & 0x00000001u) != 0;
 }
-inline void ParsingError::set_has_end_column() {
-  _has_bits_[0] |= 0x00000004u;
+inline void MinorFragmentProfile::set_has_state() {
+  _has_bits_[0] |= 0x00000001u;
 }
-inline void ParsingError::clear_has_end_column() {
-  _has_bits_[0] &= ~0x00000004u;
+inline void MinorFragmentProfile::clear_has_state() {
+  _has_bits_[0] &= ~0x00000001u;
 }
-inline void ParsingError::clear_end_column() {
-  end_column_ = 0;
-  clear_has_end_column();
+inline void MinorFragmentProfile::clear_state() {
+  state_ = 0;
+  clear_has_state();
 }
-inline ::google::protobuf::int32 ParsingError::end_column() const {
-  return end_column_;
+inline ::exec::shared::FragmentState MinorFragmentProfile::state() const {
+  return static_cast< ::exec::shared::FragmentState >(state_);
 }
-inline void ParsingError::set_end_column(::google::protobuf::int32 value) {
-  set_has_end_column();
-  end_column_ = value;
+inline void MinorFragmentProfile::set_state(::exec::shared::FragmentState value) {
+  assert(::exec::shared::FragmentState_IsValid(value));
+  set_has_state();
+  state_ = value;
 }
 
-// optional int32 end_row = 5;
-inline bool ParsingError::has_end_row() const {
-  return (_has_bits_[0] & 0x00000008u) != 0;
+// optional .exec.shared.DrillPBError error = 2;
+inline bool MinorFragmentProfile::has_error() const {
+  return (_has_bits_[0] & 0x00000002u) != 0;
 }
-inline void ParsingError::set_has_end_row() {
-  _has_bits_[0] |= 0x00000008u;
+inline void MinorFragmentProfile::set_has_error() {
+  _has_bits_[0] |= 0x00000002u;
 }
-inline void ParsingError::clear_has_end_row() {
-  _has_bits_[0] &= ~0x00000008u;
+inline void MinorFragmentProfile::clear_has_error() {
+  _has_bits_[0] &= ~0x00000002u;
 }
-inline void ParsingError::clear_end_row() {
-  end_row_ = 0;
-  clear_has_end_row();
+inline void MinorFragmentProfile::clear_error() {
+  if (error_ != NULL) error_->::exec::shared::DrillPBError::Clear();
+  clear_has_error();
 }
-inline ::google::protobuf::int32 ParsingError::end_row() const {
-  return end_row_;
+inline const ::exec::shared::DrillPBError& MinorFragmentProfile::error() const {
+  return error_ != NULL ? *error_ : *default_instance_->error_;
 }
-inline void ParsingError::set_end_row(::google::protobuf::int32 value) {
-  set_has_end_row();
-  end_row_ = value;
+inline ::exec::shared::DrillPBError* MinorFragmentProfile::mutable_error() {
+  set_has_error();
+  if (error_ == NULL) error_ = new ::exec::shared::DrillPBError;
+  return error_;
+}
+inline ::exec::shared::DrillPBError* MinorFragmentProfile::release_error() {
+  clear_has_error();
+  ::exec::shared::DrillPBError* temp = error_;
+  error_ = NULL;
+  return temp;
+}
+inline void MinorFragmentProfile::set_allocated_error(::exec::shared::DrillPBError* error) {
+  delete error_;
+  error_ = error;
+  if (error) {
+    set_has_error();
+  } else {
+    clear_has_error();
+  }
 }
 
-// -------------------------------------------------------------------
-
-// RecordBatchDef
-
-// optional int32 record_count = 1;
-inline bool RecordBatchDef::has_record_count() const {
-  return (_has_bits_[0] & 0x00000001u) != 0;
+// optional int32 minor_fragment_id = 3;
+inline bool MinorFragmentProfile::has_minor_fragment_id() const {
+  return (_has_bits_[0] & 0x00000004u) != 0;
 }
-inline void RecordBatchDef::set_has_record_count() {
-  _has_bits_[0] |= 0x00000001u;
+inline void MinorFragmentProfile::set_has_minor_fragment_id() {
+  _has_bits_[0] |= 0x00000004u;
 }
-inline void RecordBatchDef::clear_has_record_count() {
-  _has_bits_[0] &= ~0x00000001u;
+inline void MinorFragmentProfile::clear_has_minor_fragment_id() {
+  _has_bits_[0] &= ~0x00000004u;
 }
-inline void RecordBatchDef::clear_record_count() {
-  record_count_ = 0;
-  clear_has_record_count();
+inline void MinorFragmentProfile::clear_minor_fragment_id() {
+  minor_fragment_id_ = 0;
+  clear_has_minor_fragment_id();
 }
-inline ::google::protobuf::int32 RecordBatchDef::record_count() const {
-  return record_count_;
+inline ::google::protobuf::int32 MinorFragmentProfile::minor_fragment_id() const {
+  return minor_fragment_id_;
 }
-inline void RecordBatchDef::set_record_count(::google::protobuf::int32 value) {
-  set_has_record_count();
-  record_count_ = value;
+inline void MinorFragmentProfile::set_minor_fragment_id(::google::protobuf::int32 value) {
+  set_has_minor_fragment_id();
+  minor_fragment_id_ = value;
 }
 
-// repeated .exec.shared.SerializedField field = 2;
-inline int RecordBatchDef::field_size() const {
-  return field_.size();
+// repeated .exec.shared.OperatorProfile operator_profile = 4;
+inline int MinorFragmentProfile::operator_profile_size() const {
+  return operator_profile_.size();
 }
-inline void RecordBatchDef::clear_field() {
-  field_.Clear();
+inline void MinorFragmentProfile::clear_operator_profile() {
+  operator_profile_.Clear();
 }
-inline const ::exec::shared::SerializedField& RecordBatchDef::field(int index) const {
-  return field_.Get(index);
+inline const ::exec::shared::OperatorProfile& MinorFragmentProfile::operator_profile(int index) const {
+  return operator_profile_.Get(index);
 }
-inline ::exec::shared::SerializedField* RecordBatchDef::mutable_field(int index) {
-  return field_.Mutable(index);
+inline ::exec::shared::OperatorProfile* MinorFragmentProfile::mutable_operator_profile(int index) {
+  return operator_profile_.Mutable(index);
 }
-inline ::exec::shared::SerializedField* RecordBatchDef::add_field() {
-  return field_.Add();
+inline ::exec::shared::OperatorProfile* MinorFragmentProfile::add_operator_profile() {
+  return operator_profile_.Add();
 }
-inline const ::google::protobuf::RepeatedPtrField< ::exec::shared::SerializedField >&
-RecordBatchDef::field() const {
-  return field_;
+inline const ::google::protobuf::RepeatedPtrField< ::exec::shared::OperatorProfile >&
+MinorFragmentProfile::operator_profile() const {
+  return operator_profile_;
 }
-inline ::google::protobuf::RepeatedPtrField< ::exec::shared::SerializedField >*
-RecordBatchDef::mutable_field() {
-  return &field_;
+inline ::google::protobuf::RepeatedPtrField< ::exec::shared::OperatorProfile >*
+MinorFragmentProfile::mutable_operator_profile() {
+  return &operator_profile_;
 }
 
-// optional bool is_selection_vector_2 = 3;
-inline bool RecordBatchDef::has_is_selection_vector_2() const {
-  return (_has_bits_[0] & 0x00000004u) != 0;
+// optional int64 start_time = 5;
+inline bool MinorFragmentProfile::has_start_time() const {
+  return (_has_bits_[0] & 0x00000010u) != 0;
 }
-inline void RecordBatchDef::set_has_is_selection_vector_2() {
-  _has_bits_[0] |= 0x00000004u;
+inline void MinorFragmentProfile::set_has_start_time() {
+  _has_bits_[0] |= 0x00000010u;
 }
-inline void RecordBatchDef::clear_has_is_selection_vector_2() {
-  _has_bits_[0] &= ~0x00000004u;
+inline void MinorFragmentProfile::clear_has_start_time() {
+  _has_bits_[0] &= ~0x00000010u;
 }
-inline void RecordBatchDef::clear_is_selection_vector_2() {
-  is_selection_vector_2_ = false;
-  clear_has_is_selection_vector_2();
+inline void MinorFragmentProfile::clear_start_time() {
+  start_time_ = GOOGLE_LONGLONG(0);
+  clear_has_start_time();
 }
-inline bool RecordBatchDef::is_selection_vector_2() const {
-  return is_selection_vector_2_;
+inline ::google::protobuf::int64 MinorFragmentProfile::start_time() const {
+  return start_time_;
 }
-inline void RecordBatchDef::set_is_selection_vector_2(bool value) {
-  set_has_is_selection_vector_2();
-  is_selection_vector_2_ = value;
+inline void MinorFragmentProfile::set_start_time(::google::protobuf::int64 value) {
+  set_has_start_time();
+  start_time_ = value;
 }
 
-// -------------------------------------------------------------------
-
-// NamePart
-
-// optional .exec.shared.NamePart.Type type = 1;
-inline bool NamePart::has_type() const {
-  return (_has_bits_[0] & 0x00000001u) != 0;
+// optional int64 end_time = 6;
+inline bool MinorFragmentProfile::has_end_time() const {
+  return (_has_bits_[0] & 0x00000020u) != 0;
 }
-inline void NamePart::set_has_type() {
-  _has_bits_[0] |= 0x00000001u;
+inline void MinorFragmentProfile::set_has_end_time() {
+  _has_bits_[0] |= 0x00000020u;
 }
-inline void NamePart::clear_has_type() {
-  _has_bits_[0] &= ~0x00000001u;
+inline void MinorFragmentProfile::clear_has_end_time() {
+  _has_bits_[0] &= ~0x00000020u;
 }
-inline void NamePart::clear_type() {
-  type_ = 0;
-  clear_has_type();
+inline void MinorFragmentProfile::clear_end_time() {
+  end_time_ = GOOGLE_LONGLONG(0);
+  clear_has_end_time();
 }
-inline ::exec::shared::NamePart_Type NamePart::type() const {
-  return static_cast< ::exec::shared::NamePart_Type >(type_);
+inline ::google::protobuf::int64 MinorFragmentProfile::end_time() const {
+  return end_time_;
 }
-inline void NamePart::set_type(::exec::shared::NamePart_Type value) {
-  assert(::exec::shared::NameP

<TRUNCATED>
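
The truncated header above is standard protobuf 2.x generated-accessor boilerplate: each optional field gets a has-bit in _has_bits_, string fields lazily replace the shared kEmptyString sentinel on first write, set_allocated_*() takes ownership of a caller-allocated object, and release_*() hands ownership back to the caller. A minimal usage sketch against the accessors shown above (the include path is an assumption; the message and field names are taken verbatim from the generated code):

#include "UserBitShared.pb.h"   // hypothetical include path for the generated header

void buildError() {
  exec::shared::DrillPBError err;
  err.set_error_id("e-123");               // copies the value and sets the has-bit
  err.set_message("row limit exceeded");   // first write allocates the backing string

  // set_allocated_*() transfers ownership of a heap object to the message:
  exec::DrillbitEndpoint* ep = new exec::DrillbitEndpoint;
  err.set_allocated_endpoint(ep);          // err now owns ep; do not delete it

  // release_*() transfers ownership back to the caller (NULL if the field is unset):
  std::string* msg = err.release_message();
  delete msg;                              // caller frees what it released
}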

[15/32] git commit: DRILL-875: Fixes for DRILL-707, DRILL-780, DRILL-835 (Schema change), DRILL-852, DRILL-876, DRILL-877, DRILL-878, DRILL-890

Posted by ja...@apache.org.
DRILL-875: Fixes for DRILL-707, DRILL-780, DRILL-835 (Schema change), DRILL-852, DRILL-876, DRILL-877, DRILL-878, DRILL-890


Project: http://git-wip-us.apache.org/repos/asf/incubator-drill/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-drill/commit/aaa4db74
Tree: http://git-wip-us.apache.org/repos/asf/incubator-drill/tree/aaa4db74
Diff: http://git-wip-us.apache.org/repos/asf/incubator-drill/diff/aaa4db74

Branch: refs/heads/master
Commit: aaa4db74b215e03ad0e1334cfc18964972d93a3b
Parents: ff39fb8
Author: Parth Chandra <pc...@maprtech.com>
Authored: Fri May 30 11:17:40 2014 -0700
Committer: Jacques Nadeau <ja...@apache.org>
Committed: Thu Jun 19 20:29:53 2014 -0700

----------------------------------------------------------------------
 contrib/native/client/CMakeLists.txt            |   25 +-
 .../native/client/example/querySubmitter.cpp    |   75 +-
 contrib/native/client/readme.linux              |   96 +
 .../parquet_scan_union_screen_physical.json     |    2 +-
 .../native/client/resources/simple_plan.json    |   12 +-
 contrib/native/client/scripts/fixProtodefs.sh   |    2 +-
 .../native/client/src/clientlib/CMakeLists.txt  |   15 +-
 .../client/src/clientlib/decimalUtils.cpp       |    2 +-
 .../native/client/src/clientlib/drillClient.cpp |  126 +-
 .../client/src/clientlib/drillClientImpl.cpp    |  539 +++-
 .../client/src/clientlib/drillClientImpl.hpp    |  163 +-
 contrib/native/client/src/clientlib/errmsgs.cpp |    6 +-
 contrib/native/client/src/clientlib/errmsgs.hpp |   44 +-
 contrib/native/client/src/clientlib/logger.cpp  |   69 +
 contrib/native/client/src/clientlib/logger.hpp  |   72 +
 .../native/client/src/clientlib/recordBatch.cpp |  109 +-
 .../native/client/src/clientlib/rpcDecoder.cpp  |    2 +
 .../native/client/src/clientlib/rpcEncoder.cpp  |    3 +
 contrib/native/client/src/clientlib/utils.hpp   |   47 +
 .../native/client/src/include/drill/common.hpp  |   25 +-
 .../client/src/include/drill/drillClient.hpp    |   73 +-
 .../client/src/include/drill/protobuf/User.pb.h |   40 +-
 .../client/src/include/drill/recordBatch.hpp    |  100 +-
 .../native/client/src/protobuf/BitControl.pb.cc |  458 +--
 .../native/client/src/protobuf/BitControl.pb.h  |  370 +--
 .../native/client/src/protobuf/CMakeLists.txt   |   56 +-
 contrib/native/client/src/protobuf/User.pb.cc   |   80 +-
 .../client/src/protobuf/UserBitShared.pb.cc     | 2618 ++++++++++++++-
 .../client/src/protobuf/UserBitShared.pb.h      | 2969 ++++++++++++++----
 29 files changed, 6260 insertions(+), 1938 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/aaa4db74/contrib/native/client/CMakeLists.txt
----------------------------------------------------------------------
diff --git a/contrib/native/client/CMakeLists.txt b/contrib/native/client/CMakeLists.txt
index a306780..9ac705b 100644
--- a/contrib/native/client/CMakeLists.txt
+++ b/contrib/native/client/CMakeLists.txt
@@ -15,7 +15,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 #
-cmake_minimum_required(VERSION 2.8)
+cmake_minimum_required(VERSION 2.6)
 
 project(drillclient)
 
@@ -26,20 +26,21 @@ set(CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} "${CMAKE_SOURCE_DIR}/cmakeModules/")
 
 
 # Find Boost
-set(Boost_USE_STATIC_LIBS OFF) 
-set(Boost_USE_MULTITHREADED ON)  
-set(Boost_USE_STATIC_RUNTIME OFF) 
-find_package(Boost REQUIRED COMPONENTS regex system date_time chrono thread log log_setup)
+set(Boost_USE_STATIC_LIBS OFF)
+set(Boost_USE_MULTITHREADED ON)
+set(Boost_USE_STATIC_RUNTIME OFF)
+find_package(Boost 1.53.0 REQUIRED COMPONENTS regex system date_time chrono thread )
 include_directories(${Boost_INCLUDE_DIRS})
 
-if(CMAKE_COMPILER_IS_GNUCXX) 
-    set(CMAKE_EXE_LINKER_FLAGS "-lrt -lpthread")                                                   
-endif()    
+if(CMAKE_COMPILER_IS_GNUCXX)
+    set(CMAKE_EXE_LINKER_FLAGS "-lrt -lpthread")
+    set(CMAKE_CXX_FLAGS "-fPIC")
+endif()
 
 add_definitions(-DBOOST_ALL_DYN_LINK)
 
 # Find Protobufs
-find_package(Protobuf REQUIRED)
+find_package(Protobuf REQUIRED )
 include_directories(${PROTOBUF_INCLUDE_DIR})
 
 #Find Zookeeper
@@ -50,13 +51,13 @@ find_package(Zookeeper  REQUIRED )
 #
 
 # Preprocess to fix protobuf .proto definitions
-add_subdirectory("${CMAKE_SOURCE_DIR}/src/protobuf") 
+add_subdirectory("${CMAKE_SOURCE_DIR}/src/protobuf")
 # protobuf includes are required by clientlib
 include_directories(${ProtoHeadersDir})
 include_directories(${ProtoIncludesDir})
 
 # Build the Client Library as a shared library
-add_subdirectory("${CMAKE_SOURCE_DIR}/src/clientlib") 
+add_subdirectory("${CMAKE_SOURCE_DIR}/src/clientlib")
 include_directories(${CMAKE_SOURCE_DIR}/src/include ${Zookeeper_INCLUDE_DIRS}  )
 
 # add a DEBUG preprocessor macro
@@ -68,7 +69,7 @@ set_property(
 # Link directory
 link_directories(/usr/local/lib)
 
-add_executable(querySubmitter example/querySubmitter.cpp ) 
+add_executable(querySubmitter example/querySubmitter.cpp )
 
 target_link_libraries(querySubmitter ${Boost_LIBRARIES} ${PROTOBUF_LIBRARY} drillClient protomsgs )
 

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/aaa4db74/contrib/native/client/example/querySubmitter.cpp
----------------------------------------------------------------------
diff --git a/contrib/native/client/example/querySubmitter.cpp b/contrib/native/client/example/querySubmitter.cpp
index 96e2c65..9d24e68 100644
--- a/contrib/native/client/example/querySubmitter.cpp
+++ b/contrib/native/client/example/querySubmitter.cpp
@@ -20,9 +20,23 @@
 #include <iostream>
 #include <stdio.h>
 #include <stdlib.h>
-#include <boost/asio.hpp>
 #include "drill/drillc.hpp"
 
+Drill::status_t SchemaListener(void* ctx, Drill::FieldDefPtr fields, Drill::DrillClientError* err){
+    if(!err){
+        printf("SCHEMA CHANGE DETECTED:\n");
+        for(size_t i=0; i<fields->size(); i++){
+            std::string name= fields->at(i)->getName();
+            printf("%s\t", name.c_str());
+        }
+        printf("\n");
+        return Drill::QRY_SUCCESS ;
+    }else{
+        std::cerr<< "ERROR: " << err->msg << std::endl;
+        return Drill::QRY_FAILURE;
+    }
+}
+
 Drill::status_t QueryResultsListener(void* ctx, Drill::RecordBatch* b, Drill::DrillClientError* err){
     if(!err){
         b->print(std::cout, 0); // print all rows
@@ -79,18 +93,19 @@ void print(const Drill::FieldMetadata* pFieldMetadata, void* buf, size_t sz){
     return;
 }
 
-int nOptions=5;
+int nOptions=6;
 
 struct Option{
     char name[32];
     char desc[128];
     bool required;
-}qsOptions[]= { 
+}qsOptions[]= {
     {"plan", "Plan files separated by semicolons", false},
     {"query", "Query strings, separated by semicolons", false},
     {"type", "Query type [physical|logical|sql]", true},
     {"connectStr", "Connect string", true},
-    {"api", "API type [sync|async]", true}
+    {"api", "API type [sync|async]", true},
+    {"logLevel", "Logging level [trace|debug|info|warn|error|fatal]", false}
 };
 
 std::map<std::string, std::string> qsOptionValues;
@@ -136,7 +151,7 @@ int parseArgs(int argc, char* argv[]){
             }
         }
     }
-    if(error){ 
+    if(error){
         printUsage();
         exit(1);
     }
@@ -170,7 +185,7 @@ int readPlans(const std::string& planList, std::vector<std::string>& plans){
         std::string plan((std::istreambuf_iterator<char>(f)), (std::istreambuf_iterator<char>()));
         std::cout << "plan:" << plan << std::endl;
         plans.push_back(plan);
-    } 
+    }
     return 0;
 }
 
@@ -201,6 +216,18 @@ bool validate(const std::string& type, const std::string& query, const std::stri
         return true;
 }
 
+Drill::logLevel_t getLogLevel(const char *s){
+    if(s!=NULL){
+        if(!strcmp(s, "trace")) return Drill::LOG_TRACE;
+        if(!strcmp(s, "debug")) return Drill::LOG_DEBUG;
+        if(!strcmp(s, "info")) return Drill::LOG_INFO;
+        if(!strcmp(s, "warn")) return Drill::LOG_WARNING;
+        if(!strcmp(s, "error")) return Drill::LOG_ERROR;
+        if(!strcmp(s, "fatal")) return Drill::LOG_FATAL;
+    }
+    return Drill::LOG_ERROR;
+}
+
 int main(int argc, char* argv[]) {
     try {
 
@@ -213,26 +240,29 @@ int main(int argc, char* argv[]) {
         std::string planList=qsOptionValues["plan"];
         std::string api=qsOptionValues["api"];
         std::string type_str=qsOptionValues["type"];
+        std::string logLevel=qsOptionValues["logLevel"];
 
-        exec::user::QueryType type;
+        exec::shared::QueryType type;
 
         if(!validate(type_str, queryList, planList)){
             exit(1);
         }
 
+        Drill::logLevel_t l=getLogLevel(logLevel.c_str());
+
         std::vector<std::string> queryInputs;
         if(type_str=="sql" ){
             readQueries(queryList, queryInputs);
-            type=exec::user::SQL;
+            type=exec::shared::SQL;
         }else if(type_str=="physical" ){
             readPlans(planList, queryInputs);
-            type=exec::user::PHYSICAL;
+            type=exec::shared::PHYSICAL;
         }else if(type_str == "logical"){
             readPlans(planList, queryInputs);
-            type=exec::user::LOGICAL;
+            type=exec::shared::LOGICAL;
         }else{
             readQueries(queryList, queryInputs);
-            type=exec::user::SQL;
+            type=exec::shared::SQL;
         }
 
         std::vector<std::string>::iterator queryInpIter;
@@ -245,9 +275,9 @@ int main(int argc, char* argv[]) {
 
         Drill::DrillClient client;
         // To log to file
-        //DrillClient::initLogging("/var/log/drill/", LOG_INFO);
+        //DrillClient::initLogging("/var/log/drill/", l);
         // To log to stderr
-        Drill::DrillClient::initLogging(NULL, Drill::LOG_INFO);
+        Drill::DrillClient::initLogging(NULL, l);
 
         if(client.connect(connectStr.c_str())!=Drill::CONN_SUCCESS){
             std::cerr<< "Failed to connect with error: "<< client.getError() << " (Using:"<<connectStr<<")"<<std::endl;
@@ -269,26 +299,27 @@ int main(int argc, char* argv[]) {
                 // get fields.
                 row=0;
                 Drill::RecordIterator* pRecIter=*recordIterIter;
-                std::vector<Drill::FieldMetadata*>& fields = pRecIter->getColDefs();
-                while((ret=pRecIter->next())==Drill::QRY_SUCCESS){
+                Drill::FieldDefPtr fields= pRecIter->getColDefs();
+                while((ret=pRecIter->next()), ret==Drill::QRY_SUCCESS || ret==Drill::QRY_SUCCESS_WITH_INFO){
+                    fields = pRecIter->getColDefs();
                     row++;
-                    if(row%4095==0){
-                        for(size_t i=0; i<fields.size(); i++){
-                            std::string name= fields[i]->getName();
+                    if( (ret==Drill::QRY_SUCCESS_WITH_INFO  && pRecIter->hasSchemaChanged() )|| ( row%100==1)){
+                        for(size_t i=0; i<fields->size(); i++){
+                            std::string name= fields->at(i)->getName();
                             printf("%s\t", name.c_str());
                         }
                         printf("\n");
                     }
                     printf("ROW: %ld\t", row);
-                    for(size_t i=0; i<fields.size(); i++){
+                    for(size_t i=0; i<fields->size(); i++){
                         void* pBuf; size_t sz;
                         pRecIter->getCol(i, &pBuf, &sz);
-                        print(fields[i], pBuf, sz);
+                        print(fields->at(i), pBuf, sz);
                     }
                     printf("\n");
                 }
                 if(ret!=Drill::QRY_NO_MORE_DATA){
-                    std::cerr<< pRecIter->getError();
+                    std::cerr<< pRecIter->getError() << std::endl;
                 }
                 client.freeQueryIterator(&pRecIter);
             }
@@ -296,11 +327,13 @@ int main(int argc, char* argv[]) {
             for(queryInpIter = queryInputs.begin(); queryInpIter != queryInputs.end(); queryInpIter++) {
                 Drill::QueryHandle_t* qryHandle = new Drill::QueryHandle_t;
                 client.submitQuery(type, *queryInpIter, QueryResultsListener, NULL, qryHandle);
+                client.registerSchemaChangeListener(qryHandle, SchemaListener);
                 queryHandles.push_back(qryHandle);
             }
             client.waitForResults();
             for(queryHandleIter = queryHandles.begin(); queryHandleIter != queryHandles.end(); queryHandleIter++) {
                 client.freeQueryResources(*queryHandleIter);
+                delete *queryHandleIter;
             }
         }
         client.close();

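For reference, the iterator API exercised above boils down to the following consumption loop (a minimal sketch, not part of this commit, assuming a connected Drill::DrillClient named client and a placeholder query against a hypothetical table "t"):

    Drill::DrillClientError* qErr=NULL;
    Drill::RecordIterator* pIter=client.submitQuery(exec::shared::SQL,
            "select * from t", qErr);                // "t" is a placeholder table
    Drill::status_t ret;
    while((ret=pIter->next())==Drill::QRY_SUCCESS || ret==Drill::QRY_SUCCESS_WITH_INFO){
        // re-read column defs; QRY_SUCCESS_WITH_INFO can signal a schema change
        Drill::FieldDefPtr fields=pIter->getColDefs();
        for(size_t i=0; i<fields->size(); i++){
            void* pBuf; size_t sz;
            pIter->getCol(i, &pBuf, &sz);            // pointer into the current batch
        }
    }
    if(ret!=Drill::QRY_NO_MORE_DATA){
        std::cerr << pIter->getError() << std::endl;
    }
    client.freeQueryIterator(&pIter);
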
http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/aaa4db74/contrib/native/client/readme.linux
----------------------------------------------------------------------
diff --git a/contrib/native/client/readme.linux b/contrib/native/client/readme.linux
new file mode 100644
index 0000000..fbdb6e4
--- /dev/null
+++ b/contrib/native/client/readme.linux
@@ -0,0 +1,96 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+CentOS 6.5 build
+
+Install Prerequisites
+---------------------
+0) Install development tools
+    $> yum groupinstall 'Development Tools'
+
+1) CMake 2.8
+    $> yum install cmake28
+
+2.1) Download protobuf 2.5 from :
+    http://rpm.pbone.net/index.php3/stat/4/idpl/23552166/dir/centos_6/com/protobuf-2.5.0-16.1.x86_64.rpm.html
+    http://rpm.pbone.net/index.php3/stat/4/idpl/23552167/dir/centos_6/com/protobuf-compiler-2.5.0-16.1.x86_64.rpm.html
+    http://rpm.pbone.net/index.php3/stat/4/idpl/23552169/dir/centos_6/com/protobuf-devel-2.5.0-16.1.x86_64.rpm.html
+
+2.2) Install Protobufs
+    $> sudo yum install protobuf
+    $> sudo yum install protobuf-compiler
+    $> sudo yum install protobuf-devel
+
+3) Zookeeper
+3.1) Install Zookeeper prerequisites
+    - autoconf 2.59 or greater (should be installed with dev tools)
+    - cppunit 1.10.x or higher
+
+3.1.1) Install cppunit
+    $> sudo yum install cppunit
+    $> sudo yum install cppunit-devel
+
+3.2) Download Zookeeper from :
+    - http://apache.mirror.quintex.com/zookeeper/zookeeper-3.4.6/
+    - untar and then follow instructions in ZOOKEEPER_DIR/src/c/README to build and install the client libs
+
+3.3) Run autoreconf
+    $> autoreconf -if
+
+3.4) Build Zookeeper libs
+    $> ./configure --enable-debug --with-syncapi --enable-static --enable-shared
+    $> make && sudo make install
+
+4) Install boost. The minimum version required is 1.53, which will probably have to be built from source
+
+    # Remove any previous boost
+    $> sudo yum -y erase boost
+
+    # fetch the boost source rpm and create binary rpms
+    $> wget ftp://ftp.icm.edu.pl/vol/rzm2/linux-fedora-secondary/development/rawhide/source/SRPMS/b/boost-1.53.0-6.fc19.src.rpm
+    $> rpmbuild --rebuild boost-1.53.0-6.fc19.src.rpm
+
+    #install the binary rpms
+    #(Note: the "rpm" utility does not clean up old versions very well.)
+    $> sudo yum -y install ~/rpmbuild/RPMS/x86_64/*
+
+OR
+    Download and build using boost build.
+    See this link for how to build: http://www.boost.org/doc/libs/1_53_0/more/getting_started/unix-variants.html#prepare-to-use-a-boost-library-binary
+
+
+Build drill client
+-------------------
+    $> cd DRILL_DIR/contrib/native/client
+    $> mkdir build
+    $> cd build && cmake28 -G "Unix Makefiles" -D CMAKE_BUILD_TYPE=Debug ..
+    $> make
+
+Test
+----
+Run query submitter from the command line
+    $> querySubmitter query='select * from dfs.`/Users/pchandra/work/data/tpc-h/customer.parquet`' type=sql connectStr=local=10.250.0.146:31010 api=async logLevel=trace
+
+Valgrind
+--------
+Examples to run Valgrind and see the log in Valkyrie
+    $> valgrind --leak-check=yes --xml=yes --xml-file=qs-vg-log-a.xml querySubmitter query='select LINEITEM from dfs.`/Users/pchandra/work/data/tpc-h/customer.parquet`' type=sql connectStr=local=10.250.0.146:31010 api=async logLevel=trace
+    $> valkyrie -l qs-vg-log-a.xml
+
+

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/aaa4db74/contrib/native/client/resources/parquet_scan_union_screen_physical.json
----------------------------------------------------------------------
diff --git a/contrib/native/client/resources/parquet_scan_union_screen_physical.json b/contrib/native/client/resources/parquet_scan_union_screen_physical.json
index e677b15..81a62a3 100644
--- a/contrib/native/client/resources/parquet_scan_union_screen_physical.json
+++ b/contrib/native/client/resources/parquet_scan_union_screen_physical.json
@@ -34,7 +34,7 @@
         @id: 3,
         child: 2,
         pop: "screen"
-      } 
+      }
     ]
 }
 

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/aaa4db74/contrib/native/client/resources/simple_plan.json
----------------------------------------------------------------------
diff --git a/contrib/native/client/resources/simple_plan.json b/contrib/native/client/resources/simple_plan.json
index ad3cdc7..8fede75 100644
--- a/contrib/native/client/resources/simple_plan.json
+++ b/contrib/native/client/resources/simple_plan.json
@@ -13,7 +13,7 @@
     cp: {type: "classpath"}
   },
   query:[
-    
+
                 {
                   @id:"1",
                   op: "scan",
@@ -38,7 +38,7 @@
                   input:"2",
                   op: "filter",
                   expr: "donuts.ppu < 1.00"
-                }, 
+                },
                 {
                   @id:"4",
                   input:"3",
@@ -51,7 +51,7 @@
                   input:"4",
                   op: "collapsingaggregate",
                   within: "ppusegment",
-                  carryovers: ["donuts.ppu"], 
+                  carryovers: ["donuts.ppu"],
                   aggregations: [
                     { ref: "donuts.typeCount",  expr: "count(1)" },
                     { ref: "donuts.quantity",  expr: "sum(quantity)" },
@@ -65,7 +65,7 @@
                   orderings: [
                     {order: "DESC", expr: "donuts.ppu" }
                   ]
-                }, 
+                },
                 {
                   @id:"7",
                   input:"6",
@@ -80,7 +80,7 @@
                   op: "limit",
           first: 0,
           last: 100
-        }, 
+        },
                 {
                   @id:"9",
                   input:"8",
@@ -88,7 +88,7 @@
                   memo: "output sink",
                   storageengine: "console",
                   target: {pipe: "STD_OUT"}
-                }      
+                }
   ]
 }
 

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/aaa4db74/contrib/native/client/scripts/fixProtodefs.sh
----------------------------------------------------------------------
diff --git a/contrib/native/client/scripts/fixProtodefs.sh b/contrib/native/client/scripts/fixProtodefs.sh
index 7cb9710..f3ce781 100755
--- a/contrib/native/client/scripts/fixProtodefs.sh
+++ b/contrib/native/client/scripts/fixProtodefs.sh
@@ -43,7 +43,7 @@ main() {
 
         if [ -e ${TARGDIR}/${FNAME} ]
         then
-            if [ ${SRCDIR}/${FNAME} -nt ${TARGDIR}/${FNAME} ] 
+            if [ ${SRCDIR}/${FNAME} -nt ${TARGDIR}/${FNAME} ]
             then
                 fixFile ${FNAME}
             fi

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/aaa4db74/contrib/native/client/src/clientlib/CMakeLists.txt
----------------------------------------------------------------------
diff --git a/contrib/native/client/src/clientlib/CMakeLists.txt b/contrib/native/client/src/clientlib/CMakeLists.txt
index d07f930..7cd5dfb 100644
--- a/contrib/native/client/src/clientlib/CMakeLists.txt
+++ b/contrib/native/client/src/clientlib/CMakeLists.txt
@@ -18,14 +18,15 @@
 
 # Drill Client library
 
-set (CLIENTLIB_SRC_FILES 
+set (CLIENTLIB_SRC_FILES
     ${CMAKE_CURRENT_SOURCE_DIR}/decimalUtils.cpp
-    ${CMAKE_CURRENT_SOURCE_DIR}/drillClient.cpp 
-    ${CMAKE_CURRENT_SOURCE_DIR}/drillClientImpl.cpp 
-    ${CMAKE_CURRENT_SOURCE_DIR}/recordBatch.cpp 
-    ${CMAKE_CURRENT_SOURCE_DIR}/rpcEncoder.cpp 
-    ${CMAKE_CURRENT_SOURCE_DIR}/rpcDecoder.cpp 
-    ${CMAKE_CURRENT_SOURCE_DIR}/errmsgs.cpp 
+    ${CMAKE_CURRENT_SOURCE_DIR}/drillClient.cpp
+    ${CMAKE_CURRENT_SOURCE_DIR}/drillClientImpl.cpp
+    ${CMAKE_CURRENT_SOURCE_DIR}/recordBatch.cpp
+    ${CMAKE_CURRENT_SOURCE_DIR}/rpcEncoder.cpp
+    ${CMAKE_CURRENT_SOURCE_DIR}/rpcDecoder.cpp
+    ${CMAKE_CURRENT_SOURCE_DIR}/errmsgs.cpp
+    ${CMAKE_CURRENT_SOURCE_DIR}/logger.cpp
     )
 
 include_directories(${CMAKE_CURRENT_SOURCE_DIR} ${CMAKE_CURRENT_SOURCE_DIR}/../include )

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/aaa4db74/contrib/native/client/src/clientlib/decimalUtils.cpp
----------------------------------------------------------------------
diff --git a/contrib/native/client/src/clientlib/decimalUtils.cpp b/contrib/native/client/src/clientlib/decimalUtils.cpp
index 39439c5..3885faa 100644
--- a/contrib/native/client/src/clientlib/decimalUtils.cpp
+++ b/contrib/native/client/src/clientlib/decimalUtils.cpp
@@ -60,7 +60,7 @@ DecimalValue getDecimalValueFromByteBuf(SlicedByteBuf& data, size_t startIndex,
     bool needsEndiannessSwap = !truncateScale;
 
     // Initialize the BigDecimal, first digit in the ByteBuf has the sign so mask it out
-    cpp_int decimalDigits = (needsEndiannessSwap ? 
+    cpp_int decimalDigits = (needsEndiannessSwap ?
             bswap_32(data.getUint32(startIndex)) & 0x7FFFFFFF :
             (data.getUint32(startIndex) & 0x7FFFFFFF));
 

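A small worked example of the sign masking above (values hypothetical):

    uint32_t raw    = 0x80000005;          // sign bit set, digit word = 5
    uint32_t digits = raw & 0x7FFFFFFF;    // 0x00000005, sign bit cleared
    // When needsEndiannessSwap is true, bswap_32() runs first, then the mask.
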
http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/aaa4db74/contrib/native/client/src/clientlib/drillClient.cpp
----------------------------------------------------------------------
diff --git a/contrib/native/client/src/clientlib/drillClient.cpp b/contrib/native/client/src/clientlib/drillClient.cpp
index 9a9b919..10480b4 100644
--- a/contrib/native/client/src/clientlib/drillClient.cpp
+++ b/contrib/native/client/src/clientlib/drillClient.cpp
@@ -17,17 +17,11 @@
  */
 
 
-#include <boost/format.hpp>
-#include <boost/log/expressions.hpp>
-#include <boost/log/sinks/text_file_backend.hpp>
-#include <boost/log/utility/setup/file.hpp>
-#include <boost/log/utility/setup/common_attributes.hpp>
-#include <boost/log/sources/severity_logger.hpp>
-
 #include "drill/drillClient.hpp"
 #include "drill/recordBatch.hpp"
 #include "drillClientImpl.hpp"
 #include "errmsgs.hpp"
+#include "logger.hpp"
 
 #include "Types.pb.h"
 
@@ -50,84 +44,96 @@ DrillClientInitializer::~DrillClientInitializer(){
 
 logLevel_t DrillClientConfig::s_logLevel=LOG_ERROR;
 uint64_t DrillClientConfig::s_bufferLimit=-1;
-boost::mutex DrillClientConfig::s_mutex; 
+int32_t DrillClientConfig::s_socketTimeout=180;
+boost::mutex DrillClientConfig::s_mutex;
 
 DrillClientConfig::DrillClientConfig(){
     initLogging(NULL);
 }
 
+DrillClientConfig::~DrillClientConfig(){
+    Logger::close();
+}
+
 void DrillClientConfig::initLogging(const char* path){
-    if(path!=NULL){
-        std::string f=std::string(path)+"drill_clientlib_%N.log";
-        try{
-            boost::log::add_file_log
-                (
-                 boost::log::keywords::file_name = f.c_str(),
-                 boost::log::keywords::rotation_size = 10 * 1024 * 1024,
-                 boost::log::keywords::time_based_rotation = 
-                 boost::log::sinks::file::rotation_at_time_point(0, 0, 0),
-                 boost::log::keywords::format = "[%TimeStamp%]: %Message%"
-                );
-        }catch(std::exception& e){
-            // do nothing. Logging will happen to stderr
-            BOOST_LOG_TRIVIAL(error) << "Logging could not be initialized. Logging to stderr." ;
-        }
-    }
-    boost::log::add_common_attributes();
-    boost::log::core::get()->set_filter(boost::log::trivial::severity >= s_logLevel);
+    Logger::init(path);
 }
 
 void DrillClientConfig::setLogLevel(logLevel_t l){
-    boost::lock_guard<boost::mutex> bufferLock(DrillClientConfig::s_mutex);
+    boost::lock_guard<boost::mutex> configLock(DrillClientConfig::s_mutex);
     s_logLevel=l;
-    boost::log::core::get()->set_filter(boost::log::trivial::severity >= s_logLevel);
+    Logger::s_level=l;
+    //boost::log::core::get()->set_filter(boost::log::trivial::severity >= s_logLevel);
 }
 
 void DrillClientConfig::setBufferLimit(uint64_t l){
-    boost::lock_guard<boost::mutex> bufferLock(DrillClientConfig::s_mutex);
+    boost::lock_guard<boost::mutex> configLock(DrillClientConfig::s_mutex);
     s_bufferLimit=l;
 }
 
 uint64_t DrillClientConfig::getBufferLimit(){
-    boost::lock_guard<boost::mutex> bufferLock(DrillClientConfig::s_mutex);
+    boost::lock_guard<boost::mutex> configLock(DrillClientConfig::s_mutex);
     return s_bufferLimit;
 }
 
+void DrillClientConfig::setSocketTimeout(int32_t t){
+    boost::lock_guard<boost::mutex> configLock(DrillClientConfig::s_mutex);
+    s_socketTimeout=t;
+}
+
+int32_t DrillClientConfig::getSocketTimeout(){
+    boost::lock_guard<boost::mutex> configLock(DrillClientConfig::s_mutex);
+    return s_socketTimeout;
+}
+
 logLevel_t DrillClientConfig::getLogLevel(){
-    boost::lock_guard<boost::mutex> bufferLock(DrillClientConfig::s_mutex);
+    boost::lock_guard<boost::mutex> configLock(DrillClientConfig::s_mutex);
     return s_logLevel;
 }
 
 RecordIterator::~RecordIterator(){
     if(m_pColDefs!=NULL){
-        for(std::vector<Drill::FieldMetadata*>::iterator it=m_pColDefs->begin(); 
-                it!=m_pColDefs->end(); 
+        for(std::vector<Drill::FieldMetadata*>::iterator it=m_pColDefs->begin();
+                it!=m_pColDefs->end();
                 ++it){
             delete *it;
         }
     }
-    delete this->m_pColDefs;
-    this->m_pColDefs=NULL;
     delete this->m_pQueryResult;
     this->m_pQueryResult=NULL;
+    if(this->m_pCurrentRecordBatch!=NULL){
+        DRILL_LOG(LOG_TRACE) << "Deleted last Record batch " << (void*) m_pCurrentRecordBatch << std::endl;
+        delete this->m_pCurrentRecordBatch; this->m_pCurrentRecordBatch=NULL;
+    }
 }
 
-std::vector<Drill::FieldMetadata*>&  RecordIterator::getColDefs(){
+FieldDefPtr RecordIterator::getColDefs(){
     if(m_pQueryResult->hasError()){
         return DrillClientQueryResult::s_emptyColDefs;
     }
     //NOTE: if query is cancelled, return whatever you have. Client applications job to deal with it.
-    if(this->m_pColDefs==NULL){
+    if(this->m_pColDefs==NULL || this->hasSchemaChanged()){
         if(this->m_pCurrentRecordBatch==NULL){
             this->m_pQueryResult->waitForData();
             if(m_pQueryResult->hasError()){
                 return DrillClientQueryResult::s_emptyColDefs;
             }
         }
-        std::vector<Drill::FieldMetadata*>* pColDefs = new std::vector<Drill::FieldMetadata*>;
+        if(this->hasSchemaChanged()){
+            if(m_pColDefs!=NULL){
+                for(std::vector<Drill::FieldMetadata*>::iterator it=m_pColDefs->begin();
+                        it!=m_pColDefs->end();
+                        ++it){
+                    delete *it;
+                }
+                m_pColDefs->clear();
+                //delete m_pColDefs; m_pColDefs=NULL;
+            }
+        }
+        FieldDefPtr pColDefs(  new std::vector<Drill::FieldMetadata*>);
         {   //lock after we come out of the  wait.
             boost::lock_guard<boost::mutex> bufferLock(this->m_recordBatchMutex);
-            std::vector<Drill::FieldMetadata*>&  currentColDefs=DrillClientQueryResult::s_emptyColDefs;
+            boost::shared_ptr< std::vector<Drill::FieldMetadata*> >  currentColDefs=DrillClientQueryResult::s_emptyColDefs;
             if(this->m_pCurrentRecordBatch!=NULL){
                 currentColDefs=this->m_pCurrentRecordBatch->getColumnDefs();
             }else{
@@ -138,7 +144,7 @@ std::vector<Drill::FieldMetadata*>&  RecordIterator::getColDefs(){
                     currentColDefs=pR->getColumnDefs();
                 }
             }
-            for(std::vector<Drill::FieldMetadata*>::iterator it=currentColDefs.begin(); it!=currentColDefs.end(); ++it){
+            for(std::vector<Drill::FieldMetadata*>::iterator it=currentColDefs->begin(); it!=currentColDefs->end(); ++it){
                 Drill::FieldMetadata* fmd= new Drill::FieldMetadata;
                 fmd->copy(*(*it));//Yup, that's 2 stars
                 pColDefs->push_back(fmd);
@@ -146,7 +152,7 @@ std::vector<Drill::FieldMetadata*>&  RecordIterator::getColDefs(){
         }
         this->m_pColDefs = pColDefs;
     }
-    return *this->m_pColDefs;
+    return this->m_pColDefs;
 }
 
 status_t RecordIterator::next(){
@@ -160,12 +166,19 @@ status_t RecordIterator::next(){
     if(!this->m_pQueryResult->isCancelled()){
         if(this->m_pCurrentRecordBatch==NULL || this->m_currentRecord==this->m_pCurrentRecordBatch->getNumRecords()){
             boost::lock_guard<boost::mutex> bufferLock(this->m_recordBatchMutex);
-            delete this->m_pCurrentRecordBatch; //free the previous record batch
+            if(this->m_pCurrentRecordBatch !=NULL){
+                DRILL_LOG(LOG_TRACE) << "Deleted old Record batch " << (void*) m_pCurrentRecordBatch << std::endl;
+                delete this->m_pCurrentRecordBatch; //free the previous record batch
+            }
             this->m_currentRecord=0;
             this->m_pCurrentRecordBatch=this->m_pQueryResult->getNext();
-            BOOST_LOG_TRIVIAL(trace) << "Fetched new Record batch " ;
+            if(this->m_pCurrentRecordBatch != NULL){
+                DRILL_LOG(LOG_TRACE) << "Fetched new Record batch " << std::endl;
+            }else{
+                DRILL_LOG(LOG_TRACE) << "No new Record batch found " << std::endl;
+            }
             if(this->m_pCurrentRecordBatch==NULL || this->m_pCurrentRecordBatch->getNumRecords()==0){
-                BOOST_LOG_TRIVIAL(trace) << "No more data." ;
+                DRILL_LOG(LOG_TRACE) << "No more data." << std::endl;
                 ret = QRY_NO_MORE_DATA;
             }else if(this->m_pCurrentRecordBatch->hasSchemaChanged()){
                 ret=QRY_SUCCESS_WITH_INFO;
@@ -213,11 +226,16 @@ status_t RecordIterator::cancel(){
     return QRY_CANCEL;
 }
 
-void RecordIterator::registerSchemaChangeListener(pfnSchemaListener* l){
-    //TODO:
+bool RecordIterator::hasSchemaChanged(){
+    return m_currentRecord==0 && m_pCurrentRecordBatch!=NULL && m_pCurrentRecordBatch->hasSchemaChanged();
 }
 
-std::string& RecordIterator::getError(){
+void RecordIterator::registerSchemaChangeListener(pfnSchemaListener l){
+    assert(m_pQueryResult!=NULL);
+    this->m_pQueryResult->registerSchemaChangeListener(l);
+}
+
+const std::string& RecordIterator::getError(){
     return m_pQueryResult->getError()->msg;
 }
 
@@ -243,7 +261,7 @@ connectionStatus_t DrillClient::connect(const char* connectStr ){
     ret=this->m_pImpl->connect(connectStr);
 
     if(ret==CONN_SUCCESS)
-        ret=this->m_pImpl->ValidateHandShake()?CONN_SUCCESS:CONN_HANDSHAKE_FAILED;
+        ret=this->m_pImpl->validateHandShake()?CONN_SUCCESS:CONN_HANDSHAKE_FAILED;
     return ret;
 
 }
@@ -256,13 +274,13 @@ void DrillClient::close() {
     this->m_pImpl->Close();
 }
 
-status_t DrillClient::submitQuery(exec::user::QueryType t, const std::string& plan, pfnQueryResultsListener listener, void* listenerCtx, QueryHandle_t* qHandle){
+status_t DrillClient::submitQuery(::exec::shared::QueryType t, const std::string& plan, pfnQueryResultsListener listener, void* listenerCtx, QueryHandle_t* qHandle){
     DrillClientQueryResult* pResult=this->m_pImpl->SubmitQuery(t, plan, listener, listenerCtx);
     *qHandle=(QueryHandle_t)pResult;
-    return QRY_SUCCESS; 
+    return QRY_SUCCESS;
 }
 
-RecordIterator* DrillClient::submitQuery(exec::user::QueryType t, const std::string& plan, DrillClientError* err){
+RecordIterator* DrillClient::submitQuery(::exec::shared::QueryType t, const std::string& plan, DrillClientError* err){
     RecordIterator* pIter=NULL;
     DrillClientQueryResult* pResult=this->m_pImpl->SubmitQuery(t, plan, NULL, NULL);
     if(pResult){
@@ -280,6 +298,12 @@ void DrillClient::waitForResults(){
     this->m_pImpl->waitForResults();
 }
 
+void DrillClient::registerSchemaChangeListener(QueryHandle_t* handle, pfnSchemaListener l){
+    if(handle!=NULL){
+        ((DrillClientQueryResult*)(*handle))->registerSchemaChangeListener(l);
+    }
+}
+
 void DrillClient::freeQueryResources(QueryHandle_t* handle){
     delete (DrillClientQueryResult*)(*handle);
     *handle=NULL;

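Putting the updated client API together, the async path now reads roughly as follows (a sketch, not part of this commit; the connect string and query are placeholders, and QueryResultsListener/SchemaListener stand for application callbacks like those in querySubmitter above):

    Drill::DrillClient::initLogging(NULL, Drill::LOG_DEBUG); // NULL path logs to stderr
    Drill::DrillClientConfig::setSocketTimeout(180);         // seconds

    Drill::DrillClient client;
    if(client.connect("zk=localhost:2181/drill/mycluster")==Drill::CONN_SUCCESS){
        Drill::QueryHandle_t qh;
        client.submitQuery(exec::shared::SQL, "select * from t",
                QueryResultsListener, NULL, &qh);
        client.registerSchemaChangeListener(&qh, SchemaListener);
        client.waitForResults();
        client.freeQueryResources(&qh);
    }
    client.close();
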
http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/aaa4db74/contrib/native/client/src/clientlib/drillClientImpl.cpp
----------------------------------------------------------------------
diff --git a/contrib/native/client/src/clientlib/drillClientImpl.cpp b/contrib/native/client/src/clientlib/drillClientImpl.cpp
index 14b4f7d..0767396 100644
--- a/contrib/native/client/src/clientlib/drillClientImpl.cpp
+++ b/contrib/native/client/src/clientlib/drillClientImpl.cpp
@@ -20,53 +20,73 @@
 #include <string.h>
 #include <boost/asio.hpp>
 #include <boost/bind.hpp>
+#include <boost/date_time/posix_time/posix_time_duration.hpp>
 #include <boost/lexical_cast.hpp>
 #include <boost/thread.hpp>
-#include <boost/log/trivial.hpp>
 #include <zookeeper/zookeeper.h>
 
 #include "drill/drillClient.hpp"
 #include "drill/recordBatch.hpp"
 #include "drillClientImpl.hpp"
 #include "errmsgs.hpp"
+#include "logger.hpp"
 #include "rpcEncoder.hpp"
 #include "rpcDecoder.hpp"
 #include "rpcMessage.hpp"
+#include "utils.hpp"
 
 #include "GeneralRPC.pb.h"
 #include "UserBitShared.pb.h"
 
-#ifdef DEBUG
-#define BOOST_ASIO_ENABLE_HANDLER_TRACKING
-#endif
-
-
 namespace Drill{
 
 RpcEncoder DrillClientImpl::s_encoder;
 RpcDecoder DrillClientImpl::s_decoder;
 
 std::string debugPrintQid(const exec::shared::QueryId& qid){
-    return std::string("[")+boost::lexical_cast<std::string>(qid.part1()) +std::string(":") + boost::lexical_cast<std::string>(qid.part2())+std::string("] "); 
+    return std::string("[")+boost::lexical_cast<std::string>(qid.part1()) +std::string(":") + boost::lexical_cast<std::string>(qid.part2())+std::string("] ");
 }
 
-void DrillClientImpl::parseConnectStr(const char* connectStr, std::string& protocol, std::string& hostPortStr){
-    char u[1024];
-    strcpy(u,connectStr);
+void setSocketTimeout(boost::asio::ip::tcp::socket& socket, int32_t timeout){
+#if defined _WIN32
+    int32_t timeoutMsecs=timeout*1000;
+    setsockopt(socket.native(), SOL_SOCKET, SO_RCVTIMEO, (const char*)&timeoutMsecs, sizeof(timeoutMsecs));
+    setsockopt(socket.native(), SOL_SOCKET, SO_SNDTIMEO, (const char*)&timeoutMsecs, sizeof(timeoutMsecs));
+#else
+    struct timeval tv;
+    tv.tv_sec  = timeout;
+    tv.tv_usec = 0;
+    int e=0;
+    e=setsockopt(socket.native(), SOL_SOCKET, SO_RCVTIMEO, &tv, sizeof(tv));
+    e=setsockopt(socket.native(), SOL_SOCKET, SO_SNDTIMEO, &tv, sizeof(tv));
+#endif
+}
+
+
+void DrillClientImpl::parseConnectStr(const char* connectStr,
+        std::string& pathToDrill,
+        std::string& protocol,
+        std::string& hostPortStr){
+    char u[MAX_CONNECT_STR+1];
+    strncpy(u,connectStr, MAX_CONNECT_STR); u[MAX_CONNECT_STR]=0;
     char* z=strtok(u, "=");
-    char* c=strtok(NULL, "");
+    char* c=strtok(NULL, "/");
+    char* p=strtok(NULL, "");
+
+    if(p!=NULL) pathToDrill=std::string("/")+p;
     protocol=z; hostPortStr=c;
+    return;
 }
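// How the tokenizing above splits the two supported connect string forms
// (hypothetical host and path values):
//   "zk=host1:2181,host2:2181/drill/mycluster"
//       protocol="zk", hostPortStr="host1:2181,host2:2181", pathToDrill="/drill/mycluster"
//   "local=10.250.0.146:31010"
//       protocol="local", hostPortStr="10.250.0.146:31010", pathToDrill left empty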
 
 connectionStatus_t DrillClientImpl::connect(const char* connStr){
-    std::string protocol, hostPortStr;
-    std::string host; 
+    std::string pathToDrill, protocol, hostPortStr;
+    std::string host;
     std::string port;
     if(!this->m_bIsConnected){
-        parseConnectStr(connStr, protocol, hostPortStr);
-        if(!strcmp(protocol.c_str(), "jdbc:drill:zk")){
+        parseConnectStr(connStr, pathToDrill, protocol, hostPortStr);
+        if(!strcmp(protocol.c_str(), "zk")){
             ZookeeperImpl zook;
-            if(zook.connectToZookeeper(hostPortStr.c_str())!=0){
+            if(zook.connectToZookeeper(hostPortStr.c_str(), pathToDrill.c_str())!=0){
                 return handleConnError(CONN_ZOOKEEPER_ERROR, getMessage(ERR_CONN_ZOOKEEPER, zook.getError().c_str()));
             }
             zook.debugPrint();
@@ -74,9 +94,9 @@ connectionStatus_t DrillClientImpl::connect(const char* connStr){
             host=boost::lexical_cast<std::string>(e.address());
             port=boost::lexical_cast<std::string>(e.user_port());
             zook.close();
-        }else if(!strcmp(protocol.c_str(), "jdbc:drill:local")){
-            char tempStr[1024];
-            strcpy(tempStr, hostPortStr.c_str());
+        }else if(!strcmp(protocol.c_str(), "local")){
+            char tempStr[MAX_CONNECT_STR+1];
+            strncpy(tempStr, hostPortStr.c_str(), MAX_CONNECT_STR); tempStr[MAX_CONNECT_STR]=0;
             host=strtok(tempStr, ":");
             port=strtok(NULL, "");
         }else{
@@ -97,7 +117,7 @@ connectionStatus_t DrillClientImpl::connect(const char* host, const char* port){
         tcp::resolver::iterator end;
         while (iter != end){
             endpoint = *iter++;
-            BOOST_LOG_TRIVIAL(trace) << endpoint << std::endl;
+            DRILL_LOG(LOG_TRACE) << endpoint << std::endl;
         }
         boost::system::error_code ec;
         m_socket.connect(endpoint, ec);
@@ -108,59 +128,147 @@ connectionStatus_t DrillClientImpl::connect(const char* host, const char* port){
     }catch(std::exception e){
         return handleConnError(CONN_FAILURE, getMessage(ERR_CONN_EXCEPT, e.what()));
     }
+
+    //
+    // We put some OS-dependent code here for timing out a socket. In practice it
+    // appears to do little; the asio deadline timers below are what actually
+    // enforce timeouts. Should we leave it in?
+    //
+    setSocketTimeout(m_socket, DrillClientConfig::getSocketTimeout());
+
     return CONN_SUCCESS;
 }
 
-void DrillClientImpl::sendSync(OutBoundRpcMessage& msg){
+connectionStatus_t DrillClientImpl::sendSync(OutBoundRpcMessage& msg){
     DrillClientImpl::s_encoder.Encode(m_wbuf, msg);
-    m_socket.write_some(boost::asio::buffer(m_wbuf));
+    boost::system::error_code ec;
+    size_t s=m_socket.write_some(boost::asio::buffer(m_wbuf), ec);
+    if(!ec && s!=0){
+        return CONN_SUCCESS;
+    }else{
+        return handleConnError(CONN_FAILURE, getMessage(ERR_CONN_WFAIL, ec.message().c_str()));
+    }
 }
 
-void DrillClientImpl::recvSync(InBoundRpcMessage& msg){
-    m_socket.read_some(boost::asio::buffer(m_rbuf));
-    uint32_t length = 0;
-    int bytes_read = DrillClientImpl::s_decoder.LengthDecode(m_rbuf.data(), &length);
-    DrillClientImpl::s_decoder.Decode(m_rbuf.data() + bytes_read, length, msg);
+connectionStatus_t DrillClientImpl::recvHandshake(){
+    if(m_rbuf==NULL){
+        m_rbuf = Utils::allocateBuffer(MAX_SOCK_RD_BUFSIZE);
+    }
+
+    m_deadlineTimer.expires_from_now(boost::posix_time::seconds(DrillClientConfig::getSocketTimeout()));
+    m_deadlineTimer.async_wait(boost::bind(
+                &DrillClientImpl::handleHShakeReadTimeout,
+                this,
+                boost::asio::placeholders::error
+                ));
+    DRILL_LOG(LOG_TRACE) << "Started new handshake wait timer."  << std::endl;
+
+    async_read(
+            this->m_socket,
+            boost::asio::buffer(m_rbuf, LEN_PREFIX_BUFLEN),
+            boost::bind(
+                &DrillClientImpl::handleHandshake,
+                this,
+                m_rbuf,
+                boost::asio::placeholders::error,
+                boost::asio::placeholders::bytes_transferred)
+            );
+    DRILL_LOG(LOG_DEBUG) << "Sent handshake read request to server" << std::endl;
+    m_io_service.run();
+    if(m_rbuf!=NULL){
+        Utils::freeBuffer(m_rbuf); m_rbuf=NULL;
+    }
+    return CONN_SUCCESS;
 }
 
-bool DrillClientImpl::ValidateHandShake(){
-    exec::user::UserToBitHandshake u2b;
-    exec::user::BitToUserHandshake b2u;
+void DrillClientImpl::handleHandshake(ByteBuf_t _buf,
+        const boost::system::error_code& err,
+        size_t bytes_transferred) {
+    boost::system::error_code error=err;
+    // cancel the timer
+    m_deadlineTimer.cancel();
+    DRILL_LOG(LOG_TRACE) << "Deadline timer cancelled."  << std::endl;
+    if(!error){
+        InBoundRpcMessage msg;
+        uint32_t length = 0;
+        int bytes_read = DrillClientImpl::s_decoder.LengthDecode(m_rbuf, &length);
+        if(length>0){
+            size_t leftover = LEN_PREFIX_BUFLEN - bytes_read;
+            ByteBuf_t b=m_rbuf + LEN_PREFIX_BUFLEN;
+            size_t bytesToRead=length - leftover;
+            while(1){
+                size_t dataBytesRead=m_socket.read_some(
+                        boost::asio::buffer(b, bytesToRead),
+                        error);
+                if(error) break;
+                DRILL_LOG(LOG_TRACE) << "Handshake Message: actual bytes read = " << dataBytesRead << std::endl;
+                if(dataBytesRead==bytesToRead) break;
+                bytesToRead-=dataBytesRead;
+                b+=dataBytesRead;
+            }
+            DrillClientImpl::s_decoder.Decode(m_rbuf+bytes_read, length, msg);
+        }else{
+            handleConnError(CONN_FAILURE, getMessage(ERR_CONN_RDFAIL, "No handshake"));
+            return; // nothing was read, so there is no handshake message to parse
+        }
+        exec::user::BitToUserHandshake b2u;
+        b2u.ParseFromArray(msg.m_pbody.data(), msg.m_pbody.size());
+        this->m_handshakeVersion=b2u.rpc_version();
+
+    }else{
+        // boost error
+        handleConnError(CONN_FAILURE, getMessage(ERR_CONN_RDFAIL, error.message().c_str()));
+        return;
+    }
+    return;
+}
 
+void DrillClientImpl::handleHShakeReadTimeout(const boost::system::error_code & err){
+    // if err == boost::asio::error::operation_aborted) then the caller cancelled the timer.
+    if(!err){
+        // Check whether the deadline has passed.
+        if (m_deadlineTimer.expires_at() <= boost::asio::deadline_timer::traits_type::now()){
+            // The deadline has passed.
+            m_deadlineTimer.expires_at(boost::posix_time::pos_infin);
+            DRILL_LOG(LOG_TRACE) << "Deadline timer expired."  << std::endl;
+            m_socket.close();
+        }
+    }
+    return;
+}
+
+bool DrillClientImpl::validateHandShake(){
+    exec::user::UserToBitHandshake u2b;
     u2b.set_channel(exec::shared::USER);
-    u2b.set_rpc_version(1);
+    u2b.set_rpc_version(DRILL_RPC_VERSION);
     u2b.set_support_listening(true);
-
     {
         boost::lock_guard<boost::mutex> lock(this->m_dcMutex);
         uint64_t coordId = this->getNextCoordinationId();
 
         OutBoundRpcMessage out_msg(exec::rpc::REQUEST, exec::user::HANDSHAKE, coordId, &u2b);
         sendSync(out_msg);
-
-        InBoundRpcMessage in_msg;
-        recvSync(in_msg);
-
-        b2u.ParseFromArray(in_msg.m_pbody.data(), in_msg.m_pbody.size());
     }
 
-    // validate handshake
-    if (b2u.rpc_version() != u2b.rpc_version()) {
-        BOOST_LOG_TRIVIAL(trace) << "Invalid rpc version.  Expected << " 
-            << u2b.rpc_version() << ", actual "<< b2u.rpc_version() << "." ;
-        handleConnError(CONN_HANDSHAKE_FAILED, 
-                getMessage(ERR_CONN_NOHSHAKE, u2b.rpc_version(), b2u.rpc_version()));
+    recvHandshake();
+    this->m_io_service.reset();
+    if(this->m_pError!=NULL){
+        return false;
+    }
+    if(m_handshakeVersion != u2b.rpc_version()) {
+        DRILL_LOG(LOG_TRACE) << "Invalid rpc version.  Expected << "
+            << DRILL_RPC_VERSION << ", actual "<< m_handshakeVersion << "." << std::endl;
+        handleConnError(CONN_HANDSHAKE_FAILED,
+                getMessage(ERR_CONN_NOHSHAKE, DRILL_RPC_VERSION, m_handshakeVersion));
         return false;
     }
     return true;
 }
 
 
-std::vector<Drill::FieldMetadata*> DrillClientQueryResult::s_emptyColDefs;
+FieldDefPtr DrillClientQueryResult::s_emptyColDefs(new std::vector<Drill::FieldMetadata*>());
 
-DrillClientQueryResult* DrillClientImpl::SubmitQuery(exec::user::QueryType t, 
-        const std::string& plan, 
-        pfnQueryResultsListener l, 
+DrillClientQueryResult* DrillClientImpl::SubmitQuery(::exec::shared::QueryType t,
+        const std::string& plan,
+        pfnQueryResultsListener l,
         void* lCtx){
     exec::user::RunQuery query;
     query.set_results_mode(exec::user::STREAM_FULL);
@@ -180,16 +288,16 @@ DrillClientQueryResult* DrillClientImpl::SubmitQuery(exec::user::QueryType t,
         bool sendRequest=false;
         this->m_queryIds[coordId]=pQuery;
 
-        BOOST_LOG_TRIVIAL(debug)  << "Submit Query Request. Coordination id = " << coordId;
+        DRILL_LOG(LOG_DEBUG)  << "Submit Query Request. Coordination id = " << coordId << std::endl;
 
         if(m_pendingRequests++==0){
             sendRequest=true;
         }else{
-            BOOST_LOG_TRIVIAL(debug) << "Queueing read request to server" << std::endl;
-            BOOST_LOG_TRIVIAL(debug) << "Number of pending requests = " << m_pendingRequests << std::endl;
+            DRILL_LOG(LOG_DEBUG) << "Queueing read request to server" << std::endl;
+            DRILL_LOG(LOG_DEBUG) << "Number of pending requests = " << m_pendingRequests << std::endl;
         }
         if(sendRequest){
-            BOOST_LOG_TRIVIAL(debug) << "Sending read request. Number of pending requests = " 
+            DRILL_LOG(LOG_DEBUG) << "Sending read request. Number of pending requests = "
                 << m_pendingRequests << std::endl;
             getNextResult(); // async wait for results
         }
@@ -198,8 +306,8 @@ DrillClientQueryResult* DrillClientImpl::SubmitQuery(exec::user::QueryType t,
     //run this in a new thread
     {
         if(this->m_pListenerThread==NULL){
-            BOOST_LOG_TRIVIAL(debug) << "Starting listener thread." << std::endl;
-            this->m_pListenerThread= new boost::thread(boost::bind(&boost::asio::io_service::run, 
+            DRILL_LOG(LOG_DEBUG) << "Starting listener thread." << std::endl;
+            this->m_pListenerThread= new boost::thread(boost::bind(&boost::asio::io_service::run,
                         &this->m_io_service));
         }
     }
@@ -211,61 +319,72 @@ void DrillClientImpl::getNextResult(){
     // This call is always made from within a function where the mutex has already been acquired
     //boost::lock_guard<boost::mutex> lock(this->m_dcMutex);
 
-    //use free, not delete to free 
-    ByteBuf_t readBuf = allocateBuffer(LEN_PREFIX_BUFLEN);
-    async_read( 
+    //use free, not delete to free
+    ByteBuf_t readBuf = Utils::allocateBuffer(LEN_PREFIX_BUFLEN);
+
+    m_deadlineTimer.expires_from_now(boost::posix_time::seconds(DrillClientConfig::getSocketTimeout()));
+    m_deadlineTimer.async_wait(boost::bind(
+                &DrillClientImpl::handleReadTimeout,
+                this,
+                boost::asio::placeholders::error
+                ));
+    DRILL_LOG(LOG_TRACE) << "Started new async wait timer."  << std::endl;
+
+    async_read(
             this->m_socket,
             boost::asio::buffer(readBuf, LEN_PREFIX_BUFLEN),
             boost::bind(
                 &DrillClientImpl::handleRead,
                 this,
                 readBuf,
-                boost::asio::placeholders::error, 
+                boost::asio::placeholders::error,
                 boost::asio::placeholders::bytes_transferred)
             );
-    BOOST_LOG_TRIVIAL(debug) << "Sent read request to server" << std::endl;
+    DRILL_LOG(LOG_DEBUG) << "Sent read request to server" << std::endl;
 }
 
 void DrillClientImpl::waitForResults(){
     this->m_pListenerThread->join();
-    BOOST_LOG_TRIVIAL(debug) << "Listener thread exited." << std::endl;
+    DRILL_LOG(LOG_DEBUG) << "Listener thread exited." << std::endl;
     delete this->m_pListenerThread; this->m_pListenerThread=NULL;
 }
 
-status_t DrillClientImpl::readMsg(ByteBuf_t _buf, InBoundRpcMessage& msg, boost::system::error_code& error){
+status_t DrillClientImpl::readMsg(ByteBuf_t _buf, ByteBuf_t* allocatedBuffer, InBoundRpcMessage& msg, boost::system::error_code& error){
     size_t leftover=0;
     uint32_t rmsgLen;
     ByteBuf_t currentBuffer;
+    *allocatedBuffer=NULL;
     {
-        // We need to protect the readLength and read buffer, and the pending requests counter, 
+        // We need to protect the readLength and read buffer, and the pending requests counter,
         // but we don't have to keep the lock while we decode the rest of the buffer.
         boost::lock_guard<boost::mutex> lock(this->m_dcMutex);
         int bytes_read = DrillClientImpl::s_decoder.LengthDecode(_buf, &rmsgLen);
-        BOOST_LOG_TRIVIAL(trace) << "len bytes = " << bytes_read << std::endl;
-        BOOST_LOG_TRIVIAL(trace) << "rmsgLen = " << rmsgLen << std::endl;
+        DRILL_LOG(LOG_TRACE) << "len bytes = " << bytes_read << std::endl;
+        DRILL_LOG(LOG_TRACE) << "rmsgLen = " << rmsgLen << std::endl;
 
         if(rmsgLen>0){
             leftover = LEN_PREFIX_BUFLEN - bytes_read;
             // Allocate a buffer
-            BOOST_LOG_TRIVIAL(trace) << "Allocated and locked buffer." << std::endl;
-            currentBuffer=allocateBuffer(rmsgLen);
+            DRILL_LOG(LOG_TRACE) << "Allocated and locked buffer." << std::endl;
+            currentBuffer=Utils::allocateBuffer(rmsgLen);
             if(currentBuffer==NULL){
+                Utils::freeBuffer(_buf);
                 return handleQryError(QRY_CLIENT_OUTOFMEM, getMessage(ERR_QRY_OUTOFMEM), NULL);
             }
+            *allocatedBuffer=currentBuffer;
             if(leftover){
                 memcpy(currentBuffer, _buf + bytes_read, leftover);
             }
-            freeBuffer(_buf);
-            BOOST_LOG_TRIVIAL(trace) << "reading data (rmsgLen - leftover) : " 
+            DRILL_LOG(LOG_TRACE) << "reading data (rmsgLen - leftover) : "
                 << (rmsgLen - leftover) << std::endl;
             ByteBuf_t b=currentBuffer + leftover;
             size_t bytesToRead=rmsgLen - leftover;
             while(1){
                 size_t dataBytesRead=this->m_socket.read_some(
-                        boost::asio::buffer(b, bytesToRead), 
+                        boost::asio::buffer(b, bytesToRead),
                         error);
                 if(error) break;
-                BOOST_LOG_TRIVIAL(trace) << "Data Message: actual bytes read = " << dataBytesRead << std::endl;
+                DRILL_LOG(LOG_TRACE) << "Data Message: actual bytes read = " << dataBytesRead << std::endl;
                 if(dataBytesRead==bytesToRead) break;
                 bytesToRead-=dataBytesRead;
                 b+=dataBytesRead;
@@ -273,31 +392,34 @@ status_t DrillClientImpl::readMsg(ByteBuf_t _buf, InBoundRpcMessage& msg, boost:
             if(!error){
                 // read data successfully
                 DrillClientImpl::s_decoder.Decode(currentBuffer, rmsgLen, msg);
-                BOOST_LOG_TRIVIAL(trace) << "Done decoding chunk. Coordination id: " <<msg.m_coord_id<< std::endl;
+                DRILL_LOG(LOG_TRACE) << "Done decoding chunk. Coordination id: " <<msg.m_coord_id<< std::endl;
             }else{
-                return handleQryError(QRY_COMM_ERROR, 
+                Utils::freeBuffer(_buf);
+                return handleQryError(QRY_COMM_ERROR,
                         getMessage(ERR_QRY_COMMERR, error.message().c_str()), NULL);
             }
         }else{
-            // got a message with an invalid read length. 
+            // got a message with an invalid read length.
+            Utils::freeBuffer(_buf);
             return handleQryError(QRY_INTERNAL_ERROR, getMessage(ERR_QRY_INVREADLEN), NULL);
         }
     }
+    Utils::freeBuffer(_buf);
     return QRY_SUCCESS;
 }
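// Note on the framing above: readMsg implements length-prefixed framing. The
// fixed LEN_PREFIX_BUFLEN read yields the encoded length plus possibly the
// first "leftover" payload bytes; the while(1) loop then calls read_some()
// until the remaining (rmsgLen - leftover) bytes arrive, since a single TCP
// read may return fewer bytes than requested.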
 
-status_t DrillClientImpl::processQueryResult(InBoundRpcMessage& msg ){
+status_t DrillClientImpl::processQueryResult(ByteBuf_t allocatedBuffer, InBoundRpcMessage& msg ){
     DrillClientQueryResult* pDrillClientQueryResult=NULL;
     status_t ret=QRY_SUCCESS;
     {
         boost::lock_guard<boost::mutex> lock(this->m_dcMutex);
         exec::user::QueryResult* qr = new exec::user::QueryResult; //Record Batch will own this object and free it up.
 
-        BOOST_LOG_TRIVIAL(debug) << "Processing Query Result " << std::endl;
+        DRILL_LOG(LOG_DEBUG) << "Processing Query Result " << std::endl;
         qr->ParseFromArray(msg.m_pbody.data(), msg.m_pbody.size());
-        BOOST_LOG_TRIVIAL(trace) << qr->DebugString();
+        DRILL_LOG(LOG_TRACE) << qr->DebugString() << std::endl;
 
-        BOOST_LOG_TRIVIAL(debug) << "Searching for Query Id - " << debugPrintQid(qr->query_id()) << std::endl;
+        DRILL_LOG(LOG_DEBUG) << "Searching for Query Id - " << debugPrintQid(qr->query_id()) << std::endl;
 
         exec::shared::QueryId qid;
         qid.CopyFrom(qr->query_id());
@@ -306,39 +428,48 @@ status_t DrillClientImpl::processQueryResult(InBoundRpcMessage& msg ){
         if(it!=this->m_queryResults.end()){
             pDrillClientQueryResult=(*it).second;
         }else{
-            assert(0); 
+            assert(0);
             //assert might be compiled away in a release build. So return an error to the app.
             status_t ret= handleQryError(QRY_INTERNAL_ERROR, getMessage(ERR_QRY_OUTOFORDER), NULL);
             delete qr;
             return ret;
         }
-        BOOST_LOG_TRIVIAL(debug) << "Drill Client Query Result Query Id - " << 
-            debugPrintQid(*pDrillClientQueryResult->m_pQueryId) 
+        DRILL_LOG(LOG_DEBUG) << "Drill Client Query Result Query Id - " <<
+            debugPrintQid(*pDrillClientQueryResult->m_pQueryId)
             << std::endl;
         //Check QueryResult.queryState. QueryResult could have an error.
         if(qr->query_state() == exec::user::QueryResult_QueryState_FAILED){
             status_t ret=handleQryError(QRY_FAILURE, qr->error(0), pDrillClientQueryResult);
+            Utils::freeBuffer(allocatedBuffer);
             delete qr;
             return ret;
         }
         //Validate the RPC message
         std::string valErr;
         if( (ret=validateMessage(msg, *qr, valErr)) != QRY_SUCCESS){
+            Utils::freeBuffer(allocatedBuffer);
+            delete qr;
             return handleQryError(ret, getMessage(ERR_QRY_INVRPC, valErr.c_str()), pDrillClientQueryResult);
         }
 
-        //Build Record Batch here 
-        BOOST_LOG_TRIVIAL(trace) << qr->DebugString();
+        //Build Record Batch here
+        DRILL_LOG(LOG_TRACE) << qr->DebugString() << std::endl;
+
+        RecordBatch* pRecordBatch= new RecordBatch(qr, allocatedBuffer,  msg.m_dbody);
+        pDrillClientQueryResult->m_numBatches++;
 
-        RecordBatch* pRecordBatch= new RecordBatch(qr, msg.m_dbody);
+        DRILL_LOG(LOG_TRACE) << "Allocated new Record batch." << (void*)pRecordBatch << std::endl;
         pRecordBatch->build();
-        BOOST_LOG_TRIVIAL(debug) << debugPrintQid(qr->query_id())<<"recordBatch.numRecords " 
+        DRILL_LOG(LOG_DEBUG) << debugPrintQid(qr->query_id())<<"recordBatch.numRecords "
             << pRecordBatch->getNumRecords()  << std::endl;
-        BOOST_LOG_TRIVIAL(debug) << debugPrintQid(qr->query_id())<<"recordBatch.numFields " 
+        DRILL_LOG(LOG_DEBUG) << debugPrintQid(qr->query_id())<<"recordBatch.numFields "
             << pRecordBatch->getNumFields()  << std::endl;
-        BOOST_LOG_TRIVIAL(debug) << debugPrintQid(qr->query_id())<<"recordBatch.isLastChunk " 
+        DRILL_LOG(LOG_DEBUG) << debugPrintQid(qr->query_id())<<"recordBatch.isLastChunk "
             << pRecordBatch->isLastChunk()  << std::endl;
 
+        ret=pDrillClientQueryResult->setupColumnDefs(qr);
+        if(ret==QRY_SUCCESS_WITH_INFO)pRecordBatch->schemaChanged(true);
+
         pDrillClientQueryResult->m_bIsQueryPending=true;
         pDrillClientQueryResult->m_bIsLastChunk=qr->is_last_chunk();
         pfnQueryResultsListener pResultsListener=pDrillClientQueryResult->m_pResultsListener;
@@ -346,7 +477,7 @@ status_t DrillClientImpl::processQueryResult(InBoundRpcMessage& msg ){
             ret = pResultsListener(pDrillClientQueryResult, pRecordBatch, NULL);
         }else{
             //Use a default callback that is called when a record batch is received
-            ret = pDrillClientQueryResult->defaultQueryResultsListener(pDrillClientQueryResult, 
+            ret = pDrillClientQueryResult->defaultQueryResultsListener(pDrillClientQueryResult,
                     pRecordBatch, NULL);
         }
     } // release lock
@@ -357,16 +488,16 @@ status_t DrillClientImpl::processQueryResult(InBoundRpcMessage& msg ){
             m_pendingRequests--;
         }
         pDrillClientQueryResult->m_bIsQueryPending=false;
-        BOOST_LOG_TRIVIAL(debug) << "Client app cancelled query.";
+        DRILL_LOG(LOG_DEBUG) << "Client app cancelled query." << std::endl;
         return ret;
     }
     if(pDrillClientQueryResult->m_bIsLastChunk){
         {
             boost::lock_guard<boost::mutex> lock(this->m_dcMutex);
             m_pendingRequests--;
-            BOOST_LOG_TRIVIAL(debug) << debugPrintQid(*pDrillClientQueryResult->m_pQueryId) 
+            DRILL_LOG(LOG_DEBUG) << debugPrintQid(*pDrillClientQueryResult->m_pQueryId)
                 <<  "Received last batch. " << std::endl;
-            BOOST_LOG_TRIVIAL(debug) << debugPrintQid(*pDrillClientQueryResult->m_pQueryId) 
+            DRILL_LOG(LOG_DEBUG) << debugPrintQid(*pDrillClientQueryResult->m_pQueryId)
                 << "Pending requests: " << m_pendingRequests <<"." << std::endl;
         }
         ret=QRY_NO_MORE_DATA;
@@ -377,9 +508,9 @@ status_t DrillClientImpl::processQueryResult(InBoundRpcMessage& msg ){
     return ret;
 }
 
-status_t DrillClientImpl::processQueryId(InBoundRpcMessage& msg ){
+status_t DrillClientImpl::processQueryId(ByteBuf_t allocatedBuffer, InBoundRpcMessage& msg ){
     DrillClientQueryResult* pDrillClientQueryResult=NULL;
-    BOOST_LOG_TRIVIAL(debug) << "Processing Query Handle with coordination id:" << msg.m_coord_id << std::endl;
+    DRILL_LOG(LOG_DEBUG) << "Processing Query Handle with coordination id:" << msg.m_coord_id << std::endl;
     status_t ret=QRY_SUCCESS;
 
     boost::lock_guard<boost::mutex> lock(m_dcMutex);
@@ -388,45 +519,74 @@ status_t DrillClientImpl::processQueryId(InBoundRpcMessage& msg ){
     if(it!=this->m_queryIds.end()){
         pDrillClientQueryResult=(*it).second;
         exec::shared::QueryId *qid = new exec::shared::QueryId;
-        BOOST_LOG_TRIVIAL(trace)  << "Received Query Handle" << msg.m_pbody.size();
+        DRILL_LOG(LOG_TRACE)  << "Received Query Handle" << msg.m_pbody.size() << std::endl;
         qid->ParseFromArray(msg.m_pbody.data(), msg.m_pbody.size());
-        BOOST_LOG_TRIVIAL(trace) << qid->DebugString();
+        DRILL_LOG(LOG_TRACE) << qid->DebugString() << std::endl;
         m_queryResults[qid]=pDrillClientQueryResult;
         //save queryId allocated here so we can free it later
         pDrillClientQueryResult->setQueryId(qid);
     }else{
+        Utils::freeBuffer(allocatedBuffer);
         return handleQryError(QRY_INTERNAL_ERROR, getMessage(ERR_QRY_INVQUERYID), NULL);
     }
+    Utils::freeBuffer(allocatedBuffer);
     return ret;
 }
 
-void DrillClientImpl::handleRead(ByteBuf_t _buf, 
-        const boost::system::error_code& err, 
+void DrillClientImpl::handleReadTimeout(const boost::system::error_code & err){
+    // if err == boost::asio::error::operation_aborted) then the caller cancelled the timer.
+    if(!err){
+        // Check whether the deadline has passed.
+        if (m_deadlineTimer.expires_at() <= boost::asio::deadline_timer::traits_type::now()){
+            // The deadline has passed.
+            handleQryError(QRY_COMM_ERROR, getMessage(ERR_QRY_TIMOUT), NULL);
+            // There is no longer an active deadline. The expiry is set to positive
+            // infinity so that the timer never expires until a new deadline is set.
+            // Note that at this time, the caller is not in a (async) wait for the timer.
+            m_deadlineTimer.expires_at(boost::posix_time::pos_infin);
+            DRILL_LOG(LOG_TRACE) << "Deadline timer expired."  << std::endl;
+            // Cancel all pending async IOs.
+            // The cancel call _MAY_ not work on all platforms. To be a little more reliable we need
+            // to have the BOOST_ASIO_ENABLE_CANCELIO macro (as well as the BOOST_ASIO_DISABLE_IOCP macro?)
+            // defined. To be really sure, we need to close the socket. Closing the socket is a bit
+            // drastic and we will defer that till a later release.
+            m_socket.cancel();
+        }
+    }
+    return;
+}
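// Note on the timeout pattern above: every async_read is paired with an
// async_wait on m_deadlineTimer. handleRead() cancels the timer when a read
// completes, so this handler then runs with err ==
// boost::asio::error::operation_aborted; only a genuinely expired deadline
// reaches the handleQryError()/cancel() path above.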
+
+void DrillClientImpl::handleRead(ByteBuf_t _buf,
+        const boost::system::error_code& err,
         size_t bytes_transferred) {
     boost::system::error_code error=err;
+    // cancel the timer
+    m_deadlineTimer.cancel();
+    DRILL_LOG(LOG_TRACE) << "Deadline timer cancelled."  << std::endl;
     if(!error){
         InBoundRpcMessage msg;
 
-        BOOST_LOG_TRIVIAL(trace) << "Getting new message" << std::endl;
+        DRILL_LOG(LOG_TRACE) << "Getting new message" << std::endl;
+        ByteBuf_t allocatedBuffer=NULL;
 
-        if(readMsg(_buf, msg, error)!=QRY_SUCCESS){
+        if(readMsg(_buf, &allocatedBuffer, msg, error)!=QRY_SUCCESS){
             if(m_pendingRequests!=0){
                 boost::lock_guard<boost::mutex> lock(this->m_dcMutex);
                 getNextResult();
             }
             return;
-        } 
+        }
 
         if(!error && msg.m_rpc_type==exec::user::QUERY_RESULT){
-            if(processQueryResult(msg)!=QRY_SUCCESS){
+            if(processQueryResult(allocatedBuffer, msg)!=QRY_SUCCESS){
                 if(m_pendingRequests!=0){
                     boost::lock_guard<boost::mutex> lock(this->m_dcMutex);
                     getNextResult();
                 }
                 return;
             }
-        }else if(!error && msg.m_rpc_type==exec::user::QUERY_HANDLE){ 
-            if(processQueryId(msg)!=QRY_SUCCESS){
+        }else if(!error && msg.m_rpc_type==exec::user::QUERY_HANDLE){
+            if(processQueryId(allocatedBuffer, msg)!=QRY_SUCCESS){
                 if(m_pendingRequests!=0){
                     boost::lock_guard<boost::mutex> lock(this->m_dcMutex);
                     getNextResult();
@@ -436,15 +596,15 @@ void DrillClientImpl::handleRead(ByteBuf_t _buf,
         }else{
             boost::lock_guard<boost::mutex> lock(this->m_dcMutex);
             if(error){
-                // We have a socket read error, but we do not know which query this is for. 
+                // We have a socket read error, but we do not know which query this is for.
                 // Signal ALL pending queries that they should stop waiting.
-                BOOST_LOG_TRIVIAL(trace) << "read error: " << error << "\n";
+                DRILL_LOG(LOG_TRACE) << "read error: " << error << std::endl;
                 handleQryError(QRY_COMM_ERROR, getMessage(ERR_QRY_COMMERR, error.message().c_str()), NULL);
                 return;
             }else{
                 //If not QUERY_RESULT, then we think something serious has gone wrong?
                 assert(0);
-                BOOST_LOG_TRIVIAL(trace) << "QueryResult returned " << msg.m_rpc_type;
+                DRILL_LOG(LOG_TRACE) << "QueryResult returned " << msg.m_rpc_type << std::endl;
                 handleQryError(QRY_INTERNAL_ERROR, getMessage(ERR_QRY_INVRPCTYPE, msg.m_rpc_type), NULL);
                 return;
             }
@@ -455,6 +615,7 @@ void DrillClientImpl::handleRead(ByteBuf_t _buf,
         }
     }else{
         // boost error
+        Utils::freeBuffer(_buf);
         boost::lock_guard<boost::mutex> lock(this->m_dcMutex);
         handleQryError(QRY_COMM_ERROR, getMessage(ERR_QRY_COMMERR, error.message().c_str()), NULL);
         return;
@@ -485,6 +646,7 @@ status_t DrillClientImpl::validateMessage(InBoundRpcMessage& msg, exec::user::Qu
 connectionStatus_t DrillClientImpl::handleConnError(connectionStatus_t status, std::string msg){
     DrillClientError* pErr = new DrillClientError(status, DrillClientError::CONN_ERROR_START+status, msg);
     m_pendingRequests=0;
+    if(m_pError!=NULL){ delete m_pError; m_pError=NULL;}
     m_pError=pErr;
     broadcastError(this->m_pError);
     return status;
@@ -492,19 +654,20 @@ connectionStatus_t DrillClientImpl::handleConnError(connectionStatus_t status, s
 
 status_t DrillClientImpl::handleQryError(status_t status, std::string msg, DrillClientQueryResult* pQueryResult){
     DrillClientError* pErr = new DrillClientError(status, DrillClientError::QRY_ERROR_START+status, msg);
+    if(m_pError!=NULL){ delete m_pError; m_pError=NULL;}
+    m_pError=pErr;
     if(pQueryResult!=NULL){
         m_pendingRequests--;
         pQueryResult->signalError(pErr);
     }else{
         m_pendingRequests=0;
-        m_pError=pErr;
         broadcastError(this->m_pError);
     }
     return status;
 }
 
-status_t DrillClientImpl::handleQryError(status_t status, 
-        const exec::shared::DrillPBError& e, 
+status_t DrillClientImpl::handleQryError(status_t status,
+        const exec::shared::DrillPBError& e,
         DrillClientQueryResult* pQueryResult){
     assert(pQueryResult!=NULL);
     this->m_pError = DrillClientError::getErrorObject(e);
@@ -516,8 +679,10 @@ status_t DrillClientImpl::handleQryError(status_t status,
 void DrillClientImpl::broadcastError(DrillClientError* pErr){
     if(pErr!=NULL){
         std::map<int, DrillClientQueryResult*>::iterator iter;
-        for(iter = m_queryIds.begin(); iter != m_queryIds.end(); iter++) {
-            iter->second->signalError(pErr);
+        if(!m_queryIds.empty()){
+            for(iter = m_queryIds.begin(); iter != m_queryIds.end(); iter++) {
+                iter->second->signalError(pErr);
+            }
         }
     }
     return;
@@ -526,17 +691,21 @@ void DrillClientImpl::broadcastError(DrillClientError* pErr){
 void DrillClientImpl::clearMapEntries(DrillClientQueryResult* pQueryResult){
     std::map<int, DrillClientQueryResult*>::iterator iter;
     boost::lock_guard<boost::mutex> lock(m_dcMutex);
-    for(iter=m_queryIds.begin(); iter!=m_queryIds.end(); iter++) {
-        if(pQueryResult==(DrillClientQueryResult*)iter->second){
-            m_queryIds.erase(iter->first);
-            break;
+    if(!m_queryIds.empty()){
+        for(iter=m_queryIds.begin(); iter!=m_queryIds.end(); iter++) {
+            if(pQueryResult==(DrillClientQueryResult*)iter->second){
+                m_queryIds.erase(iter->first);
+                break;
+            }
         }
     }
-    std::map<exec::shared::QueryId*, DrillClientQueryResult*, compareQueryId>::iterator it;
-    for(it=m_queryResults.begin(); it!=m_queryResults.end(); it++) {
-        if(pQueryResult==(DrillClientQueryResult*)it->second){
-            m_queryResults.erase(it->first);
-            break;
+    if(!m_queryResults.empty()){
+        std::map<exec::shared::QueryId*, DrillClientQueryResult*, compareQueryId>::iterator it;
+        for(it=m_queryResults.begin(); it!=m_queryResults.end(); it++) {
+            if(pQueryResult==(DrillClientQueryResult*)it->second){
+                m_queryResults.erase(it->first);
+                break;
+            }
         }
     }
 }
@@ -547,7 +716,7 @@ void DrillClientImpl::sendAck(InBoundRpcMessage& msg){
     OutBoundRpcMessage ack_msg(exec::rpc::RESPONSE, exec::user::ACK, msg.m_coord_id, &ack);
     boost::lock_guard<boost::mutex> lock(m_dcMutex);
     sendSync(ack_msg);
-    BOOST_LOG_TRIVIAL(trace) << "ACK sent" << std::endl;
+    DRILL_LOG(LOG_TRACE) << "ACK sent" << std::endl;
 }
 
 void DrillClientImpl::sendCancel(InBoundRpcMessage& msg){
@@ -556,34 +725,36 @@ void DrillClientImpl::sendCancel(InBoundRpcMessage& msg){
     OutBoundRpcMessage ack_msg(exec::rpc::RESPONSE, exec::user::CANCEL_QUERY, msg.m_coord_id, &ack);
     boost::lock_guard<boost::mutex> lock(m_dcMutex);
     sendSync(ack_msg);
-    BOOST_LOG_TRIVIAL(trace) << "CANCEL sent" << std::endl;
+    DRILL_LOG(LOG_TRACE) << "CANCEL sent" << std::endl;
 }
 
-// This COPIES the FieldMetadata definition for the record batch.  ColumnDefs held by this 
+// This COPIES the FieldMetadata definition for the record batch.  ColumnDefs held by this
 // class are used by the async callbacks.
 status_t DrillClientQueryResult::setupColumnDefs(exec::user::QueryResult* pQueryResult) {
     bool hasSchemaChanged=false;
+    bool isFirstIter=false;
     boost::lock_guard<boost::mutex> schLock(this->m_schemaMutex);
 
-    std::vector<Drill::FieldMetadata*> prevSchema=this->m_columnDefs;
+    FieldDefPtr prevSchema=this->m_columnDefs;
+    isFirstIter=this->m_numBatches==1?true:false;
     std::map<std::string, Drill::FieldMetadata*> oldSchema;
-    for(std::vector<Drill::FieldMetadata*>::iterator it = prevSchema.begin(); it != prevSchema.end(); ++it){
-        // the key is the field_name + type
-        char type[256];
-        sprintf(type, ":%d:%d",(*it)->getMinorType(), (*it)->getDataMode() );
-        std::string k= (*it)->getName()+type;
-        oldSchema[k]=*it;
+    if(!m_columnDefs->empty()){
+        for(std::vector<Drill::FieldMetadata*>::iterator it = prevSchema->begin(); it != prevSchema->end(); ++it){
+            // the key is the field_name + type
+            char type[256];
+            sprintf(type, ":%d:%d",(*it)->getMinorType(), (*it)->getDataMode() );
+            std::string k= (*it)->getName()+type;
+            oldSchema[k]=*it;
+        }
     }
-
-    m_columnDefs.clear();
+    m_columnDefs->clear();
     size_t numFields=pQueryResult->def().field_size();
     for(size_t i=0; i<numFields; i++){
-        //TODO: free this??
         Drill::FieldMetadata* fmd= new Drill::FieldMetadata;
         fmd->set(pQueryResult->def().field(i));
-        this->m_columnDefs.push_back(fmd);
+        this->m_columnDefs->push_back(fmd);
 
-        //Look for changes in the vector and trigger a Schema change event if necessary. 
+        //Look for changes in the vector and trigger a Schema change event if necessary.
         //If vectors are different, then call the schema change listener.
         char type[256];
         sprintf(type, ":%d:%d",fmd->getMinorType(), fmd->getDataMode() );
@@ -601,22 +772,27 @@ status_t DrillClientQueryResult::setupColumnDefs(exec::user::QueryResult* pQuery
     }
 
     //free memory allocated for FieldMetadata objects saved in previous columnDefs;
-    for(std::vector<Drill::FieldMetadata*>::iterator it = prevSchema.begin(); it != prevSchema.end(); ++it){
-        delete *it;    
+    if(!prevSchema->empty()){
+        for(std::vector<Drill::FieldMetadata*>::iterator it = prevSchema->begin(); it != prevSchema->end(); ++it){
+            delete *it;
+        }
     }
-    prevSchema.clear();
-    this->m_bHasSchemaChanged=hasSchemaChanged;
-    if(hasSchemaChanged){
-        //TODO: invoke schema change Listener
+    prevSchema->clear();
+    this->m_bHasSchemaChanged=hasSchemaChanged&&!isFirstIter;
+    if(this->m_bHasSchemaChanged){
+        //invoke schema change Listener
+        if(m_pSchemaListener!=NULL){
+            m_pSchemaListener(this, m_columnDefs, NULL);
+        }
     }
-    return hasSchemaChanged?QRY_SUCCESS_WITH_INFO:QRY_SUCCESS;
+    return this->m_bHasSchemaChanged?QRY_SUCCESS_WITH_INFO:QRY_SUCCESS;
 }
 
-status_t DrillClientQueryResult::defaultQueryResultsListener(void* ctx,  
-        RecordBatch* b, 
+status_t DrillClientQueryResult::defaultQueryResultsListener(void* ctx,
+        RecordBatch* b,
         DrillClientError* err) {
     //ctx; // unused, we already have the this pointer
-    BOOST_LOG_TRIVIAL(trace) << "Query result listener called" << std::endl;
+    DRILL_LOG(LOG_TRACE) << "Query result listener called" << std::endl;
     //check if the query has been canceled. IF so then return FAILURE. Caller will send cancel to the server.
     if(this->m_bCancel){
         return QRY_FAILURE;
@@ -625,7 +801,7 @@ status_t DrillClientQueryResult::defaultQueryResultsListener(void* ctx,
         // signal the cond var
         {
             #ifdef DEBUG
-            BOOST_LOG_TRIVIAL(debug)<<debugPrintQid(b->getQueryResult()->query_id())  
+            DRILL_LOG(LOG_DEBUG)<<debugPrintQid(b->getQueryResult()->query_id())
                 << "Query result listener saved result to queue." << std::endl;
             #endif
             boost::lock_guard<boost::mutex> cvLock(this->m_cvMutex);
@@ -644,7 +820,7 @@ RecordBatch*  DrillClientQueryResult::peekNext() {
     //if no more data, return NULL;
     if(!m_bIsQueryPending) return NULL;
     boost::unique_lock<boost::mutex> cvLock(this->m_cvMutex);
-    BOOST_LOG_TRIVIAL(trace) << "Synchronous read waiting for data." << std::endl;
+    DRILL_LOG(LOG_TRACE) << "Synchronous read waiting for data." << std::endl;
     while(!this->m_bHasData && !m_bHasError) {
         this->m_cv.wait(cvLock);
     }
@@ -656,11 +832,17 @@ RecordBatch*  DrillClientQueryResult::peekNext() {
 RecordBatch*  DrillClientQueryResult::getNext() {
     RecordBatch* pRecordBatch=NULL;
     //if no more data, return NULL;
-    if(!m_bIsQueryPending) return NULL;
+    if(!m_bIsQueryPending){
+        DRILL_LOG(LOG_TRACE) << "Query is done." << std::endl;
+        if(!m_recordBatches.empty()){
+            DRILL_LOG(LOG_TRACE) << " But there is a Record batch left behind." << std::endl;
+        }
+        return NULL;
+    }
 
     boost::unique_lock<boost::mutex> cvLock(this->m_cvMutex);
-    BOOST_LOG_TRIVIAL(trace) << "Synchronous read waiting for data." << std::endl;
-    while(!this->m_bHasData && !m_bHasError) {
+    DRILL_LOG(LOG_TRACE) << "Synchronous read waiting for data." << std::endl;
+    while(!this->m_bHasData && !m_bHasError){
         this->m_cv.wait(cvLock);
     }
     // remove first element from queue
@@ -709,33 +891,49 @@ void DrillClientQueryResult::signalError(DrillClientError* pErr){
 }
 
 void DrillClientQueryResult::clearAndDestroy(){
-    if(this->m_pQueryId!=NULL){
-        delete this->m_pQueryId; this->m_pQueryId=NULL;
-    }
     //free memory allocated for FieldMetadata objects saved in m_columnDefs;
-    for(std::vector<Drill::FieldMetadata*>::iterator it = m_columnDefs.begin(); it != m_columnDefs.end(); ++it){
-        delete *it;    
+    if(!m_columnDefs->empty()){
+        for(std::vector<Drill::FieldMetadata*>::iterator it = m_columnDefs->begin(); it != m_columnDefs->end(); ++it){
+            delete *it;
+        }
+        m_columnDefs->clear();
     }
-    m_columnDefs.clear();
     //Tell the parent to remove this from its lists
     m_pClient->clearMapEntries(this);
+
+    //clear query id map entries.
+    if(this->m_pQueryId!=NULL){
+        delete this->m_pQueryId; this->m_pQueryId=NULL;
+    }
+    if(!m_recordBatches.empty()){
+        // When multiple queries execute in parallel we sometimes get an empty record batch back from the server _after_
+        // the last chunk has been received. We eventually delete it.
+        DRILL_LOG(LOG_TRACE) << "Freeing Record batch(es) left behind "<< std::endl;
+        RecordBatch* pR=NULL;
+        while(!m_recordBatches.empty()){
+            pR=m_recordBatches.front();
+            m_recordBatches.pop();
+            delete pR;
+        }
+    }
 }
 
-char ZookeeperImpl::s_drillRoot[]="/drill/drillbits1";
+char ZookeeperImpl::s_drillRoot[]="/drill/";
+char ZookeeperImpl::s_defaultCluster[]="drillbits1";
 
-ZookeeperImpl::ZookeeperImpl(){ 
+ZookeeperImpl::ZookeeperImpl(){
     m_pDrillbits=new String_vector;
     srand (time(NULL));
     m_bConnecting=true;
     memset(&m_id, 0, sizeof(m_id));
 }
 
-ZookeeperImpl::~ZookeeperImpl(){ 
+ZookeeperImpl::~ZookeeperImpl(){
     delete m_pDrillbits;
 }
 
 ZooLogLevel ZookeeperImpl::getZkLogLevel(){
-    //typedef enum {ZOO_LOG_LEVEL_ERROR=1, 
+    //typedef enum {ZOO_LOG_LEVEL_ERROR=1,
     //    ZOO_LOG_LEVEL_WARN=2,
     //    ZOO_LOG_LEVEL_INFO=3,
     //    ZOO_LOG_LEVEL_DEBUG=4
@@ -752,11 +950,11 @@ ZooLogLevel ZookeeperImpl::getZkLogLevel(){
         case LOG_FATAL:
         default:
             return ZOO_LOG_LEVEL_ERROR;
-    } 
+    }
     return ZOO_LOG_LEVEL_ERROR;
 }
 
-int ZookeeperImpl::connectToZookeeper(const char* connectStr){
+int ZookeeperImpl::connectToZookeeper(const char* connectStr, const char* pathToDrill){
     uint32_t waitTime=30000; // 30 seconds
     zoo_set_debug_level(getZkLogLevel());
     zoo_deterministic_conn_order(1); // enable deterministic order
@@ -780,24 +978,31 @@ int ZookeeperImpl::connectToZookeeper(const char* connectStr){
         return CONN_FAILURE;
     }
     int rc = ZOK;
-    rc=zoo_get_children(m_zh, (char*)s_drillRoot, 0, m_pDrillbits);
+    char rootDir[MAX_CONNECT_STR+1];
+    if(pathToDrill==NULL || strlen(pathToDrill)==0){
+        strcpy(rootDir, (char*)s_drillRoot);
+        strcat(rootDir, s_defaultCluster);
+    }else{
+        strncpy(rootDir, pathToDrill, MAX_CONNECT_STR); rootDir[MAX_CONNECT_STR]=0;
+    }
+    rc=zoo_get_children(m_zh, (char*)rootDir, 0, m_pDrillbits);
     if(rc!=ZOK){
         m_err=getMessage(ERR_CONN_ZKERR, rc);
         zookeeper_close(m_zh);
         return -1;
     }
 
-
     //Let's pick a random drillbit.
     if(m_pDrillbits && m_pDrillbits->count >0){
         int r=rand()%(this->m_pDrillbits->count);
         assert(r<this->m_pDrillbits->count);
         char * bit=this->m_pDrillbits->data[r];
         std::string s;
-        s=s_drillRoot +  std::string("/") + bit;
-        int buffer_len=1024;
-        char buffer[1024];
+        s=rootDir +  std::string("/") + bit;
+        int buffer_len=MAX_CONNECT_STR;
+        char buffer[MAX_CONNECT_STR+1];
         struct Stat stat;
+        buffer[MAX_CONNECT_STR]=0;
         rc= zoo_get(m_zh, s.c_str(), 0, buffer,  &buffer_len, &stat);
         if(rc!=ZOK){
             m_err=getMessage(ERR_CONN_ZKDBITERR, rc);
@@ -840,7 +1045,7 @@ void ZookeeperImpl::watcher(zhandle_t *zzh, int type, int state, const char *pat
     // signal the cond var
     {
         if (state == ZOO_CONNECTED_STATE){
-            BOOST_LOG_TRIVIAL(trace) << "Connected to Zookeeper." << std::endl;
+            DRILL_LOG(LOG_TRACE) << "Connected to Zookeeper." << std::endl;
         }
         boost::lock_guard<boost::mutex> bufferLock(self->m_cvMutex);
         self->m_bConnecting=false;
@@ -850,7 +1055,7 @@ void ZookeeperImpl::watcher(zhandle_t *zzh, int type, int state, const char *pat
 
 void ZookeeperImpl:: debugPrint(){
     if(m_zh!=NULL && m_state==ZOO_CONNECTED_STATE){
-        BOOST_LOG_TRIVIAL(trace) << m_drillServiceInstance.DebugString();
+        DRILL_LOG(LOG_TRACE) << m_drillServiceInstance.DebugString() << std::endl;
     }
 }
 


[16/32] git commit: DRILL-748: C++ Client. Support timestamp/date before Unix time and handle the y2038 problem.

Posted by ja...@apache.org.
DRILL-748: C++ Client. Support timestamp/date before Unix time and handle the y2038 problem.


Project: http://git-wip-us.apache.org/repos/asf/incubator-drill/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-drill/commit/b90956e3
Tree: http://git-wip-us.apache.org/repos/asf/incubator-drill/tree/b90956e3
Diff: http://git-wip-us.apache.org/repos/asf/incubator-drill/diff/b90956e3

Branch: refs/heads/master
Commit: b90956e3afc96df3968d2e7882b449c506ea0924
Parents: aaa4db7
Author: Xiao Meng <xi...@gmail.com>
Authored: Fri May 30 11:31:44 2014 -0700
Committer: Jacques Nadeau <ja...@apache.org>
Committed: Thu Jun 19 20:30:36 2014 -0700

----------------------------------------------------------------------
 LICENSE                                         |  26 +
 contrib/native/client/CMakeLists.txt            |   9 +-
 .../native/client/src/clientlib/CMakeLists.txt  |   2 +-
 .../native/client/src/clientlib/recordBatch.cpp |  80 +-
 .../client/src/clientlib/y2038/CMakeLists.txt   |  40 +
 .../native/client/src/clientlib/y2038/time64.c  | 830 +++++++++++++++++++
 .../native/client/src/clientlib/y2038/time64.h  | 110 +++
 .../client/src/clientlib/y2038/time64_config.h  | 107 +++
 .../client/src/clientlib/y2038/time64_limits.h  | 124 +++
 .../native/client/src/include/drill/drillc.hpp  |   2 +-
 .../client/src/include/drill/recordBatch.hpp    |  23 +-
 11 files changed, 1318 insertions(+), 35 deletions(-)
----------------------------------------------------------------------
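
For context, a minimal standalone sketch (not part of this commit) of the 2038 overflow that motivates pulling in the y2038 library: a signed 32-bit time_t tops out at 2^31 - 1 seconds after 1970-01-01T00:00:00Z.

```
#include <cstdio>
#include <cstdint>

int main() {
    std::int32_t maxTime = INT32_MAX; // 2147483647 -> 2038-01-19T03:14:07Z
    std::printf("max 32-bit time_t: %d\n", maxTime);
    // One second later no longer fits in a signed 32-bit value:
    std::printf("one past max:      %lld\n", static_cast<long long>(maxTime) + 1);
    return 0;
}
```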


http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/b90956e3/LICENSE
----------------------------------------------------------------------
diff --git a/LICENSE b/LICENSE
index 98d9950..f5352b1 100644
--- a/LICENSE
+++ b/LICENSE
@@ -287,6 +287,7 @@ For
   XML Commons External Components XML APIs (http://xml.apache.org/commons/#external) xml-apis:xml-apis:jar:1.0.b2
   XML Commons External Components XML APIs (http://xml.apache.org/commons/components/external/) xml-apis:xml-apis:jar:1.3.04
   Xml Compatibility extensions for Jackson (http://jackson.codehaus.org) org.codehaus.jackson:jackson-xc:jar:1.7.1
+  Y2038 (https://github.com/schwern/y2038)
 
   The Apache License, Version 2.0
 
@@ -678,3 +679,28 @@ For
  Reflections (http://code.google.com/p/reflections/) org.reflections:reflections:jar:0.9.8
 
   License: WTFPL  (http://en.wikipedia.org/wiki/WTFPL)
+
+For
+ Y2038 (https://github.com/schwern/y2038)
+
+The MIT License
+
+Copyright (c) 2007-2010 Michael G Schwern
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/b90956e3/contrib/native/client/CMakeLists.txt
----------------------------------------------------------------------
diff --git a/contrib/native/client/CMakeLists.txt b/contrib/native/client/CMakeLists.txt
index 9ac705b..31ac472 100644
--- a/contrib/native/client/CMakeLists.txt
+++ b/contrib/native/client/CMakeLists.txt
@@ -56,9 +56,12 @@ add_subdirectory("${CMAKE_SOURCE_DIR}/src/protobuf")
 include_directories(${ProtoHeadersDir})
 include_directories(${ProtoIncludesDir})
 
+# Y2038 library
+add_subdirectory("${CMAKE_SOURCE_DIR}/src/clientlib/y2038")
+
 # Build the Client Library as a shared library
 add_subdirectory("${CMAKE_SOURCE_DIR}/src/clientlib")
-include_directories(${CMAKE_SOURCE_DIR}/src/include ${Zookeeper_INCLUDE_DIRS}  )
+include_directories(${CMAKE_SOURCE_DIR}/src/include ${Zookeeper_INCLUDE_DIRS})
 
 # add a DEBUG preprocessor macro
 set_property(
@@ -71,6 +74,4 @@ link_directories(/usr/local/lib)
 
 add_executable(querySubmitter example/querySubmitter.cpp )
 
-target_link_libraries(querySubmitter ${Boost_LIBRARIES} ${PROTOBUF_LIBRARY} drillClient protomsgs )
-
-
+target_link_libraries(querySubmitter ${Boost_LIBRARIES} ${PROTOBUF_LIBRARY} drillClient protomsgs y2038)

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/b90956e3/contrib/native/client/src/clientlib/CMakeLists.txt
----------------------------------------------------------------------
diff --git a/contrib/native/client/src/clientlib/CMakeLists.txt b/contrib/native/client/src/clientlib/CMakeLists.txt
index 7cd5dfb..37f4734 100644
--- a/contrib/native/client/src/clientlib/CMakeLists.txt
+++ b/contrib/native/client/src/clientlib/CMakeLists.txt
@@ -41,4 +41,4 @@ set_property(
     )
 
 add_library(drillClient SHARED ${CLIENTLIB_SRC_FILES} )
-target_link_libraries(drillClient ${Boost_LIBRARIES} ${PROTOBUF_LIBRARY} ${Zookeeper_LIBRARIES} protomsgs)
+target_link_libraries(drillClient ${Boost_LIBRARIES} ${PROTOBUF_LIBRARY} ${Zookeeper_LIBRARIES} protomsgs y2038)

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/b90956e3/contrib/native/client/src/clientlib/recordBatch.cpp
----------------------------------------------------------------------
diff --git a/contrib/native/client/src/clientlib/recordBatch.cpp b/contrib/native/client/src/clientlib/recordBatch.cpp
index 27a592d..90fe11a 100644
--- a/contrib/native/client/src/clientlib/recordBatch.cpp
+++ b/contrib/native/client/src/clientlib/recordBatch.cpp
@@ -24,6 +24,10 @@ const uint32_t YEARS_TO_MONTHS=12;
 const uint32_t HOURS_TO_MILLIS=60*60*1000;
 const uint32_t MINUTES_TO_MILLIS=60*1000;
 const uint32_t SECONDS_TO_MILLIS=1000;
+extern "C"
+{
+    #include "y2038/time64.h"
+}
 
 namespace Drill{
 
@@ -196,9 +200,9 @@ ValueVectorBase* ValueVectorFactory::allocateValueVector(const Drill::FieldMetad
                 case common::DECIMAL38SPARSE:
                     return new ValueVectorDecimal38Sparse(b,f.getValueCount(), f.getScale());
                 case common::DATE:
-                    return new ValueVectorTyped<DateHolder, uint64_t>(b,f.getValueCount());
+                    return new ValueVectorTyped<DateHolder, int64_t>(b,f.getValueCount());
                 case common::TIMESTAMP:
-                    return new ValueVectorTyped<DateTimeHolder, uint64_t>(b,f.getValueCount());
+                    return new ValueVectorTyped<DateTimeHolder, int64_t>(b,f.getValueCount());
                 case common::TIME:
                     return new ValueVectorTyped<TimeHolder, uint32_t>(b,f.getValueCount());
                 case common::TIMESTAMPTZ:
@@ -235,10 +239,10 @@ ValueVectorBase* ValueVectorFactory::allocateValueVector(const Drill::FieldMetad
                     return new NullableValueVectorFixed<double>(b,f.getValueCount());
                 case common::DATE:
                     return new NullableValueVectorTyped<DateHolder,
-                           ValueVectorTyped<DateHolder, uint64_t> >(b,f.getValueCount());
+                           ValueVectorTyped<DateHolder, int64_t> >(b,f.getValueCount());
                 case common::TIMESTAMP:
                     return new NullableValueVectorTyped<DateTimeHolder,
-                           ValueVectorTyped<DateTimeHolder, uint64_t> >(b,f.getValueCount());
+                           ValueVectorTyped<DateTimeHolder, int64_t> >(b,f.getValueCount());
                 case common::TIME:
                     return new NullableValueVectorTyped<TimeHolder,
                            ValueVectorTyped<TimeHolder, uint32_t> >(b,f.getValueCount());
@@ -357,12 +361,48 @@ void DateHolder::load(){
     m_year=1970;
     m_month=1;
     m_day=1;
-
-    time_t  t= m_datetime/1000; // number of seconds since beginning of the Unix Epoch.
-    struct tm * tm = gmtime(&t);
-    m_year=tm->tm_year+1900;
-    m_month=tm->tm_mon+1;
-    m_day=tm->tm_mday;
+    const Time64_T  t= m_datetime/1000; // number of seconds since beginning of the Unix Epoch.
+    /*
+    TL;DR
+
+    The gmtime in the standard library on the Windows platform cannot represent dates before the Unix Epoch.
+    http://msdn.microsoft.com/en-us/library/0z9czt0w(v=vs.100).aspx
+
+    """
+    Both the 32-bit and 64-bit versions of gmtime, mktime, mkgmtime, and localtime all use one tm structure per thread for the conversion.
+    Each call to one of these functions destroys the result of any previous call.
+    If timer represents a date before midnight, January 1, 1970, gmtime returns NULL.  There is no error return.
+
+    _gmtime64, which uses the __time64_t structure, enables dates to be expressed up through 23:59:59, December 31, 3000, UTC,
+    whereas _gmtime32 only represent dates through 03:14:07 January 19, 2038, UTC.
+    Midnight, January 1, 1970, is the lower bound of the date range for both of these functions.
+
+    gmtime is an inline function that evaluates to _gmtime64 and time_t is equivalent to __time64_t unless _USE_32BIT_TIME_T is defined.
+    """
+
+    An alternative could be the boost date_time library.
+
+    ```
+    #include <boost/date_time/posix_time/posix_time.hpp>
+    using namespace boost::posix_time;
+    ptime pt = from_time_t(t);
+    struct tm d = to_tm(pt);
+    ```
+
+    However, the boost date_time library still has a year-2038 problem which is not yet fixed.
+    https://svn.boost.org/trac/boost/ticket/4543
+    One reason is that the library converts the 64-bit `time_t t` into the `seconds` type to get POSIX time.
+    But boost uses `long` to represent `seconds`, which is 4 bytes on the Windows platform:
+    http://msdn.microsoft.com/en-us/library/s3f49ktz.aspx
+
+    We eventually chose this third-party MIT-licensed library, written in ANSI C:
+    https://github.com/schwern/y2038
+    */
+    struct TM d;
+    gmtime64_r(&t,&d);
+    m_year=1900 + static_cast<int32_t>(d.tm_year);
+    m_month=d.tm_mon + 1;
+    m_day=d.tm_mday;
 }
 
 std::string DateHolder::toString(){
@@ -400,15 +440,19 @@ void DateTimeHolder::load(){
     m_sec=0;
     m_msec=0;
 
-    time_t  t= m_datetime/1000; // number of seconds since beginning of the Unix Epoch.
-    struct tm * tm = gmtime(&t);
-    m_year=tm->tm_year+1900;
-    m_month=tm->tm_mon+1;
-    m_day=tm->tm_mday;
-    m_hr=tm->tm_hour;
-    m_min=tm->tm_min;
-    m_sec=tm->tm_sec;
+    const Time64_T  t=m_datetime/1000; // number of seconds since beginning of the Unix Epoch.
+
+    struct TM dt;
+    gmtime64_r(&t,&dt);
+
+    m_year=1900 + dt.tm_year;
+    m_month=dt.tm_mon + 1;
+    m_day=dt.tm_mday;
+    m_hr=dt.tm_hour;
+    m_min=dt.tm_min;
+    m_sec=dt.tm_sec;
     m_msec=m_datetime%1000;
+
 }
 
 std::string DateTimeHolder::toString(){

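To make the conversion in DateHolder::load()/DateTimeHolder::load() above concrete, here is a minimal standalone sketch (assuming the y2038 sources from this commit are built and on the include path; it is not part of the commit itself): gmtime64_r() handles a timestamp from before the Unix epoch, which gmtime() cannot do on Windows.

```
#include <cstdio>
extern "C" {
    #include "y2038/time64.h"
}

int main() {
    Time64_T t = -86400; // 1969-12-31T00:00:00Z, one day before the epoch
    struct TM d;
    if (gmtime64_r(&t, &d) != NULL) {
        std::printf("%04d-%02d-%02d\n",
                    1900 + (int)d.tm_year, d.tm_mon + 1, d.tm_mday);
    }
    return 0;
}
```
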
http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/b90956e3/contrib/native/client/src/clientlib/y2038/CMakeLists.txt
----------------------------------------------------------------------
diff --git a/contrib/native/client/src/clientlib/y2038/CMakeLists.txt b/contrib/native/client/src/clientlib/y2038/CMakeLists.txt
new file mode 100644
index 0000000..1cb4d72
--- /dev/null
+++ b/contrib/native/client/src/clientlib/y2038/CMakeLists.txt
@@ -0,0 +1,40 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# Y2038 library
+
+if(CMAKE_COMPILER_IS_GNUCXX OR CMAKE_COMPILER_IS_GNUCC)
+    set(CMAKE_CXX_FLAGS "-fPIC")
+endif()
+
+if(CMAKE_COMPILER_IS_GNUCC)
+    set(CMAKE_C_FLAGS "-fPIC")
+endif()
+
+set (Y2038_SRC_FILES
+    ${CMAKE_CURRENT_SOURCE_DIR}/time64.c
+    )
+
+include_directories(${CMAKE_CURRENT_SOURCE_DIR} ${CMAKE_CURRENT_SOURCE_DIR}/../../include )
+
+set_property(
+    DIRECTORY
+    PROPERTY COMPILE_DEFINITIONS_DEBUG DEBUG DEBUG=1 THREADED
+    )
+
+add_library(y2038 STATIC ${Y2038_SRC_FILES} )

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/b90956e3/contrib/native/client/src/clientlib/y2038/time64.c
----------------------------------------------------------------------
diff --git a/contrib/native/client/src/clientlib/y2038/time64.c b/contrib/native/client/src/clientlib/y2038/time64.c
new file mode 100644
index 0000000..e0d61c8
--- /dev/null
+++ b/contrib/native/client/src/clientlib/y2038/time64.c
@@ -0,0 +1,830 @@
+/* 
+
+Copyright (c) 2007-2010  Michael G Schwern
+
+This software originally derived from Paul Sheer's pivotal_gmtime_r.c.
+
+The MIT License:
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
+
+*/
+
+/*
+
+Programmers who have available to them 64-bit time values as a 'long
+long' type can use localtime64_r() and gmtime64_r() which correctly
+convert the time even on 32-bit systems. Whether you have 64-bit time
+values will depend on the operating system.
+
+localtime64_r() is a 64-bit equivalent of localtime_r().
+
+gmtime64_r() is a 64-bit equivalent of gmtime_r().
+
+*/
+
+#include <assert.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include <time.h>
+#include <errno.h>
+#include "time64.h"
+#include "time64_limits.h"
+
+
+/* Spec says except for strftime() and the _r() functions, these
+   all return static memory.  Stabbings! */
+static struct TM   Static_Return_Date;
+static char        Static_Return_String[35];
+
+static const char days_in_month[2][12] = {
+    {31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31},
+    {31, 29, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31},
+};
+
+static const short julian_days_by_month[2][12] = {
+    {0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334},
+    {0, 31, 60, 91, 121, 152, 182, 213, 244, 274, 305, 335},
+};
+
+static char wday_name[7][4] = {
+    "Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"
+};
+
+static char mon_name[12][4] = {
+    "Jan", "Feb", "Mar", "Apr", "May", "Jun",
+    "Jul", "Aug", "Sep", "Oct", "Nov", "Dec"
+};
+
+static const short length_of_year[2] = { 365, 366 };
+
+/* Some numbers relating to the gregorian cycle */
+static const Year     years_in_gregorian_cycle   = 400;
+#define               days_in_gregorian_cycle      ((365 * 400) + 100 - 4 + 1)
+static const Time64_T seconds_in_gregorian_cycle = days_in_gregorian_cycle * 60LL * 60LL * 24LL;
+
+/* Year range we can trust the time functions with */
+#define MAX_SAFE_YEAR 2037
+#define MIN_SAFE_YEAR 1971
+
+/* 28 year Julian calendar cycle */
+#define SOLAR_CYCLE_LENGTH 28
+
+/* Year cycle from MAX_SAFE_YEAR down. */
+static const short safe_years_high[SOLAR_CYCLE_LENGTH] = {
+    2016, 2017, 2018, 2019,
+    2020, 2021, 2022, 2023,
+    2024, 2025, 2026, 2027,
+    2028, 2029, 2030, 2031,
+    2032, 2033, 2034, 2035,
+    2036, 2037, 2010, 2011,
+    2012, 2013, 2014, 2015
+};
+
+/* Year cycle from MIN_SAFE_YEAR up */
+static const int safe_years_low[SOLAR_CYCLE_LENGTH] = {
+    1996, 1997, 1998, 1971,
+    1972, 1973, 1974, 1975,
+    1976, 1977, 1978, 1979,
+    1980, 1981, 1982, 1983,
+    1984, 1985, 1986, 1987,
+    1988, 1989, 1990, 1991,
+    1992, 1993, 1994, 1995,
+};
+
+/* This isn't used, but it's handy to look at */
+static const char dow_year_start[SOLAR_CYCLE_LENGTH] = {
+    5, 0, 1, 2,     /* 0       2016 - 2019 */
+    3, 5, 6, 0,     /* 4  */
+    1, 3, 4, 5,     /* 8       1996 - 1998, 1971*/
+    6, 1, 2, 3,     /* 12      1972 - 1975 */
+    4, 6, 0, 1,     /* 16 */
+    2, 4, 5, 6,     /* 20      2036, 2037, 2010, 2011 */
+    0, 2, 3, 4      /* 24      2012, 2013, 2014, 2015 */
+};
+
+/* Let's assume people are going to be looking for dates in the future.
+   Let's provide some cheats so you can skip ahead.
+   This has a 4x speed boost when near 2008.
+*/
+/* Number of days since epoch on Jan 1st, 2008 GMT */
+#define CHEAT_DAYS  (1199145600 / 24 / 60 / 60)
+#define CHEAT_YEARS 108
+
+#define IS_LEAP(n)      ((!(((n) + 1900) % 400) || (!(((n) + 1900) % 4) && (((n) + 1900) % 100))) != 0)
+#define WRAP(a,b,m)     ((a) = ((a) <  0  ) ? ((b)--, (a) + (m)) : (a))
+
+#ifdef USE_SYSTEM_LOCALTIME
+#    define SHOULD_USE_SYSTEM_LOCALTIME(a)  (       \
+    (a) <= SYSTEM_LOCALTIME_MAX &&              \
+    (a) >= SYSTEM_LOCALTIME_MIN                 \
+)
+#else
+#    define SHOULD_USE_SYSTEM_LOCALTIME(a)      (0)
+#endif
+
+#ifdef USE_SYSTEM_GMTIME
+#    define SHOULD_USE_SYSTEM_GMTIME(a)     (       \
+    (a) <= SYSTEM_GMTIME_MAX    &&              \
+    (a) >= SYSTEM_GMTIME_MIN                    \
+)
+#else
+#    define SHOULD_USE_SYSTEM_GMTIME(a)         (0)
+#endif
+
+/* Multi-argument variadic macros are a C99 thing, alas */
+#ifdef TIME_64_DEBUG
+#    define TIME64_TRACE(format) (fprintf(stderr, format))
+#    define TIME64_TRACE1(format, var1)    (fprintf(stderr, format, var1))
+#    define TIME64_TRACE2(format, var1, var2)    (fprintf(stderr, format, var1, var2))
+#    define TIME64_TRACE3(format, var1, var2, var3)    (fprintf(stderr, format, var1, var2, var3))
+#else
+#    define TIME64_TRACE(format) ((void)0)
+#    define TIME64_TRACE1(format, var1) ((void)0)
+#    define TIME64_TRACE2(format, var1, var2) ((void)0)
+#    define TIME64_TRACE3(format, var1, var2, var3) ((void)0)
+#endif
+
+
+static int is_exception_century(Year year)
+{
+    int is_exception = ((year % 100 == 0) && !(year % 400 == 0));
+    TIME64_TRACE1("# is_exception_century: %s\n", is_exception ? "yes" : "no");
+
+    return(is_exception);
+}
+
+
+/* Compare two dates.
+   The result is like cmp.
+   Ignores things like gmtoffset and dst
+*/
+int cmp_date( const struct TM* left, const struct tm* right ) {
+    if( left->tm_year > right->tm_year )
+        return 1;
+    else if( left->tm_year < right->tm_year )
+        return -1;
+
+    if( left->tm_mon > right->tm_mon )
+        return 1;
+    else if( left->tm_mon < right->tm_mon )
+        return -1;
+
+    if( left->tm_mday > right->tm_mday )
+        return 1;
+    else if( left->tm_mday < right->tm_mday )
+        return -1;
+
+    if( left->tm_hour > right->tm_hour )
+        return 1;
+    else if( left->tm_hour < right->tm_hour )
+        return -1;
+
+    if( left->tm_min > right->tm_min )
+        return 1;
+    else if( left->tm_min < right->tm_min )
+        return -1;
+
+    if( left->tm_sec > right->tm_sec )
+        return 1;
+    else if( left->tm_sec < right->tm_sec )
+        return -1;
+
+    return 0;
+}
+
+
+/* Check if a date is safely inside a range.
+   The intention is to check if it's a few days inside.
+*/
+int date_in_safe_range( const struct TM* date, const struct tm* min, const struct tm* max ) {
+    if( cmp_date(date, min) == -1 )
+        return 0;
+
+    if( cmp_date(date, max) == 1 )
+        return 0;
+
+    return 1;
+}
+
+
+/* timegm() is not in the C or POSIX spec, but it is such a useful
+   extension I would be remiss in leaving it out.  Also I need it
+   for localtime64()
+*/
+Time64_T timegm64(const struct TM *date) {
+    Time64_T days    = 0;
+    Time64_T seconds = 0;
+    Year     year;
+    Year     orig_year = (Year)date->tm_year;
+    int      cycles  = 0;
+
+    if( orig_year > 100 ) {
+        cycles = (orig_year - 100) / 400;
+        orig_year -= cycles * 400;
+        days      += (Time64_T)cycles * days_in_gregorian_cycle;
+    }
+    else if( orig_year < -300 ) {
+        cycles = (orig_year - 100) / 400;
+        orig_year -= cycles * 400;
+        days      += (Time64_T)cycles * days_in_gregorian_cycle;
+    }
+    TIME64_TRACE3("# timegm/ cycles: %d, days: %lld, orig_year: %lld\n", cycles, days, orig_year);
+
+    if( orig_year > 70 ) {
+        year = 70;
+        while( year < orig_year ) {
+            days += length_of_year[IS_LEAP(year)];
+            year++;
+        }
+    }
+    else if ( orig_year < 70 ) {
+        year = 69;
+        do {
+            days -= length_of_year[IS_LEAP(year)];
+            year--;
+        } while( year >= orig_year );
+    }
+
+    days += julian_days_by_month[IS_LEAP(orig_year)][date->tm_mon];
+    days += date->tm_mday - 1;
+
+    seconds = days * 60 * 60 * 24;
+
+    seconds += date->tm_hour * 60 * 60;
+    seconds += date->tm_min * 60;
+    seconds += date->tm_sec;
+
+    return(seconds);
+}
+
+
+static int check_tm(struct TM *tm)
+{
+    /* Don't forget leap seconds */
+    assert(tm->tm_sec >= 0);
+    assert(tm->tm_sec <= 61);
+
+    assert(tm->tm_min >= 0);
+    assert(tm->tm_min <= 59);
+
+    assert(tm->tm_hour >= 0);
+    assert(tm->tm_hour <= 23);
+
+    assert(tm->tm_mday >= 1);
+    assert(tm->tm_mday <= days_in_month[IS_LEAP(tm->tm_year)][tm->tm_mon]);
+
+    assert(tm->tm_mon  >= 0);
+    assert(tm->tm_mon  <= 11);
+
+    assert(tm->tm_wday >= 0);
+    assert(tm->tm_wday <= 6);
+    
+    assert(tm->tm_yday >= 0);
+    assert(tm->tm_yday <= length_of_year[IS_LEAP(tm->tm_year)]);
+
+#ifdef HAS_TM_TM_GMTOFF
+    assert(tm->tm_gmtoff >= -24 * 60 * 60);
+    assert(tm->tm_gmtoff <=  24 * 60 * 60);
+#endif
+
+    return 1;
+}
+
+
+/* The exceptional centuries without leap years cause the cycle to
+   shift by 16
+*/
+static Year cycle_offset(Year year)
+{
+    const Year start_year = 2000;
+    Year year_diff  = year - start_year;
+    Year exceptions;
+
+    if( year > start_year )
+        year_diff--;
+
+    exceptions  = year_diff / 100;
+    exceptions -= year_diff / 400;
+
+    TIME64_TRACE3("# year: %lld, exceptions: %lld, year_diff: %lld\n",
+          year, exceptions, year_diff);
+
+    return exceptions * 16;
+}
+
+/* For a given year after 2038, pick the latest possible matching
+   year in the 28 year calendar cycle.
+
+   A matching year...
+   1) Starts on the same day of the week.
+   2) Has the same leap year status.
+
+   This is so the calendars match up.
+
+   Also the previous year must match.  When doing Jan 1st you might
+   wind up on Dec 31st the previous year when doing a -UTC time zone.
+
+   Finally, the next year must have the same start day of week.  This
+   is for Dec 31st with a +UTC time zone.
+   It doesn't need the same leap year status since we only care about
+   January 1st.
+*/
+static int safe_year(const Year year)
+{
+    int safe_year;
+    Year year_cycle;
+
+    if( year >= MIN_SAFE_YEAR && year <= MAX_SAFE_YEAR ) {
+        return (int)year;
+    }
+
+    year_cycle = year + cycle_offset(year);
+
+    /* safe_years_low is off from safe_years_high by 8 years */
+    if( year < MIN_SAFE_YEAR )
+        year_cycle -= 8;
+
+    /* Change non-leap xx00 years to an equivalent */
+    if( is_exception_century(year) )
+        year_cycle += 11;
+
+    /* Also xx01 years, since the previous year will be wrong */
+    if( is_exception_century(year - 1) )
+        year_cycle += 17;
+
+    year_cycle %= SOLAR_CYCLE_LENGTH;
+    if( year_cycle < 0 ) 
+        year_cycle = SOLAR_CYCLE_LENGTH + year_cycle;
+
+    assert( year_cycle >= 0 );
+    assert( year_cycle < SOLAR_CYCLE_LENGTH );
+    if( year < MIN_SAFE_YEAR )
+        safe_year = safe_years_low[year_cycle];
+    else if( year > MAX_SAFE_YEAR )
+        safe_year = safe_years_high[year_cycle];
+    else
+        assert(0);
+
+    TIME64_TRACE3("# year: %lld, year_cycle: %lld, safe_year: %d\n",
+          year, year_cycle, safe_year);
+
+    assert(safe_year <= MAX_SAFE_YEAR && safe_year >= MIN_SAFE_YEAR);
+
+    return safe_year;
+}
+
+
+void copy_tm_to_TM64(const struct tm *src, struct TM *dest) {
+    if( src == NULL ) {
+        memset(dest, 0, sizeof(*dest));
+    }
+    else {
+#       ifdef USE_TM64
+            dest->tm_sec        = src->tm_sec;
+            dest->tm_min        = src->tm_min;
+            dest->tm_hour       = src->tm_hour;
+            dest->tm_mday       = src->tm_mday;
+            dest->tm_mon        = src->tm_mon;
+            dest->tm_year       = (Year)src->tm_year;
+            dest->tm_wday       = src->tm_wday;
+            dest->tm_yday       = src->tm_yday;
+            dest->tm_isdst      = src->tm_isdst;
+
+#           ifdef HAS_TM_TM_GMTOFF
+                dest->tm_gmtoff  = src->tm_gmtoff;
+#           endif
+
+#           ifdef HAS_TM_TM_ZONE
+                dest->tm_zone  = src->tm_zone;
+#           endif
+
+#       else
+            /* They're the same type */
+            memcpy(dest, src, sizeof(*dest));
+#       endif
+    }
+}
+
+
+void copy_TM64_to_tm(const struct TM *src, struct tm *dest) {
+    if( src == NULL ) {
+        memset(dest, 0, sizeof(*dest));
+    }
+    else {
+#       ifdef USE_TM64
+            dest->tm_sec        = src->tm_sec;
+            dest->tm_min        = src->tm_min;
+            dest->tm_hour       = src->tm_hour;
+            dest->tm_mday       = src->tm_mday;
+            dest->tm_mon        = src->tm_mon;
+            dest->tm_year       = (int)src->tm_year;
+            dest->tm_wday       = src->tm_wday;
+            dest->tm_yday       = src->tm_yday;
+            dest->tm_isdst      = src->tm_isdst;
+
+#           ifdef HAS_TM_TM_GMTOFF
+                dest->tm_gmtoff  = src->tm_gmtoff;
+#           endif
+
+#           ifdef HAS_TM_TM_ZONE
+                dest->tm_zone  = src->tm_zone;
+#           endif
+
+#       else
+            /* They're the same type */
+            memcpy(dest, src, sizeof(*dest));
+#       endif
+    }
+}
+
+
+/* Simulate localtime_r() to the best of our ability */
+struct tm * fake_localtime_r(const time_t *time, struct tm *result) {
+    const struct tm *static_result = localtime(time);
+
+    assert(result != NULL);
+
+    if( static_result == NULL ) {
+        memset(result, 0, sizeof(*result));
+        return NULL;
+    }
+    else {
+        memcpy(result, static_result, sizeof(*result));
+        return result;
+    }
+}
+
+
+/* Simulate gmtime_r() to the best of our ability */
+struct tm * fake_gmtime_r(const time_t *time, struct tm *result) {
+    const struct tm *static_result = gmtime(time);
+
+    assert(result != NULL);
+
+    if( static_result == NULL ) {
+        memset(result, 0, sizeof(*result));
+        return NULL;
+    }
+    else {
+        memcpy(result, static_result, sizeof(*result));
+        return result;
+    }
+}
+
+
+static Time64_T seconds_between_years(Year left_year, Year right_year) {
+    int increment = (left_year > right_year) ? 1 : -1;
+    Time64_T seconds = 0;
+    int cycles;
+
+    if( left_year > 2400 ) {
+        cycles = (left_year - 2400) / 400;
+        left_year -= cycles * 400;
+        seconds   += cycles * seconds_in_gregorian_cycle;
+    }
+    else if( left_year < 1600 ) {
+        cycles = (left_year - 1600) / 400;
+        left_year += cycles * 400;
+        seconds   += cycles * seconds_in_gregorian_cycle;
+    }
+
+    while( left_year != right_year ) {
+        seconds += length_of_year[IS_LEAP(right_year - 1900)] * 60 * 60 * 24;
+        right_year += increment;
+    }
+
+    return seconds * increment;
+}
+
+
+Time64_T mktime64(struct TM *input_date) {
+    struct tm safe_date;
+    struct TM date;
+    Time64_T  time;
+    Year      year = input_date->tm_year + 1900;
+
+    if( date_in_safe_range(input_date, &SYSTEM_MKTIME_MIN, &SYSTEM_MKTIME_MAX) )
+    {
+        copy_TM64_to_tm(input_date, &safe_date);
+        time = (Time64_T)mktime(&safe_date);
+
+        /* Correct the possibly out of bound input date */
+        copy_tm_to_TM64(&safe_date, input_date);
+        return time;
+    }
+
+    /* Have to make the year safe in date else it won't fit in safe_date */
+    date = *input_date;
+    date.tm_year = safe_year(year) - 1900;
+    copy_TM64_to_tm(&date, &safe_date);
+
+    time = (Time64_T)mktime(&safe_date);
+
+    /* Correct the user's possibly out of bound input date */
+    copy_tm_to_TM64(&safe_date, input_date);
+
+    time += seconds_between_years(year, (Year)(safe_date.tm_year + 1900));
+
+    return time;
+}
+
+
+/* Because I think mktime() is a crappy name */
+Time64_T timelocal64(struct TM *date) {
+    return mktime64(date);
+}
+
+
+struct TM *gmtime64_r (const Time64_T *in_time, struct TM *p)
+{
+    int v_tm_sec, v_tm_min, v_tm_hour, v_tm_mon, v_tm_wday;
+    Time64_T v_tm_tday;
+    int leap;
+    Time64_T m;
+    Time64_T time = *in_time;
+    Year year = 70;
+    int cycles = 0;
+
+    assert(p != NULL);
+
+    /* Use the system gmtime() if time_t is small enough */
+    if( SHOULD_USE_SYSTEM_GMTIME(*in_time) ) {
+        time_t safe_time = (time_t)*in_time;
+        struct tm safe_date;
+        GMTIME_R(&safe_time, &safe_date);
+
+        copy_tm_to_TM64(&safe_date, p);
+        assert(check_tm(p));
+
+        return p;
+    }
+
+#ifdef HAS_TM_TM_GMTOFF
+    p->tm_gmtoff = 0;
+#endif
+    p->tm_isdst  = 0;
+
+#ifdef HAS_TM_TM_ZONE
+    p->tm_zone   = "UTC";
+#endif
+
+    v_tm_sec =  (int)(time % 60);
+    time /= 60;
+    v_tm_min =  (int)(time % 60);
+    time /= 60;
+    v_tm_hour = (int)(time % 24);
+    time /= 24;
+    v_tm_tday = time;
+
+    WRAP (v_tm_sec, v_tm_min, 60);
+    WRAP (v_tm_min, v_tm_hour, 60);
+    WRAP (v_tm_hour, v_tm_tday, 24);
+
+    v_tm_wday = (int)((v_tm_tday + 4) % 7);
+    if (v_tm_wday < 0)
+        v_tm_wday += 7;
+    m = v_tm_tday;
+
+    if (m >= CHEAT_DAYS) {
+        year = CHEAT_YEARS;
+        m -= CHEAT_DAYS;
+    }
+
+    if (m >= 0) {
+        /* Gregorian cycles, this is huge optimization for distant times */
+        cycles = (int)(m / (Time64_T) days_in_gregorian_cycle);
+        if( cycles ) {
+            m -= (cycles * (Time64_T) days_in_gregorian_cycle);
+            year += (cycles * years_in_gregorian_cycle);
+        }
+
+        /* Years */
+        leap = IS_LEAP (year);
+        while (m >= (Time64_T) length_of_year[leap]) {
+            m -= (Time64_T) length_of_year[leap];
+            year++;
+            leap = IS_LEAP (year);
+        }
+
+        /* Months */
+        v_tm_mon = 0;
+        while (m >= (Time64_T) days_in_month[leap][v_tm_mon]) {
+            m -= (Time64_T) days_in_month[leap][v_tm_mon];
+            v_tm_mon++;
+        }
+    } else {
+        year--;
+
+        /* Gregorian cycles */
+        cycles = (int)((m / (Time64_T) days_in_gregorian_cycle) + 1);
+        if( cycles ) {
+            m -= (cycles * (Time64_T) days_in_gregorian_cycle);
+            year += (cycles * years_in_gregorian_cycle);
+        }
+
+        /* Years */
+        leap = IS_LEAP (year);
+        while (m < (Time64_T) -length_of_year[leap]) {
+            m += (Time64_T) length_of_year[leap];
+            year--;
+            leap = IS_LEAP (year);
+        }
+
+        /* Months */
+        v_tm_mon = 11;
+        while (m < (Time64_T) -days_in_month[leap][v_tm_mon]) {
+            m += (Time64_T) days_in_month[leap][v_tm_mon];
+            v_tm_mon--;
+        }
+        m += (Time64_T) days_in_month[leap][v_tm_mon];
+    }
+
+    p->tm_year = year;
+    if( p->tm_year != year ) {
+#ifdef EOVERFLOW
+        errno = EOVERFLOW;
+#endif
+        return NULL;
+    }
+
+    /* At this point m is less than a year so casting to an int is safe */
+    p->tm_mday = (int) m + 1;
+    p->tm_yday = julian_days_by_month[leap][v_tm_mon] + (int)m;
+    p->tm_sec  = v_tm_sec;
+    p->tm_min  = v_tm_min;
+    p->tm_hour = v_tm_hour;
+    p->tm_mon  = v_tm_mon;
+    p->tm_wday = v_tm_wday;
+    
+    assert(check_tm(p));
+
+    return p;
+}
+
+
+struct TM *localtime64_r (const Time64_T *time, struct TM *local_tm)
+{
+    time_t safe_time;
+    struct tm safe_date;
+    struct TM gm_tm;
+    Year orig_year;
+    int month_diff;
+
+    assert(local_tm != NULL);
+
+    /* Use the system localtime() if time_t is small enough */
+    if( SHOULD_USE_SYSTEM_LOCALTIME(*time) ) {
+        safe_time = (time_t)*time;
+
+        TIME64_TRACE1("Using system localtime for %lld\n", *time);
+
+        LOCALTIME_R(&safe_time, &safe_date);
+
+        copy_tm_to_TM64(&safe_date, local_tm);
+        assert(check_tm(local_tm));
+
+        return local_tm;
+    }
+
+    if( gmtime64_r(time, &gm_tm) == NULL ) {
+        TIME64_TRACE1("gmtime64_r returned null for %lld\n", *time);
+        return NULL;
+    }
+
+    orig_year = gm_tm.tm_year;
+
+    if (gm_tm.tm_year > (2037 - 1900) ||
+        gm_tm.tm_year < (1970 - 1900)
+       )
+    {
+        TIME64_TRACE1("Mapping tm_year %lld to safe_year\n", (Year)gm_tm.tm_year);
+        gm_tm.tm_year = safe_year((Year)(gm_tm.tm_year + 1900)) - 1900;
+    }
+
+    safe_time = (time_t)timegm64(&gm_tm);
+    if( LOCALTIME_R(&safe_time, &safe_date) == NULL ) {
+        TIME64_TRACE1("localtime_r(%d) returned NULL\n", (int)safe_time);
+        return NULL;
+    }
+
+    copy_tm_to_TM64(&safe_date, local_tm);
+
+    local_tm->tm_year = orig_year;
+    if( local_tm->tm_year != orig_year ) {
+        TIME64_TRACE2("tm_year overflow: tm_year %lld, orig_year %lld\n",
+              (Year)local_tm->tm_year, (Year)orig_year);
+
+#ifdef EOVERFLOW
+        errno = EOVERFLOW;
+#endif
+        return NULL;
+    }
+
+
+    month_diff = local_tm->tm_mon - gm_tm.tm_mon;
+
+    /*  When localtime is Dec 31st previous year and
+        gmtime is Jan 1st next year.
+    */
+    if( month_diff == 11 ) {
+        local_tm->tm_year--;
+    }
+
+    /*  When localtime is Jan 1st, next year and
+        gmtime is Dec 31st, previous year.
+    */
+    if( month_diff == -11 ) {
+        local_tm->tm_year++;
+    }
+
+    /* GMT is Jan 1st, xx01 year, but localtime is still Dec 31st 
+       in a non-leap xx00.  There is one point in the cycle
+       we can't account for, in which the safe xx00 year is a
+       leap year.  So we need to correct for Dec 31st coming out as
+       the 366th day of the year.
+    */
+    if( !IS_LEAP(local_tm->tm_year) && local_tm->tm_yday == 365 )
+        local_tm->tm_yday--;
+
+    assert(check_tm(local_tm));
+    
+    return local_tm;
+}
+
+
+int valid_tm_wday( const struct TM* date ) {
+    if( 0 <= date->tm_wday && date->tm_wday <= 6 )
+        return 1;
+    else
+        return 0;
+}
+
+int valid_tm_mon( const struct TM* date ) {
+    if( 0 <= date->tm_mon && date->tm_mon <= 11 )
+        return 1;
+    else
+        return 0;
+}
+
+
+char *asctime64_r( const struct TM* date, char *result ) {
+    /* I figure everything else can be displayed, even hour 25, but if
+       these are out of range we walk off the name arrays */
+    if( !valid_tm_wday(date) || !valid_tm_mon(date) )
+        return NULL;
+
+    sprintf(result, TM64_ASCTIME_FORMAT,
+        wday_name[date->tm_wday],
+        mon_name[date->tm_mon],
+        date->tm_mday, date->tm_hour,
+        date->tm_min, date->tm_sec,
+        1900 + date->tm_year);
+
+    return result;
+}
+
+
+char *ctime64_r( const Time64_T* time, char* result ) {
+    struct TM date;
+
+    localtime64_r( time, &date );
+    return asctime64_r( &date, result );
+}
+
+
+/* Non-thread safe versions of the above */
+struct TM *localtime64(const Time64_T *time) {
+    tzset();
+    return localtime64_r(time, &Static_Return_Date);
+}
+
+struct TM *gmtime64(const Time64_T *time) {
+    return gmtime64_r(time, &Static_Return_Date);
+}
+
+char *asctime64( const struct TM* date ) {
+    return asctime64_r( date, Static_Return_String );
+}
+
+char *ctime64( const Time64_T* time ) {
+    tzset();
+    return asctime64(localtime64(time));
+}

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/b90956e3/contrib/native/client/src/clientlib/y2038/time64.h
----------------------------------------------------------------------
diff --git a/contrib/native/client/src/clientlib/y2038/time64.h b/contrib/native/client/src/clientlib/y2038/time64.h
new file mode 100644
index 0000000..89fbd7c
--- /dev/null
+++ b/contrib/native/client/src/clientlib/y2038/time64.h
@@ -0,0 +1,110 @@
+/* 
+
+Copyright (c) 2007-2010  Michael G Schwern
+
+This software originally derived from Paul Sheer's pivotal_gmtime_r.c.
+
+The MIT License:
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
+
+*/
+
+
+#ifndef TIME64_H
+#    define TIME64_H
+
+#include <time.h>
+#include "time64_config.h"
+
+/* Set our custom types */
+typedef INT_64_T        Int64;
+typedef Int64           Time64_T;
+typedef Int64           Year;
+
+
+/* A copy of the tm struct but with a 64 bit year */
+struct TM64 {
+        int     tm_sec;
+        int     tm_min;
+        int     tm_hour;
+        int     tm_mday;
+        int     tm_mon;
+        Year    tm_year;
+        int     tm_wday;
+        int     tm_yday;
+        int     tm_isdst;
+
+#ifdef HAS_TM_TM_GMTOFF
+        long    tm_gmtoff;
+#endif
+
+#ifdef HAS_TM_TM_ZONE
+        char    *tm_zone;
+#endif
+};
+
+
+/* Decide which tm struct to use */
+#ifdef USE_TM64
+#define TM      TM64
+#else
+#define TM      tm
+#endif   
+
+
+/* Declare public functions */
+struct TM *gmtime64_r    (const Time64_T *, struct TM *);
+struct TM *localtime64_r (const Time64_T *, struct TM *);
+struct TM *gmtime64      (const Time64_T *);
+struct TM *localtime64   (const Time64_T *);
+
+char *asctime64          (const struct TM *);
+char *asctime64_r        (const struct TM *, char *);
+
+char *ctime64            (const Time64_T*);
+char *ctime64_r          (const Time64_T*, char*);
+
+Time64_T   timegm64      (const struct TM *);
+Time64_T   mktime64      (struct TM *);
+Time64_T   timelocal64   (struct TM *);
+
+
+/* Not everyone has gm/localtime_r(), provide a replacement */
+#ifdef HAS_LOCALTIME_R
+#    define LOCALTIME_R(clock, result) localtime_r(clock, result)
+#else
+#    define LOCALTIME_R(clock, result) fake_localtime_r(clock, result)
+#endif
+#ifdef HAS_GMTIME_R
+#    define GMTIME_R(clock, result)    gmtime_r(clock, result)
+#else
+#    define GMTIME_R(clock, result)    fake_gmtime_r(clock, result)
+#endif
+
+
+/* Use a different asctime format depending on how big the year is */
+#ifdef USE_TM64
+    #define TM64_ASCTIME_FORMAT "%.3s %.3s%3d %.2d:%.2d:%.2d %lld\n"
+#else
+    #define TM64_ASCTIME_FORMAT "%.3s %.3s%3d %.2d:%.2d:%.2d %d\n"
+#endif
+
+
+#endif

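A short sketch of the public API declared above (same build assumptions as the earlier example; not part of the commit): a date past 2038 survives a timegm64()/gmtime64_r() round trip even where the native time_t is 32-bit.

```
#include <cstdio>
#include <cstring>
extern "C" {
    #include "y2038/time64.h"
}

int main() {
    struct TM in, out;
    std::memset(&in, 0, sizeof(in));
    in.tm_year = 2040 - 1900; // tm_year counts years since 1900
    in.tm_mon  = 0;           // January
    in.tm_mday = 19;
    Time64_T t = timegm64(&in); // 64-bit seconds since the epoch
    gmtime64_r(&t, &out);
    std::printf("%lld -> %04d-%02d-%02d\n", static_cast<long long>(t),
                1900 + (int)out.tm_year, out.tm_mon + 1, out.tm_mday);
    return 0;
}
```
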
http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/b90956e3/contrib/native/client/src/clientlib/y2038/time64_config.h
----------------------------------------------------------------------
diff --git a/contrib/native/client/src/clientlib/y2038/time64_config.h b/contrib/native/client/src/clientlib/y2038/time64_config.h
new file mode 100644
index 0000000..8f68bef
--- /dev/null
+++ b/contrib/native/client/src/clientlib/y2038/time64_config.h
@@ -0,0 +1,107 @@
+/* 
+
+Copyright (c) 2007-2010  Michael G Schwern
+
+This software originally derived from Paul Sheer's pivotal_gmtime_r.c.
+
+The MIT License:
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
+
+*/
+
+
+/* Configuration
+   -------------
+   Define as appropriate for your system.
+   Sensible defaults provided.
+*/
+
+
+#ifndef TIME64_CONFIG_H
+#    define TIME64_CONFIG_H
+
+/* Debugging
+   TIME_64_DEBUG
+   Define if you want debugging messages
+*/
+/* #define TIME_64_DEBUG */
+
+
+/* INT_64_T
+   A 64 bit integer type to use to store time and others.
+   Must be defined.
+*/
+#define INT_64_T                long long
+
+
+/* USE_TM64
+   Should we use a 64 bit safe replacement for tm?  This will
+   let you go past year 2 billion but the struct will be incompatible
+   with tm.  Conversion functions will be provided.
+*/
+/* #define USE_TM64 */
+
+
+/* Availability of system functions.
+
+   HAS_GMTIME_R
+   Define if your system has gmtime_r()
+
+   HAS_LOCALTIME_R
+   Define if your system has localtime_r()
+
+   HAS_TIMEGM
+   Define if your system has timegm(), a GNU extension.
+*/
+#ifndef _WIN32
+#define HAS_GMTIME_R
+#define HAS_LOCALTIME_R
+#endif
+/* #define HAS_TIMEGM */
+
+
+/* Details of non-standard tm struct elements.
+
+   HAS_TM_TM_GMTOFF
+   True if your tm struct has a "tm_gmtoff" element.
+   A BSD extension.
+
+   HAS_TM_TM_ZONE
+   True if your tm struct has a "tm_zone" element.
+   A BSD extension.
+*/
+/* #define HAS_TM_TM_GMTOFF */
+/* #define HAS_TM_TM_ZONE */
+
+
+/* USE_SYSTEM_LOCALTIME
+   USE_SYSTEM_GMTIME
+   USE_SYSTEM_MKTIME
+   USE_SYSTEM_TIMEGM
+   Should we use the system functions if the time is inside their range?
+   Your system localtime() is probably more accurate, but our gmtime() is
+   fast and safe.
+*/
+#define USE_SYSTEM_LOCALTIME
+/* #define USE_SYSTEM_GMTIME */
+#define USE_SYSTEM_MKTIME
+/* #define USE_SYSTEM_TIMEGM */
+
+#endif /* TIME64_CONFIG_H */

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/b90956e3/contrib/native/client/src/clientlib/y2038/time64_limits.h
----------------------------------------------------------------------
diff --git a/contrib/native/client/src/clientlib/y2038/time64_limits.h b/contrib/native/client/src/clientlib/y2038/time64_limits.h
new file mode 100644
index 0000000..e640cae
--- /dev/null
+++ b/contrib/native/client/src/clientlib/y2038/time64_limits.h
@@ -0,0 +1,124 @@
+/* 
+
+Copyright (c) 2007-2010  Michael G Schwern
+
+This software originally derived from Paul Sheer's pivotal_gmtime_r.c.
+
+The MIT License:
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
+
+*/
+
+
+/* 
+   Maximum and minimum inputs your system's respective time functions
+   can correctly handle.  time64.h will use your system functions if
+   the input falls inside these ranges and the corresponding USE_SYSTEM_*
+   constant is defined.
+*/
+
+#ifndef TIME64_LIMITS_H
+#define TIME64_LIMITS_H
+
+/* Max/min for localtime() */
+#define SYSTEM_LOCALTIME_MAX     2147483647
+#define SYSTEM_LOCALTIME_MIN    -2147483647-1
+
+/* Max/min for gmtime() */
+#define SYSTEM_GMTIME_MAX        2147483647
+#define SYSTEM_GMTIME_MIN       -2147483647-1
+
+/* Max/min for mktime() */
+static const struct tm SYSTEM_MKTIME_MAX = {
+    7,
+    14,
+    19,
+    18,
+    0,
+    138,
+    1,
+    17,
+    0
+#ifdef HAS_TM_TM_GMTOFF
+    ,-28800
+#endif
+#ifdef HAS_TM_TM_ZONE
+    ,"PST"
+#endif
+};
+
+static const struct tm SYSTEM_MKTIME_MIN = {
+    52,
+    45,
+    12,
+    13,
+    11,
+    1,
+    5,
+    346,
+    0
+#ifdef HAS_TM_TM_GMTOFF
+    ,-28800
+#endif
+#ifdef HAS_TM_TM_ZONE
+    ,"PST"
+#endif
+};
+
+/* Max/min for timegm() */
+#ifdef HAS_TIMEGM
+static const struct tm SYSTEM_TIMEGM_MAX = {
+    7,
+    14,
+    3,
+    19,
+    0,
+    138,
+    2,
+    18,
+    0
+    #ifdef HAS_TM_TM_GMTOFF
+        ,0
+    #endif
+    #ifdef HAS_TM_TM_ZONE
+        ,"UTC"
+    #endif
+};
+
+static const struct tm SYSTEM_TIMEGM_MIN = {
+    52,
+    45,
+    20,
+    13,
+    11,
+    1,
+    5,
+    346,
+    0
+    #ifdef HAS_TM_TM_GMTOFF
+        ,0
+    #endif
+    #ifdef HAS_TM_TM_ZONE
+        ,"UTC"
+    #endif
+};
+#endif /* HAS_TIMEGM */
+
+#endif /* TIME64_LIMITS_H */
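
The unnamed initializers above follow the declaration order of struct tm (tm_sec, tm_min, tm_hour, tm_mday, tm_mon, tm_year, tm_wday, tm_yday, tm_isdst), so SYSTEM_MKTIME_MAX decodes to Monday 2038-01-18 19:14:07 PST -- exactly 2^31 - 1 seconds after the epoch at a -28800 s offset. A sketch of the range gate these limits exist for (only the SYSTEM_* names come from the header; the helper is illustrative):

    #include <time.h>
    #include "time64_limits.h"

    /* Use the system gmtime() only when the input is inside its safe range;
       otherwise fall back to the portable 64-bit implementation. */
    static int system_gmtime_is_safe(long long t) {
        return t >= SYSTEM_GMTIME_MIN && t <= SYSTEM_GMTIME_MAX;
    }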

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/b90956e3/contrib/native/client/src/include/drill/drillc.hpp
----------------------------------------------------------------------
diff --git a/contrib/native/client/src/include/drill/drillc.hpp b/contrib/native/client/src/include/drill/drillc.hpp
index e5a0d33..817b680 100644
--- a/contrib/native/client/src/include/drill/drillc.hpp
+++ b/contrib/native/client/src/include/drill/drillc.hpp
@@ -16,7 +16,7 @@
  * limitations under the License.
  */
 
-#ifndef DRILL_CLIENT__ALL_H
+#ifndef DRILL_CLIENT_ALL_H
 #define DRILL_CLIENT_ALL_H
 
 #include "drill/common.hpp"

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/b90956e3/contrib/native/client/src/include/drill/recordBatch.hpp
----------------------------------------------------------------------
diff --git a/contrib/native/client/src/include/drill/recordBatch.hpp b/contrib/native/client/src/include/drill/recordBatch.hpp
index 4ed1e31..dab8b9b 100644
--- a/contrib/native/client/src/include/drill/recordBatch.hpp
+++ b/contrib/native/client/src/include/drill/recordBatch.hpp
@@ -403,19 +403,20 @@ template <typename VALUE_TYPE>
 // more complex and start doing dynamic allocations in these classes.
 
 struct DateTimeBase{
-    DateTimeBase(){m_datetime=0;}
+    DateTimeBase():m_datetime(0){}
     virtual ~DateTimeBase(){}
-    uint64_t m_datetime;
+    int64_t m_datetime;
+    int64_t getMillis() const { return m_datetime; }
     virtual void load() =0;
     virtual std::string toString()=0;
 };
 
 struct DateHolder: public virtual DateTimeBase{
     DateHolder(){};
-    DateHolder(uint64_t d){m_datetime=d; load();}
-    uint32_t m_year;
-    uint32_t m_month;
-    uint32_t m_day;
+    DateHolder(int64_t d){m_datetime=d; load();}
+    int32_t m_year;
+    int32_t m_month;
+    int32_t m_day;
     void load();
     std::string toString();
 };
@@ -433,21 +434,21 @@ struct TimeHolder: public virtual DateTimeBase{
 
 struct DateTimeHolder: public DateHolder, public TimeHolder{
     DateTimeHolder(){};
-    DateTimeHolder(uint64_t d){m_datetime=d; load();}
+    DateTimeHolder(int64_t d){m_datetime=d; load();}
     void load();
     std::string toString();
 };
 
 struct DateTimeTZHolder: public DateTimeHolder{
     DateTimeTZHolder(ByteBuf_t b){
-        m_datetime=*(uint64_t*)b;
+        m_datetime=*(int64_t*)b;
         m_tzIndex=*(uint32_t*)(b+sizeof(uint64_t));
         load();
     }
     void load();
     std::string toString();
     int32_t m_tzIndex;
-    static uint32_t size(){ return sizeof(uint64_t)+sizeof(uint32_t); }
+    static uint32_t size(){ return sizeof(int64_t)+sizeof(uint32_t); }
 
 };
 
@@ -703,8 +704,8 @@ typedef NullableValueVectorTyped<DecimalValue , ValueVectorDecimal38Dense> Nulla
 typedef NullableValueVectorTyped<DecimalValue , ValueVectorDecimal28Sparse> NullableValueVectorDecimal28Sparse;
 typedef NullableValueVectorTyped<DecimalValue , ValueVectorDecimal38Sparse> NullableValueVectorDecimal38Sparse;
 
-typedef ValueVectorTyped<DateHolder, uint64_t> ValueVectorDate;
-typedef ValueVectorTyped<DateTimeHolder, uint64_t> ValueVectorTimestamp;
+typedef ValueVectorTyped<DateHolder, int64_t> ValueVectorDate;
+typedef ValueVectorTyped<DateTimeHolder, int64_t> ValueVectorTimestamp;
 typedef ValueVectorTyped<TimeHolder, uint32_t> ValueVectorTime;
 typedef ValueVectorTypedComposite<DateTimeTZHolder> ValueVectorTimestampTZ;
 typedef ValueVectorTypedComposite<IntervalHolder> ValueVectorInterval;
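
The uint64_t-to-int64_t switch above is not cosmetic: dates and times before the Unix epoch arrive as negative millisecond offsets, which an unsigned field silently wraps into enormous positive values. A tiny self-contained illustration (the values are hypothetical):

    #include <cstdint>
    #include <cstdio>

    int main() {
        int64_t millis   = -86400000LL;         /* 1969-12-31T00:00:00Z, one day pre-epoch */
        uint64_t wrapped = (uint64_t)millis;    /* what the old field would have stored */
        std::printf("signed: %lld  unsigned: %llu\n",
                    (long long)millis, (unsigned long long)wrapped);
        return 0;
    }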


[22/32] git commit: DRILL-1019: Handle multiplication overflow for decimal38 data type

Posted by ja...@apache.org.
DRILL-1019: Handle multiplication overflow for decimal38 data type


Project: http://git-wip-us.apache.org/repos/asf/incubator-drill/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-drill/commit/20605067
Tree: http://git-wip-us.apache.org/repos/asf/incubator-drill/tree/20605067
Diff: http://git-wip-us.apache.org/repos/asf/incubator-drill/diff/20605067

Branch: refs/heads/master
Commit: 2060506781a6b5f9c2dc5ce244ffb532fed556a8
Parents: da61823
Author: Mehant Baid <me...@gmail.com>
Authored: Wed Jun 18 02:13:51 2014 -0700
Committer: Jacques Nadeau <ja...@apache.org>
Committed: Fri Jun 20 10:56:16 2014 -0700

----------------------------------------------------------------------
 .../templates/Decimal/DecimalFunctions.java     | 63 ++++++++++++++++++--
 .../drill/jdbc/test/TestFunctionsQuery.java     | 17 +++++-
 2 files changed, 73 insertions(+), 7 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/20605067/exec/java-exec/src/main/codegen/templates/Decimal/DecimalFunctions.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/codegen/templates/Decimal/DecimalFunctions.java b/exec/java-exec/src/main/codegen/templates/Decimal/DecimalFunctions.java
index 3f5b5cd..864c461 100644
--- a/exec/java-exec/src/main/codegen/templates/Decimal/DecimalFunctions.java
+++ b/exec/java-exec/src/main/codegen/templates/Decimal/DecimalFunctions.java
@@ -392,7 +392,6 @@ public class ${type.name}Functions {
         }
 
         public void eval() {
-
             if (outputPrecision == Integer.MIN_VALUE) {
                 org.apache.drill.common.util.DecimalScalePrecisionMulFunction resultScalePrec =
                 new org.apache.drill.common.util.DecimalScalePrecisionMulFunction((int) left.precision, (int) left.scale, (int) right.precision, (int) right.scale);
@@ -437,10 +436,6 @@ public class ${type.name}Functions {
             int resultIntegerSize = leftIntegerSize + rightIntegerSize;
             int resultScaleSize = org.apache.drill.common.util.DecimalUtility.roundUp(left.scale + right.scale);
 
-            if ((resultIntegerSize + resultScaleSize) > result.nDecimalDigits) {
-                throw new org.apache.drill.common.exceptions.DrillRuntimeException("Cannot fit multiplication result in the given Decimal type");
-            }
-
             int leftSize  = left.nDecimalDigits - 1;
             int rightSize = right.nDecimalDigits - 1;
 
@@ -475,6 +470,63 @@ public class ${type.name}Functions {
                 resultIndex--;
             }
 
+            /* We have computed the result of the multiplication; check if we need to
+             * round a portion of the fractional part
+             */
+            resultScaleSize = org.apache.drill.common.util.DecimalUtility.roundUp(result.scale);
+
+            if (result.scale < (left.scale + right.scale)) {
+              /* The scale of the output data type is less than the scale
+               * we obtained as a result of the multiplication, so we need
+               * to round off a chunk of the fractional part
+               */
+              int lastScaleIndex = currentIndex + resultIntegerSize + resultScaleSize - 1;
+
+              // Compute the power of 10 necessary to chop off the fractional part
+              int scaleFactor = (int) (org.apache.drill.common.util.DecimalUtility.getPowerOfTen(
+                                        org.apache.drill.common.util.DecimalUtility.MAX_DIGITS - (result.scale % org.apache.drill.common.util.DecimalUtility.MAX_DIGITS)));
+
+              // Compute the power of 10 necessary to determine whether we need to round up
+              int roundFactor = (int) (org.apache.drill.common.util.DecimalUtility.getPowerOfTen(
+                                        org.apache.drill.common.util.DecimalUtility.MAX_DIGITS - ((result.scale + 1) % org.apache.drill.common.util.DecimalUtility.MAX_DIGITS)));
+
+              // index of rounding digit
+              int roundIndex = currentIndex + resultIntegerSize + org.apache.drill.common.util.DecimalUtility.roundUp(result.scale + 1) - 1;
+
+              // Check the first chopped digit to see if we need to round up
+              int carry = ((tempResult[roundIndex] / roundFactor) % 10) > 4 ? 1 : 0;
+
+              // Adjust the carry so that it gets added to the correct digit
+              carry *= scaleFactor;
+
+              // Chop the unwanted fractional part
+              tempResult[lastScaleIndex] /=  scaleFactor;
+              tempResult[lastScaleIndex] *= scaleFactor;
+
+              // Propagate the carry
+              while (carry > 0 && lastScaleIndex >= 0) {
+                int tempSum = tempResult[lastScaleIndex] + carry;
+                if (tempSum >= org.apache.drill.common.util.DecimalUtility.DIGITS_BASE) {
+                  tempResult[lastScaleIndex] = (tempSum % org.apache.drill.common.util.DecimalUtility.DIGITS_BASE);
+                  carry = (int) (tempSum / org.apache.drill.common.util.DecimalUtility.DIGITS_BASE);
+                } else {
+                  tempResult[lastScaleIndex] = tempSum;
+                  carry = 0;
+                }
+                lastScaleIndex--;
+              }
+
+              // Check whether the carry has added an integer digit
+              if ((lastScaleIndex + 1) < currentIndex) {
+                resultIntegerSize++;
+                currentIndex = lastScaleIndex + 1;
+              }
+            }
+
+            if (resultIntegerSize > result.nDecimalDigits) {
+              throw new org.apache.drill.common.exceptions.DrillRuntimeException("Cannot fit multiplication result in the given decimal type");
+            }
+
             int outputIndex = result.nDecimalDigits - 1;
 
             for (int i = (currentIndex + resultIntegerSize + resultScaleSize - 1); i >= currentIndex; i--) {
@@ -485,7 +537,6 @@ public class ${type.name}Functions {
             while(outputIndex >= 0) {
               result.setInteger(outputIndex--, 0);
             }
-
             result.setSign(left.getSign() != right.getSign());
         }
     }
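
The new block rounds half-up at the output scale and ripples the carry through the base-10^9 digit array; the overflow check moves to after rounding because only then is the final integer-digit count known. A condensed sketch of the carry loop, assuming Drill's DIGITS_BASE of 1,000,000,000 with digit 0 most significant (the helper name is illustrative):

    #include <cstdint>
    #include <vector>

    static const int32_t DIGITS_BASE = 1000000000;   /* 9 decimal digits per int */

    /* Returns the leftover carry; a non-zero result means the value gained an
       integer digit, mirroring the resultIntegerSize++ branch above. */
    int32_t propagateCarry(std::vector<int32_t>& digits, int index, int32_t carry) {
        while (carry > 0 && index >= 0) {
            int64_t sum   = (int64_t)digits[index] + carry;
            digits[index] = (int32_t)(sum % DIGITS_BASE);
            carry         = (int32_t)(sum / DIGITS_BASE);
            --index;
        }
        return carry;
    }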

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/20605067/exec/jdbc/src/test/java/org/apache/drill/jdbc/test/TestFunctionsQuery.java
----------------------------------------------------------------------
diff --git a/exec/jdbc/src/test/java/org/apache/drill/jdbc/test/TestFunctionsQuery.java b/exec/jdbc/src/test/java/org/apache/drill/jdbc/test/TestFunctionsQuery.java
index 64bdf6d..8660579 100644
--- a/exec/jdbc/src/test/java/org/apache/drill/jdbc/test/TestFunctionsQuery.java
+++ b/exec/jdbc/src/test/java/org/apache/drill/jdbc/test/TestFunctionsQuery.java
@@ -562,7 +562,7 @@ public class TestFunctionsQuery {
 
   @Test
   public void testDecimal18Decimal38Comparison() throws Exception {
-    String query = "select cast('999999999.999999999' as decimal(18, 9)) = cast('999999999.999999999' as decimal(38, 18)) as CMP " +
+    String query = "select cast('-999999999.999999999' as decimal(18, 9)) = cast('-999999999.999999999' as decimal(38, 18)) as CMP " +
         "from cp.`employee.json` where employee_id = 1";
 
     JdbcAssert.withNoDefaultSchema()
@@ -570,4 +570,19 @@ public class TestFunctionsQuery {
         .returns(
             "CMP=true\n");
   }
+
+  @Test
+  public void testDecimalMultiplicationOverflowHandling() throws Exception {
+    String query = "select cast('1' as decimal(9, 5)) * cast ('999999999999999999999999999.999999999' as decimal(38, 9)) as DEC38_1, " +
+                   "cast('1000000000000000001.000000000000000000' as decimal(38, 18)) * cast('0.999999999999999999' as decimal(38, 18)) as DEC38_2, " +
+                   "cast('3' as decimal(9, 8)) * cast ('333333333.3333333333333333333' as decimal(38, 19)) as DEC38_3 " +
+                   "from cp.`employee.json` where employee_id = 1";
+
+    JdbcAssert.withNoDefaultSchema()
+        .sql(query)
+        .returns(
+            "DEC38_1=1000000000000000000000000000.00000; " +
+            "DEC38_2=1000000000000000000; " +
+            "DEC38_3=1000000000.000000000000000000\n");
+  }
 }
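
The DEC38_3 expectation exercises the new carry path end to end: 3 * 333333333.3333333333333333333 is exactly 999999999.9999999999999999999, and rounding it to the output scale of 18 carries through every fractional digit into the integer part, yielding 1000000000.000000000000000000. The old up-front size check would have rejected this query outright; the relocated check only fires when the rounded result genuinely cannot fit.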


[03/32] git commit: DRILL-1006: Fix case expression when conditions are expressions of nullable types.

Posted by ja...@apache.org.
DRILL-1006: Fix case expression when conditions are expressions of nullable types.


Project: http://git-wip-us.apache.org/repos/asf/incubator-drill/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-drill/commit/c373a278
Tree: http://git-wip-us.apache.org/repos/asf/incubator-drill/tree/c373a278
Diff: http://git-wip-us.apache.org/repos/asf/incubator-drill/diff/c373a278

Branch: refs/heads/master
Commit: c373a278f637235e989787517416f141d42b2d29
Parents: 4205780
Author: Jinfeng Ni <jn...@maprtech.com>
Authored: Tue Jun 17 13:12:05 2014 -0700
Committer: Jinfeng Ni <jn...@maprtech.com>
Committed: Wed Jun 18 07:14:10 2014 -0700

----------------------------------------------------------------------
 .../common/expression/visitors/ExpressionValidator.java   |  6 +++---
 .../org/apache/drill/exec/expr/EvaluationVisitor.java     | 10 +++++-----
 2 files changed, 8 insertions(+), 8 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/c373a278/common/src/main/java/org/apache/drill/common/expression/visitors/ExpressionValidator.java
----------------------------------------------------------------------
diff --git a/common/src/main/java/org/apache/drill/common/expression/visitors/ExpressionValidator.java b/common/src/main/java/org/apache/drill/common/expression/visitors/ExpressionValidator.java
index c8b7857..1bfb57d 100644
--- a/common/src/main/java/org/apache/drill/common/expression/visitors/ExpressionValidator.java
+++ b/common/src/main/java/org/apache/drill/common/expression/visitors/ExpressionValidator.java
@@ -78,14 +78,14 @@ public class ExpressionValidator implements ExprVisitor<Void, ErrorCollector, Ru
     int i = 0;
     for (IfCondition c : ifExpr.conditions) {
       MajorType mt = c.condition.getMajorType();
-      if (mt.getMode() != DataMode.REQUIRED || mt.getMinorType() != MinorType.BIT) {
+      if (mt.getMinorType() != MinorType.BIT) {
         errors
             .addGeneralError(
                 c.condition.getPosition(),
                 String
                     .format(
-                        "Failure composing If Expression.  All conditions must return a required value and be of type boolean.  Condition %d was DatMode %s and Type %s.",
-                        i, mt.getMode(), mt.getMinorType()));
+                        "Failure composing If Expression.  All conditions must return a boolean type.  Condition %d was of Type %s.",
+                        i, mt.getMinorType()));
       }
       i++;
     }

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/c373a278/exec/java-exec/src/main/java/org/apache/drill/exec/expr/EvaluationVisitor.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/expr/EvaluationVisitor.java b/exec/java-exec/src/main/java/org/apache/drill/exec/expr/EvaluationVisitor.java
index efc7259..d65e618 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/expr/EvaluationVisitor.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/expr/EvaluationVisitor.java
@@ -159,15 +159,15 @@ public class EvaluationVisitor {
         HoldingContainer holdingContainer = c.condition.accept(this, generator);
         if (jc == null) {
           if (holdingContainer.isOptional()) {
-            jc = conditionalBlock._if(holdingContainer.getIsSet().cand(holdingContainer.getValue()));
+            jc = conditionalBlock._if(holdingContainer.getIsSet().eq(JExpr.lit(1)).cand(holdingContainer.getValue().eq(JExpr.lit(1))));
           } else {
             jc = conditionalBlock._if(holdingContainer.getValue().eq(JExpr.lit(1)));
           }
         } else {
           if (holdingContainer.isOptional()) {
-            jc = jc._else()._if(holdingContainer.getIsSet().cand(holdingContainer.getValue()));
+            jc = jc._else()._if(holdingContainer.getIsSet().eq(JExpr.lit(1)).cand(holdingContainer.getValue().eq(JExpr.lit(1))));
           } else {
-            jc = jc._else()._if(holdingContainer.getValue());
+            jc = jc._else()._if(holdingContainer.getValue().eq(JExpr.lit(1)));
           }
         }
 
@@ -176,7 +176,7 @@ public class EvaluationVisitor {
           JConditional newCond = jc._then()._if(thenExpr.getIsSet().ne(JExpr.lit(0)));
           JBlock b = newCond._then();
           b.assign(output.getHolder(), thenExpr.getHolder());
-          b.assign(output.getIsSet(), thenExpr.getIsSet());
+          //b.assign(output.getIsSet(), thenExpr.getIsSet());
         } else {
           jc._then().assign(output.getHolder(), thenExpr.getHolder());
         }
@@ -188,7 +188,7 @@ public class EvaluationVisitor {
         JConditional newCond = jc._else()._if(elseExpr.getIsSet().ne(JExpr.lit(0)));
         JBlock b = newCond._then();
         b.assign(output.getHolder(), elseExpr.getHolder());
-        b.assign(output.getIsSet(), elseExpr.getIsSet());
+        //b.assign(output.getIsSet(), elseExpr.getIsSet());
       } else {
         jc._else().assign(output.getHolder(), elseExpr.getHolder());
       }
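
The net effect on the generated code: for a nullable (OPTIONAL) boolean condition, both the null indicator and the value must equal 1 before a WHEN branch fires, so a NULL condition now falls through to the next branch or the ELSE. Sketched here as a standalone C++ analogue (NullableBitHolder mirrors the real holder; the function is illustrative):

    /* Shape of the test EvaluationVisitor now emits for each CASE condition. */
    struct NullableBitHolder { int isSet; int value; };

    bool conditionHolds(const NullableBitHolder& c) {
        return c.isSet == 1 && c.value == 1;   /* NULL (isSet == 0) => false */
    }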


[05/32] git commit: DRILL-1009: Remove 'HBasePStoreProvider' from HBase storage module's drill-module.conf

Posted by ja...@apache.org.
DRILL-1009: Remove 'HBasePStoreProvider' from HBase storage module's drill-module.conf


Project: http://git-wip-us.apache.org/repos/asf/incubator-drill/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-drill/commit/199f4674
Tree: http://git-wip-us.apache.org/repos/asf/incubator-drill/tree/199f4674
Diff: http://git-wip-us.apache.org/repos/asf/incubator-drill/diff/199f4674

Branch: refs/heads/master
Commit: 199f46741fe26e87ac840c6b3ae5398f5f5fae79
Parents: c7712f8
Author: Aditya Kishore <ad...@maprtech.com>
Authored: Wed Jun 18 01:57:04 2014 -0700
Committer: Jacques Nadeau <ja...@apache.org>
Committed: Wed Jun 18 20:28:48 2014 -0700

----------------------------------------------------------------------
 contrib/storage-hbase/src/main/resources/drill-module.conf      | 5 +++--
 .../java/org/apache/drill/exec/store/sys/PStoreRegistry.java    | 1 +
 2 files changed, 4 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/199f4674/contrib/storage-hbase/src/main/resources/drill-module.conf
----------------------------------------------------------------------
diff --git a/contrib/storage-hbase/src/main/resources/drill-module.conf b/contrib/storage-hbase/src/main/resources/drill-module.conf
index 46c0720..0edceaf 100644
--- a/contrib/storage-hbase/src/main/resources/drill-module.conf
+++ b/contrib/storage-hbase/src/main/resources/drill-module.conf
@@ -20,9 +20,10 @@
 drill.exec: {
 
   sys.store.provider: {
-    class: "org.apache.drill.exec.store.hbase.config.HBasePStoreProvider",
+    # HBasePStoreProvider is disabled by default
+    # class: "org.apache.drill.exec.store.hbase.config.HBasePStoreProvider",
     hbase: {
-      tableName : "drill_store",
+      table : "drill_store",
       config : {
         "hbase.zookeeper.quorum" : "localhost",
         "hbase.zookeeper.property.clientPort" : 2181

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/199f4674/exec/java-exec/src/main/java/org/apache/drill/exec/store/sys/PStoreRegistry.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/store/sys/PStoreRegistry.java b/exec/java-exec/src/main/java/org/apache/drill/exec/store/sys/PStoreRegistry.java
index e69c12d..4fe23aa 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/store/sys/PStoreRegistry.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/store/sys/PStoreRegistry.java
@@ -50,6 +50,7 @@ public class PStoreRegistry {
   public PStoreProvider newPStoreProvider() throws ExecutionSetupException {
     try {
       String storeProviderClassName = config.getString(ExecConstants.SYS_STORE_PROVIDER_CLASS);
+      logger.info("Using the configured PStoreProvider class: '{}'.", storeProviderClassName);
       Class<? extends PStoreProvider> storeProviderClass = (Class<? extends PStoreProvider>) Class.forName(storeProviderClassName);
       Constructor<? extends PStoreProvider> c = storeProviderClass.getConstructor(PStoreRegistry.class);
       return c.newInstance(this);
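
With the class line commented out, this module no longer forces drill.exec.sys.store.provider.class, so Drill falls back to its default provider, and the new log line makes the effective choice visible at startup. A site that still wants the HBase-backed store can opt back in from its own override file -- a sketch reusing only the keys shown in the diff above, assuming the standard drill-override.conf mechanism:

    drill.exec.sys.store.provider: {
      class: "org.apache.drill.exec.store.hbase.config.HBasePStoreProvider",
      hbase: {
        table : "drill_store",
        config : {
          "hbase.zookeeper.quorum" : "localhost",
          "hbase.zookeeper.property.clientPort" : 2181
        }
      }
    }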


[13/32] DRILL-875: Fixes for DRILL-707, DRILL-780, DRILL-835 (Schema change), DRILL-852, DRILL-876, DRILL-877, DRILL-878, DRILL-890

Posted by ja...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/aaa4db74/contrib/native/client/src/protobuf/BitControl.pb.cc
----------------------------------------------------------------------
diff --git a/contrib/native/client/src/protobuf/BitControl.pb.cc b/contrib/native/client/src/protobuf/BitControl.pb.cc
index 6e0e325..de6a9ca 100644
--- a/contrib/native/client/src/protobuf/BitControl.pb.cc
+++ b/contrib/native/client/src/protobuf/BitControl.pb.cc
@@ -31,7 +31,6 @@ const ::google::protobuf::internal::GeneratedMessageReflection*
 const ::google::protobuf::Descriptor* FragmentStatus_descriptor_ = NULL;
 const ::google::protobuf::internal::GeneratedMessageReflection*
   FragmentStatus_reflection_ = NULL;
-const ::google::protobuf::EnumDescriptor* FragmentStatus_FragmentState_descriptor_ = NULL;
 const ::google::protobuf::Descriptor* PlanFragment_descriptor_ = NULL;
 const ::google::protobuf::internal::GeneratedMessageReflection*
   PlanFragment_reflection_ = NULL;
@@ -82,16 +81,9 @@ void protobuf_AssignDesc_BitControl_2eproto() {
       ::google::protobuf::MessageFactory::generated_factory(),
       sizeof(BitStatus));
   FragmentStatus_descriptor_ = file->message_type(2);
-  static const int FragmentStatus_offsets_[9] = {
-    GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(FragmentStatus, memory_use_),
-    GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(FragmentStatus, batches_completed_),
-    GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(FragmentStatus, records_completed_),
-    GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(FragmentStatus, estimated_completion_percentage_),
-    GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(FragmentStatus, state_),
-    GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(FragmentStatus, data_processed_),
+  static const int FragmentStatus_offsets_[2] = {
+    GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(FragmentStatus, profile_),
     GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(FragmentStatus, handle_),
-    GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(FragmentStatus, error_),
-    GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(FragmentStatus, running_time_),
   };
   FragmentStatus_reflection_ =
     new ::google::protobuf::internal::GeneratedMessageReflection(
@@ -104,7 +96,6 @@ void protobuf_AssignDesc_BitControl_2eproto() {
       ::google::protobuf::DescriptorPool::generated_pool(),
       ::google::protobuf::MessageFactory::generated_factory(),
       sizeof(FragmentStatus));
-  FragmentStatus_FragmentState_descriptor_ = FragmentStatus_descriptor_->enum_type(0);
   PlanFragment_descriptor_ = file->message_type(3);
   static const int PlanFragment_offsets_[15] = {
     GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(PlanFragment, handle_),
@@ -208,39 +199,31 @@ void protobuf_AddDesc_BitControl_2eproto() {
     "(\0162\027.exec.shared.RpcChannel:\013BIT_CONTROL"
     "\022(\n\010endpoint\030\003 \001(\0132\026.exec.DrillbitEndpoi"
     "nt\"F\n\tBitStatus\0229\n\017fragment_status\030\001 \003(\013"
-    "2 .exec.bit.control.FragmentStatus\"\261\003\n\016F"
-    "ragmentStatus\022\022\n\nmemory_use\030\001 \001(\003\022\031\n\021bat"
-    "ches_completed\030\002 \001(\003\022\031\n\021records_complete"
-    "d\030\003 \001(\003\022\'\n\037estimated_completion_percenta"
-    "ge\030\004 \001(\005\022=\n\005state\030\005 \001(\0162..exec.bit.contr"
-    "ol.FragmentStatus.FragmentState\022\026\n\016data_"
-    "processed\030\006 \001(\003\022(\n\006handle\030\007 \001(\0132\030.exec.b"
-    "it.FragmentHandle\022(\n\005error\030\010 \001(\0132\031.exec."
-    "shared.DrillPBError\022\024\n\014running_time\030\t \001("
-    "\003\"k\n\rFragmentState\022\013\n\007SENDING\020\000\022\027\n\023AWAIT"
-    "ING_ALLOCATION\020\001\022\013\n\007RUNNING\020\002\022\014\n\010FINISHE"
-    "D\020\003\022\r\n\tCANCELLED\020\004\022\n\n\006FAILED\020\005\"\276\003\n\014PlanF"
-    "ragment\022(\n\006handle\030\001 \001(\0132\030.exec.bit.Fragm"
-    "entHandle\022\024\n\014network_cost\030\004 \001(\002\022\020\n\010cpu_c"
-    "ost\030\005 \001(\002\022\021\n\tdisk_cost\030\006 \001(\002\022\023\n\013memory_c"
-    "ost\030\007 \001(\002\022\025\n\rfragment_json\030\010 \001(\t\022*\n\nassi"
-    "gnment\030\n \001(\0132\026.exec.DrillbitEndpoint\022\025\n\r"
-    "leaf_fragment\030\t \001(\010\022\'\n\007foreman\030\013 \001(\0132\026.e"
-    "xec.DrillbitEndpoint\022\035\n\013mem_initial\030\014 \001("
-    "\003:\01020000000\022\034\n\007mem_max\030\r \001(\003:\0132000000000"
-    "0\022\030\n\020query_start_time\030\016 \001(\003\0221\n\013credentia"
-    "ls\030\017 \001(\0132\034.exec.shared.UserCredentials\022\021"
-    "\n\ttime_zone\030\020 \001(\005\022\024\n\014options_json\030\021 \001(\t\""
-    "f\n\017WorkQueueStatus\022(\n\010endpoint\030\001 \001(\0132\026.e"
-    "xec.DrillbitEndpoint\022\024\n\014queue_length\030\002 \001"
-    "(\005\022\023\n\013report_time\030\003 \001(\003*\332\001\n\007RpcType\022\r\n\tH"
-    "ANDSHAKE\020\000\022\007\n\003ACK\020\001\022\013\n\007GOODBYE\020\002\022\033\n\027REQ_"
-    "INIATILIZE_FRAGMENT\020\003\022\027\n\023REQ_CANCEL_FRAG"
-    "MENT\020\006\022\027\n\023REQ_FRAGMENT_STATUS\020\007\022\022\n\016REQ_B"
-    "IT_STATUS\020\010\022\030\n\024RESP_FRAGMENT_HANDLE\020\t\022\030\n"
-    "\024RESP_FRAGMENT_STATUS\020\n\022\023\n\017RESP_BIT_STAT"
-    "US\020\013B+\n\033org.apache.drill.exec.protoB\nBit"
-    "ControlH\001", 1569);
+    "2 .exec.bit.control.FragmentStatus\"n\n\016Fr"
+    "agmentStatus\0222\n\007profile\030\001 \001(\0132!.exec.sha"
+    "red.MinorFragmentProfile\022(\n\006handle\030\002 \001(\013"
+    "2\030.exec.bit.FragmentHandle\"\276\003\n\014PlanFragm"
+    "ent\022(\n\006handle\030\001 \001(\0132\030.exec.bit.FragmentH"
+    "andle\022\024\n\014network_cost\030\004 \001(\002\022\020\n\010cpu_cost\030"
+    "\005 \001(\002\022\021\n\tdisk_cost\030\006 \001(\002\022\023\n\013memory_cost\030"
+    "\007 \001(\002\022\025\n\rfragment_json\030\010 \001(\t\022*\n\nassignme"
+    "nt\030\n \001(\0132\026.exec.DrillbitEndpoint\022\025\n\rleaf"
+    "_fragment\030\t \001(\010\022\'\n\007foreman\030\013 \001(\0132\026.exec."
+    "DrillbitEndpoint\022\035\n\013mem_initial\030\014 \001(\003:\0102"
+    "0000000\022\034\n\007mem_max\030\r \001(\003:\01320000000000\022\030\n"
+    "\020query_start_time\030\016 \001(\003\0221\n\013credentials\030\017"
+    " \001(\0132\034.exec.shared.UserCredentials\022\021\n\tti"
+    "me_zone\030\020 \001(\005\022\024\n\014options_json\030\021 \001(\t\"f\n\017W"
+    "orkQueueStatus\022(\n\010endpoint\030\001 \001(\0132\026.exec."
+    "DrillbitEndpoint\022\024\n\014queue_length\030\002 \001(\005\022\023"
+    "\n\013report_time\030\003 \001(\003*\332\001\n\007RpcType\022\r\n\tHANDS"
+    "HAKE\020\000\022\007\n\003ACK\020\001\022\013\n\007GOODBYE\020\002\022\033\n\027REQ_INIA"
+    "TILIZE_FRAGMENT\020\003\022\027\n\023REQ_CANCEL_FRAGMENT"
+    "\020\006\022\027\n\023REQ_FRAGMENT_STATUS\020\007\022\022\n\016REQ_BIT_S"
+    "TATUS\020\010\022\030\n\024RESP_FRAGMENT_HANDLE\020\t\022\030\n\024RES"
+    "P_FRAGMENT_STATUS\020\n\022\023\n\017RESP_BIT_STATUS\020\013"
+    "B+\n\033org.apache.drill.exec.protoB\nBitCont"
+    "rolH\001", 1245);
   ::google::protobuf::MessageFactory::InternalRegisterGeneratedFile(
     "BitControl.proto", &protobuf_RegisterTypes);
   BitControlHandshake::default_instance_ = new BitControlHandshake();
@@ -789,45 +772,9 @@ void BitStatus::Swap(BitStatus* other) {
 
 // ===================================================================
 
-const ::google::protobuf::EnumDescriptor* FragmentStatus_FragmentState_descriptor() {
-  protobuf_AssignDescriptorsOnce();
-  return FragmentStatus_FragmentState_descriptor_;
-}
-bool FragmentStatus_FragmentState_IsValid(int value) {
-  switch(value) {
-    case 0:
-    case 1:
-    case 2:
-    case 3:
-    case 4:
-    case 5:
-      return true;
-    default:
-      return false;
-  }
-}
-
-#ifndef _MSC_VER
-const FragmentStatus_FragmentState FragmentStatus::SENDING;
-const FragmentStatus_FragmentState FragmentStatus::AWAITING_ALLOCATION;
-const FragmentStatus_FragmentState FragmentStatus::RUNNING;
-const FragmentStatus_FragmentState FragmentStatus::FINISHED;
-const FragmentStatus_FragmentState FragmentStatus::CANCELLED;
-const FragmentStatus_FragmentState FragmentStatus::FAILED;
-const FragmentStatus_FragmentState FragmentStatus::FragmentState_MIN;
-const FragmentStatus_FragmentState FragmentStatus::FragmentState_MAX;
-const int FragmentStatus::FragmentState_ARRAYSIZE;
-#endif  // _MSC_VER
 #ifndef _MSC_VER
-const int FragmentStatus::kMemoryUseFieldNumber;
-const int FragmentStatus::kBatchesCompletedFieldNumber;
-const int FragmentStatus::kRecordsCompletedFieldNumber;
-const int FragmentStatus::kEstimatedCompletionPercentageFieldNumber;
-const int FragmentStatus::kStateFieldNumber;
-const int FragmentStatus::kDataProcessedFieldNumber;
+const int FragmentStatus::kProfileFieldNumber;
 const int FragmentStatus::kHandleFieldNumber;
-const int FragmentStatus::kErrorFieldNumber;
-const int FragmentStatus::kRunningTimeFieldNumber;
 #endif  // !_MSC_VER
 
 FragmentStatus::FragmentStatus()
@@ -836,8 +783,8 @@ FragmentStatus::FragmentStatus()
 }
 
 void FragmentStatus::InitAsDefaultInstance() {
+  profile_ = const_cast< ::exec::shared::MinorFragmentProfile*>(&::exec::shared::MinorFragmentProfile::default_instance());
   handle_ = const_cast< ::exec::bit::FragmentHandle*>(&::exec::bit::FragmentHandle::default_instance());
-  error_ = const_cast< ::exec::shared::DrillPBError*>(&::exec::shared::DrillPBError::default_instance());
 }
 
 FragmentStatus::FragmentStatus(const FragmentStatus& from)
@@ -848,15 +795,8 @@ FragmentStatus::FragmentStatus(const FragmentStatus& from)
 
 void FragmentStatus::SharedCtor() {
   _cached_size_ = 0;
-  memory_use_ = GOOGLE_LONGLONG(0);
-  batches_completed_ = GOOGLE_LONGLONG(0);
-  records_completed_ = GOOGLE_LONGLONG(0);
-  estimated_completion_percentage_ = 0;
-  state_ = 0;
-  data_processed_ = GOOGLE_LONGLONG(0);
+  profile_ = NULL;
   handle_ = NULL;
-  error_ = NULL;
-  running_time_ = GOOGLE_LONGLONG(0);
   ::memset(_has_bits_, 0, sizeof(_has_bits_));
 }
 
@@ -866,8 +806,8 @@ FragmentStatus::~FragmentStatus() {
 
 void FragmentStatus::SharedDtor() {
   if (this != default_instance_) {
+    delete profile_;
     delete handle_;
-    delete error_;
   }
 }
 
@@ -894,21 +834,12 @@ FragmentStatus* FragmentStatus::New() const {
 
 void FragmentStatus::Clear() {
   if (_has_bits_[0 / 32] & (0xffu << (0 % 32))) {
-    memory_use_ = GOOGLE_LONGLONG(0);
-    batches_completed_ = GOOGLE_LONGLONG(0);
-    records_completed_ = GOOGLE_LONGLONG(0);
-    estimated_completion_percentage_ = 0;
-    state_ = 0;
-    data_processed_ = GOOGLE_LONGLONG(0);
+    if (has_profile()) {
+      if (profile_ != NULL) profile_->::exec::shared::MinorFragmentProfile::Clear();
+    }
     if (has_handle()) {
       if (handle_ != NULL) handle_->::exec::bit::FragmentHandle::Clear();
     }
-    if (has_error()) {
-      if (error_ != NULL) error_->::exec::shared::DrillPBError::Clear();
-    }
-  }
-  if (_has_bits_[8 / 32] & (0xffu << (8 % 32))) {
-    running_time_ = GOOGLE_LONGLONG(0);
   }
   ::memset(_has_bits_, 0, sizeof(_has_bits_));
   mutable_unknown_fields()->Clear();
@@ -920,109 +851,22 @@ bool FragmentStatus::MergePartialFromCodedStream(
   ::google::protobuf::uint32 tag;
   while ((tag = input->ReadTag()) != 0) {
     switch (::google::protobuf::internal::WireFormatLite::GetTagFieldNumber(tag)) {
-      // optional int64 memory_use = 1;
+      // optional .exec.shared.MinorFragmentProfile profile = 1;
       case 1: {
         if (::google::protobuf::internal::WireFormatLite::GetTagWireType(tag) ==
-            ::google::protobuf::internal::WireFormatLite::WIRETYPE_VARINT) {
-          DO_((::google::protobuf::internal::WireFormatLite::ReadPrimitive<
-                   ::google::protobuf::int64, ::google::protobuf::internal::WireFormatLite::TYPE_INT64>(
-                 input, &memory_use_)));
-          set_has_memory_use();
+            ::google::protobuf::internal::WireFormatLite::WIRETYPE_LENGTH_DELIMITED) {
+          DO_(::google::protobuf::internal::WireFormatLite::ReadMessageNoVirtual(
+               input, mutable_profile()));
         } else {
           goto handle_uninterpreted;
         }
-        if (input->ExpectTag(16)) goto parse_batches_completed;
+        if (input->ExpectTag(18)) goto parse_handle;
         break;
       }
 
-      // optional int64 batches_completed = 2;
+      // optional .exec.bit.FragmentHandle handle = 2;
       case 2: {
         if (::google::protobuf::internal::WireFormatLite::GetTagWireType(tag) ==
-            ::google::protobuf::internal::WireFormatLite::WIRETYPE_VARINT) {
-         parse_batches_completed:
-          DO_((::google::protobuf::internal::WireFormatLite::ReadPrimitive<
-                   ::google::protobuf::int64, ::google::protobuf::internal::WireFormatLite::TYPE_INT64>(
-                 input, &batches_completed_)));
-          set_has_batches_completed();
-        } else {
-          goto handle_uninterpreted;
-        }
-        if (input->ExpectTag(24)) goto parse_records_completed;
-        break;
-      }
-
-      // optional int64 records_completed = 3;
-      case 3: {
-        if (::google::protobuf::internal::WireFormatLite::GetTagWireType(tag) ==
-            ::google::protobuf::internal::WireFormatLite::WIRETYPE_VARINT) {
-         parse_records_completed:
-          DO_((::google::protobuf::internal::WireFormatLite::ReadPrimitive<
-                   ::google::protobuf::int64, ::google::protobuf::internal::WireFormatLite::TYPE_INT64>(
-                 input, &records_completed_)));
-          set_has_records_completed();
-        } else {
-          goto handle_uninterpreted;
-        }
-        if (input->ExpectTag(32)) goto parse_estimated_completion_percentage;
-        break;
-      }
-
-      // optional int32 estimated_completion_percentage = 4;
-      case 4: {
-        if (::google::protobuf::internal::WireFormatLite::GetTagWireType(tag) ==
-            ::google::protobuf::internal::WireFormatLite::WIRETYPE_VARINT) {
-         parse_estimated_completion_percentage:
-          DO_((::google::protobuf::internal::WireFormatLite::ReadPrimitive<
-                   ::google::protobuf::int32, ::google::protobuf::internal::WireFormatLite::TYPE_INT32>(
-                 input, &estimated_completion_percentage_)));
-          set_has_estimated_completion_percentage();
-        } else {
-          goto handle_uninterpreted;
-        }
-        if (input->ExpectTag(40)) goto parse_state;
-        break;
-      }
-
-      // optional .exec.bit.control.FragmentStatus.FragmentState state = 5;
-      case 5: {
-        if (::google::protobuf::internal::WireFormatLite::GetTagWireType(tag) ==
-            ::google::protobuf::internal::WireFormatLite::WIRETYPE_VARINT) {
-         parse_state:
-          int value;
-          DO_((::google::protobuf::internal::WireFormatLite::ReadPrimitive<
-                   int, ::google::protobuf::internal::WireFormatLite::TYPE_ENUM>(
-                 input, &value)));
-          if (::exec::bit::control::FragmentStatus_FragmentState_IsValid(value)) {
-            set_state(static_cast< ::exec::bit::control::FragmentStatus_FragmentState >(value));
-          } else {
-            mutable_unknown_fields()->AddVarint(5, value);
-          }
-        } else {
-          goto handle_uninterpreted;
-        }
-        if (input->ExpectTag(48)) goto parse_data_processed;
-        break;
-      }
-
-      // optional int64 data_processed = 6;
-      case 6: {
-        if (::google::protobuf::internal::WireFormatLite::GetTagWireType(tag) ==
-            ::google::protobuf::internal::WireFormatLite::WIRETYPE_VARINT) {
-         parse_data_processed:
-          DO_((::google::protobuf::internal::WireFormatLite::ReadPrimitive<
-                   ::google::protobuf::int64, ::google::protobuf::internal::WireFormatLite::TYPE_INT64>(
-                 input, &data_processed_)));
-          set_has_data_processed();
-        } else {
-          goto handle_uninterpreted;
-        }
-        if (input->ExpectTag(58)) goto parse_handle;
-        break;
-      }
-
-      // optional .exec.bit.FragmentHandle handle = 7;
-      case 7: {
-        if (::google::protobuf::internal::WireFormatLite::GetTagWireType(tag) ==
             ::google::protobuf::internal::WireFormatLite::WIRETYPE_LENGTH_DELIMITED) {
          parse_handle:
           DO_(::google::protobuf::internal::WireFormatLite::ReadMessageNoVirtual(
@@ -1030,36 +874,6 @@ bool FragmentStatus::MergePartialFromCodedStream(
         } else {
           goto handle_uninterpreted;
         }
-        if (input->ExpectTag(66)) goto parse_error;
-        break;
-      }
-
-      // optional .exec.shared.DrillPBError error = 8;
-      case 8: {
-        if (::google::protobuf::internal::WireFormatLite::GetTagWireType(tag) ==
-            ::google::protobuf::internal::WireFormatLite::WIRETYPE_LENGTH_DELIMITED) {
-         parse_error:
-          DO_(::google::protobuf::internal::WireFormatLite::ReadMessageNoVirtual(
-               input, mutable_error()));
-        } else {
-          goto handle_uninterpreted;
-        }
-        if (input->ExpectTag(72)) goto parse_running_time;
-        break;
-      }
-
-      // optional int64 running_time = 9;
-      case 9: {
-        if (::google::protobuf::internal::WireFormatLite::GetTagWireType(tag) ==
-            ::google::protobuf::internal::WireFormatLite::WIRETYPE_VARINT) {
-         parse_running_time:
-          DO_((::google::protobuf::internal::WireFormatLite::ReadPrimitive<
-                   ::google::protobuf::int64, ::google::protobuf::internal::WireFormatLite::TYPE_INT64>(
-                 input, &running_time_)));
-          set_has_running_time();
-        } else {
-          goto handle_uninterpreted;
-        }
         if (input->ExpectAtEnd()) return true;
         break;
       }
@@ -1082,52 +896,16 @@ bool FragmentStatus::MergePartialFromCodedStream(
 
 void FragmentStatus::SerializeWithCachedSizes(
     ::google::protobuf::io::CodedOutputStream* output) const {
-  // optional int64 memory_use = 1;
-  if (has_memory_use()) {
-    ::google::protobuf::internal::WireFormatLite::WriteInt64(1, this->memory_use(), output);
-  }
-
-  // optional int64 batches_completed = 2;
-  if (has_batches_completed()) {
-    ::google::protobuf::internal::WireFormatLite::WriteInt64(2, this->batches_completed(), output);
-  }
-
-  // optional int64 records_completed = 3;
-  if (has_records_completed()) {
-    ::google::protobuf::internal::WireFormatLite::WriteInt64(3, this->records_completed(), output);
-  }
-
-  // optional int32 estimated_completion_percentage = 4;
-  if (has_estimated_completion_percentage()) {
-    ::google::protobuf::internal::WireFormatLite::WriteInt32(4, this->estimated_completion_percentage(), output);
-  }
-
-  // optional .exec.bit.control.FragmentStatus.FragmentState state = 5;
-  if (has_state()) {
-    ::google::protobuf::internal::WireFormatLite::WriteEnum(
-      5, this->state(), output);
-  }
-
-  // optional int64 data_processed = 6;
-  if (has_data_processed()) {
-    ::google::protobuf::internal::WireFormatLite::WriteInt64(6, this->data_processed(), output);
-  }
-
-  // optional .exec.bit.FragmentHandle handle = 7;
-  if (has_handle()) {
+  // optional .exec.shared.MinorFragmentProfile profile = 1;
+  if (has_profile()) {
     ::google::protobuf::internal::WireFormatLite::WriteMessageMaybeToArray(
-      7, this->handle(), output);
+      1, this->profile(), output);
   }
 
-  // optional .exec.shared.DrillPBError error = 8;
-  if (has_error()) {
+  // optional .exec.bit.FragmentHandle handle = 2;
+  if (has_handle()) {
     ::google::protobuf::internal::WireFormatLite::WriteMessageMaybeToArray(
-      8, this->error(), output);
-  }
-
-  // optional int64 running_time = 9;
-  if (has_running_time()) {
-    ::google::protobuf::internal::WireFormatLite::WriteInt64(9, this->running_time(), output);
+      2, this->handle(), output);
   }
 
   if (!unknown_fields().empty()) {
@@ -1138,54 +916,18 @@ void FragmentStatus::SerializeWithCachedSizes(
 
 ::google::protobuf::uint8* FragmentStatus::SerializeWithCachedSizesToArray(
     ::google::protobuf::uint8* target) const {
-  // optional int64 memory_use = 1;
-  if (has_memory_use()) {
-    target = ::google::protobuf::internal::WireFormatLite::WriteInt64ToArray(1, this->memory_use(), target);
-  }
-
-  // optional int64 batches_completed = 2;
-  if (has_batches_completed()) {
-    target = ::google::protobuf::internal::WireFormatLite::WriteInt64ToArray(2, this->batches_completed(), target);
-  }
-
-  // optional int64 records_completed = 3;
-  if (has_records_completed()) {
-    target = ::google::protobuf::internal::WireFormatLite::WriteInt64ToArray(3, this->records_completed(), target);
-  }
-
-  // optional int32 estimated_completion_percentage = 4;
-  if (has_estimated_completion_percentage()) {
-    target = ::google::protobuf::internal::WireFormatLite::WriteInt32ToArray(4, this->estimated_completion_percentage(), target);
-  }
-
-  // optional .exec.bit.control.FragmentStatus.FragmentState state = 5;
-  if (has_state()) {
-    target = ::google::protobuf::internal::WireFormatLite::WriteEnumToArray(
-      5, this->state(), target);
-  }
-
-  // optional int64 data_processed = 6;
-  if (has_data_processed()) {
-    target = ::google::protobuf::internal::WireFormatLite::WriteInt64ToArray(6, this->data_processed(), target);
-  }
-
-  // optional .exec.bit.FragmentHandle handle = 7;
-  if (has_handle()) {
+  // optional .exec.shared.MinorFragmentProfile profile = 1;
+  if (has_profile()) {
     target = ::google::protobuf::internal::WireFormatLite::
       WriteMessageNoVirtualToArray(
-        7, this->handle(), target);
+        1, this->profile(), target);
   }
 
-  // optional .exec.shared.DrillPBError error = 8;
-  if (has_error()) {
+  // optional .exec.bit.FragmentHandle handle = 2;
+  if (has_handle()) {
     target = ::google::protobuf::internal::WireFormatLite::
       WriteMessageNoVirtualToArray(
-        8, this->error(), target);
-  }
-
-  // optional int64 running_time = 9;
-  if (has_running_time()) {
-    target = ::google::protobuf::internal::WireFormatLite::WriteInt64ToArray(9, this->running_time(), target);
+        2, this->handle(), target);
   }
 
   if (!unknown_fields().empty()) {
@@ -1199,70 +941,20 @@ int FragmentStatus::ByteSize() const {
   int total_size = 0;
 
   if (_has_bits_[0 / 32] & (0xffu << (0 % 32))) {
-    // optional int64 memory_use = 1;
-    if (has_memory_use()) {
-      total_size += 1 +
-        ::google::protobuf::internal::WireFormatLite::Int64Size(
-          this->memory_use());
-    }
-
-    // optional int64 batches_completed = 2;
-    if (has_batches_completed()) {
-      total_size += 1 +
-        ::google::protobuf::internal::WireFormatLite::Int64Size(
-          this->batches_completed());
-    }
-
-    // optional int64 records_completed = 3;
-    if (has_records_completed()) {
-      total_size += 1 +
-        ::google::protobuf::internal::WireFormatLite::Int64Size(
-          this->records_completed());
-    }
-
-    // optional int32 estimated_completion_percentage = 4;
-    if (has_estimated_completion_percentage()) {
-      total_size += 1 +
-        ::google::protobuf::internal::WireFormatLite::Int32Size(
-          this->estimated_completion_percentage());
-    }
-
-    // optional .exec.bit.control.FragmentStatus.FragmentState state = 5;
-    if (has_state()) {
-      total_size += 1 +
-        ::google::protobuf::internal::WireFormatLite::EnumSize(this->state());
-    }
-
-    // optional int64 data_processed = 6;
-    if (has_data_processed()) {
+    // optional .exec.shared.MinorFragmentProfile profile = 1;
+    if (has_profile()) {
       total_size += 1 +
-        ::google::protobuf::internal::WireFormatLite::Int64Size(
-          this->data_processed());
+        ::google::protobuf::internal::WireFormatLite::MessageSizeNoVirtual(
+          this->profile());
     }
 
-    // optional .exec.bit.FragmentHandle handle = 7;
+    // optional .exec.bit.FragmentHandle handle = 2;
     if (has_handle()) {
       total_size += 1 +
         ::google::protobuf::internal::WireFormatLite::MessageSizeNoVirtual(
           this->handle());
     }
 
-    // optional .exec.shared.DrillPBError error = 8;
-    if (has_error()) {
-      total_size += 1 +
-        ::google::protobuf::internal::WireFormatLite::MessageSizeNoVirtual(
-          this->error());
-    }
-
-  }
-  if (_has_bits_[8 / 32] & (0xffu << (8 % 32))) {
-    // optional int64 running_time = 9;
-    if (has_running_time()) {
-      total_size += 1 +
-        ::google::protobuf::internal::WireFormatLite::Int64Size(
-          this->running_time());
-    }
-
   }
   if (!unknown_fields().empty()) {
     total_size +=
@@ -1290,35 +982,12 @@ void FragmentStatus::MergeFrom(const ::google::protobuf::Message& from) {
 void FragmentStatus::MergeFrom(const FragmentStatus& from) {
   GOOGLE_CHECK_NE(&from, this);
   if (from._has_bits_[0 / 32] & (0xffu << (0 % 32))) {
-    if (from.has_memory_use()) {
-      set_memory_use(from.memory_use());
-    }
-    if (from.has_batches_completed()) {
-      set_batches_completed(from.batches_completed());
-    }
-    if (from.has_records_completed()) {
-      set_records_completed(from.records_completed());
-    }
-    if (from.has_estimated_completion_percentage()) {
-      set_estimated_completion_percentage(from.estimated_completion_percentage());
-    }
-    if (from.has_state()) {
-      set_state(from.state());
-    }
-    if (from.has_data_processed()) {
-      set_data_processed(from.data_processed());
+    if (from.has_profile()) {
+      mutable_profile()->::exec::shared::MinorFragmentProfile::MergeFrom(from.profile());
     }
     if (from.has_handle()) {
       mutable_handle()->::exec::bit::FragmentHandle::MergeFrom(from.handle());
     }
-    if (from.has_error()) {
-      mutable_error()->::exec::shared::DrillPBError::MergeFrom(from.error());
-    }
-  }
-  if (from._has_bits_[8 / 32] & (0xffu << (8 % 32))) {
-    if (from.has_running_time()) {
-      set_running_time(from.running_time());
-    }
   }
   mutable_unknown_fields()->MergeFrom(from.unknown_fields());
 }
@@ -1342,15 +1011,8 @@ bool FragmentStatus::IsInitialized() const {
 
 void FragmentStatus::Swap(FragmentStatus* other) {
   if (other != this) {
-    std::swap(memory_use_, other->memory_use_);
-    std::swap(batches_completed_, other->batches_completed_);
-    std::swap(records_completed_, other->records_completed_);
-    std::swap(estimated_completion_percentage_, other->estimated_completion_percentage_);
-    std::swap(state_, other->state_);
-    std::swap(data_processed_, other->data_processed_);
+    std::swap(profile_, other->profile_);
     std::swap(handle_, other->handle_);
-    std::swap(error_, other->error_);
-    std::swap(running_time_, other->running_time_);
     std::swap(_has_bits_[0], other->_has_bits_[0]);
     _unknown_fields_.Swap(&other->_unknown_fields_);
     std::swap(_cached_size_, other->_cached_size_);
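
FragmentStatus thus shrinks from nine scalar and enum fields to two message fields, with all progress detail delegated to exec.shared.MinorFragmentProfile. Client code goes through the regenerated accessors, e.g. (accessor names are from the generated API; the scenario is illustrative):

    #include "BitControl.pb.h"

    void fillStatus(exec::bit::control::FragmentStatus* status) {
        status->mutable_profile();   /* lazily allocates the MinorFragmentProfile */
        status->mutable_handle();    /* lazily allocates the FragmentHandle */
        /* has_profile() and has_handle() now both return true. */
    }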

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/aaa4db74/contrib/native/client/src/protobuf/BitControl.pb.h
----------------------------------------------------------------------
diff --git a/contrib/native/client/src/protobuf/BitControl.pb.h b/contrib/native/client/src/protobuf/BitControl.pb.h
index e26f7c0..8a7262b 100644
--- a/contrib/native/client/src/protobuf/BitControl.pb.h
+++ b/contrib/native/client/src/protobuf/BitControl.pb.h
@@ -45,29 +45,6 @@ class FragmentStatus;
 class PlanFragment;
 class WorkQueueStatus;
 
-enum FragmentStatus_FragmentState {
-  FragmentStatus_FragmentState_SENDING = 0,
-  FragmentStatus_FragmentState_AWAITING_ALLOCATION = 1,
-  FragmentStatus_FragmentState_RUNNING = 2,
-  FragmentStatus_FragmentState_FINISHED = 3,
-  FragmentStatus_FragmentState_CANCELLED = 4,
-  FragmentStatus_FragmentState_FAILED = 5
-};
-bool FragmentStatus_FragmentState_IsValid(int value);
-const FragmentStatus_FragmentState FragmentStatus_FragmentState_FragmentState_MIN = FragmentStatus_FragmentState_SENDING;
-const FragmentStatus_FragmentState FragmentStatus_FragmentState_FragmentState_MAX = FragmentStatus_FragmentState_FAILED;
-const int FragmentStatus_FragmentState_FragmentState_ARRAYSIZE = FragmentStatus_FragmentState_FragmentState_MAX + 1;
-
-const ::google::protobuf::EnumDescriptor* FragmentStatus_FragmentState_descriptor();
-inline const ::std::string& FragmentStatus_FragmentState_Name(FragmentStatus_FragmentState value) {
-  return ::google::protobuf::internal::NameOfEnum(
-    FragmentStatus_FragmentState_descriptor(), value);
-}
-inline bool FragmentStatus_FragmentState_Parse(
-    const ::std::string& name, FragmentStatus_FragmentState* value) {
-  return ::google::protobuf::internal::ParseNamedEnum<FragmentStatus_FragmentState>(
-    FragmentStatus_FragmentState_descriptor(), name, value);
-}
 enum RpcType {
   HANDSHAKE = 0,
   ACK = 1,
@@ -338,138 +315,40 @@ class FragmentStatus : public ::google::protobuf::Message {
 
   // nested types ----------------------------------------------------
 
-  typedef FragmentStatus_FragmentState FragmentState;
-  static const FragmentState SENDING = FragmentStatus_FragmentState_SENDING;
-  static const FragmentState AWAITING_ALLOCATION = FragmentStatus_FragmentState_AWAITING_ALLOCATION;
-  static const FragmentState RUNNING = FragmentStatus_FragmentState_RUNNING;
-  static const FragmentState FINISHED = FragmentStatus_FragmentState_FINISHED;
-  static const FragmentState CANCELLED = FragmentStatus_FragmentState_CANCELLED;
-  static const FragmentState FAILED = FragmentStatus_FragmentState_FAILED;
-  static inline bool FragmentState_IsValid(int value) {
-    return FragmentStatus_FragmentState_IsValid(value);
-  }
-  static const FragmentState FragmentState_MIN =
-    FragmentStatus_FragmentState_FragmentState_MIN;
-  static const FragmentState FragmentState_MAX =
-    FragmentStatus_FragmentState_FragmentState_MAX;
-  static const int FragmentState_ARRAYSIZE =
-    FragmentStatus_FragmentState_FragmentState_ARRAYSIZE;
-  static inline const ::google::protobuf::EnumDescriptor*
-  FragmentState_descriptor() {
-    return FragmentStatus_FragmentState_descriptor();
-  }
-  static inline const ::std::string& FragmentState_Name(FragmentState value) {
-    return FragmentStatus_FragmentState_Name(value);
-  }
-  static inline bool FragmentState_Parse(const ::std::string& name,
-      FragmentState* value) {
-    return FragmentStatus_FragmentState_Parse(name, value);
-  }
-
   // accessors -------------------------------------------------------
 
-  // optional int64 memory_use = 1;
-  inline bool has_memory_use() const;
-  inline void clear_memory_use();
-  static const int kMemoryUseFieldNumber = 1;
-  inline ::google::protobuf::int64 memory_use() const;
-  inline void set_memory_use(::google::protobuf::int64 value);
-
-  // optional int64 batches_completed = 2;
-  inline bool has_batches_completed() const;
-  inline void clear_batches_completed();
-  static const int kBatchesCompletedFieldNumber = 2;
-  inline ::google::protobuf::int64 batches_completed() const;
-  inline void set_batches_completed(::google::protobuf::int64 value);
-
-  // optional int64 records_completed = 3;
-  inline bool has_records_completed() const;
-  inline void clear_records_completed();
-  static const int kRecordsCompletedFieldNumber = 3;
-  inline ::google::protobuf::int64 records_completed() const;
-  inline void set_records_completed(::google::protobuf::int64 value);
-
-  // optional int32 estimated_completion_percentage = 4;
-  inline bool has_estimated_completion_percentage() const;
-  inline void clear_estimated_completion_percentage();
-  static const int kEstimatedCompletionPercentageFieldNumber = 4;
-  inline ::google::protobuf::int32 estimated_completion_percentage() const;
-  inline void set_estimated_completion_percentage(::google::protobuf::int32 value);
-
-  // optional .exec.bit.control.FragmentStatus.FragmentState state = 5;
-  inline bool has_state() const;
-  inline void clear_state();
-  static const int kStateFieldNumber = 5;
-  inline ::exec::bit::control::FragmentStatus_FragmentState state() const;
-  inline void set_state(::exec::bit::control::FragmentStatus_FragmentState value);
-
-  // optional int64 data_processed = 6;
-  inline bool has_data_processed() const;
-  inline void clear_data_processed();
-  static const int kDataProcessedFieldNumber = 6;
-  inline ::google::protobuf::int64 data_processed() const;
-  inline void set_data_processed(::google::protobuf::int64 value);
-
-  // optional .exec.bit.FragmentHandle handle = 7;
+  // optional .exec.shared.MinorFragmentProfile profile = 1;
+  inline bool has_profile() const;
+  inline void clear_profile();
+  static const int kProfileFieldNumber = 1;
+  inline const ::exec::shared::MinorFragmentProfile& profile() const;
+  inline ::exec::shared::MinorFragmentProfile* mutable_profile();
+  inline ::exec::shared::MinorFragmentProfile* release_profile();
+  inline void set_allocated_profile(::exec::shared::MinorFragmentProfile* profile);
+
+  // optional .exec.bit.FragmentHandle handle = 2;
   inline bool has_handle() const;
   inline void clear_handle();
-  static const int kHandleFieldNumber = 7;
+  static const int kHandleFieldNumber = 2;
   inline const ::exec::bit::FragmentHandle& handle() const;
   inline ::exec::bit::FragmentHandle* mutable_handle();
   inline ::exec::bit::FragmentHandle* release_handle();
   inline void set_allocated_handle(::exec::bit::FragmentHandle* handle);
 
-  // optional .exec.shared.DrillPBError error = 8;
-  inline bool has_error() const;
-  inline void clear_error();
-  static const int kErrorFieldNumber = 8;
-  inline const ::exec::shared::DrillPBError& error() const;
-  inline ::exec::shared::DrillPBError* mutable_error();
-  inline ::exec::shared::DrillPBError* release_error();
-  inline void set_allocated_error(::exec::shared::DrillPBError* error);
-
-  // optional int64 running_time = 9;
-  inline bool has_running_time() const;
-  inline void clear_running_time();
-  static const int kRunningTimeFieldNumber = 9;
-  inline ::google::protobuf::int64 running_time() const;
-  inline void set_running_time(::google::protobuf::int64 value);
-
   // @@protoc_insertion_point(class_scope:exec.bit.control.FragmentStatus)
  private:
-  inline void set_has_memory_use();
-  inline void clear_has_memory_use();
-  inline void set_has_batches_completed();
-  inline void clear_has_batches_completed();
-  inline void set_has_records_completed();
-  inline void clear_has_records_completed();
-  inline void set_has_estimated_completion_percentage();
-  inline void clear_has_estimated_completion_percentage();
-  inline void set_has_state();
-  inline void clear_has_state();
-  inline void set_has_data_processed();
-  inline void clear_has_data_processed();
+  inline void set_has_profile();
+  inline void clear_has_profile();
   inline void set_has_handle();
   inline void clear_has_handle();
-  inline void set_has_error();
-  inline void clear_has_error();
-  inline void set_has_running_time();
-  inline void clear_has_running_time();
 
   ::google::protobuf::UnknownFieldSet _unknown_fields_;
 
-  ::google::protobuf::int64 memory_use_;
-  ::google::protobuf::int64 batches_completed_;
-  ::google::protobuf::int64 records_completed_;
-  ::google::protobuf::int32 estimated_completion_percentage_;
-  int state_;
-  ::google::protobuf::int64 data_processed_;
+  ::exec::shared::MinorFragmentProfile* profile_;
   ::exec::bit::FragmentHandle* handle_;
-  ::exec::shared::DrillPBError* error_;
-  ::google::protobuf::int64 running_time_;
 
   mutable int _cached_size_;
-  ::google::protobuf::uint32 _has_bits_[(9 + 31) / 32];
+  ::google::protobuf::uint32 _has_bits_[(2 + 31) / 32];
 
   friend void  protobuf_AddDesc_BitControl_2eproto();
   friend void protobuf_AssignDesc_BitControl_2eproto();
@@ -945,148 +824,53 @@ BitStatus::mutable_fragment_status() {
 
 // FragmentStatus
 
-// optional int64 memory_use = 1;
-inline bool FragmentStatus::has_memory_use() const {
+// optional .exec.shared.MinorFragmentProfile profile = 1;
+inline bool FragmentStatus::has_profile() const {
   return (_has_bits_[0] & 0x00000001u) != 0;
 }
-inline void FragmentStatus::set_has_memory_use() {
+inline void FragmentStatus::set_has_profile() {
   _has_bits_[0] |= 0x00000001u;
 }
-inline void FragmentStatus::clear_has_memory_use() {
+inline void FragmentStatus::clear_has_profile() {
   _has_bits_[0] &= ~0x00000001u;
 }
-inline void FragmentStatus::clear_memory_use() {
-  memory_use_ = GOOGLE_LONGLONG(0);
-  clear_has_memory_use();
-}
-inline ::google::protobuf::int64 FragmentStatus::memory_use() const {
-  return memory_use_;
-}
-inline void FragmentStatus::set_memory_use(::google::protobuf::int64 value) {
-  set_has_memory_use();
-  memory_use_ = value;
-}
-
-// optional int64 batches_completed = 2;
-inline bool FragmentStatus::has_batches_completed() const {
-  return (_has_bits_[0] & 0x00000002u) != 0;
-}
-inline void FragmentStatus::set_has_batches_completed() {
-  _has_bits_[0] |= 0x00000002u;
-}
-inline void FragmentStatus::clear_has_batches_completed() {
-  _has_bits_[0] &= ~0x00000002u;
-}
-inline void FragmentStatus::clear_batches_completed() {
-  batches_completed_ = GOOGLE_LONGLONG(0);
-  clear_has_batches_completed();
-}
-inline ::google::protobuf::int64 FragmentStatus::batches_completed() const {
-  return batches_completed_;
-}
-inline void FragmentStatus::set_batches_completed(::google::protobuf::int64 value) {
-  set_has_batches_completed();
-  batches_completed_ = value;
-}
-
-// optional int64 records_completed = 3;
-inline bool FragmentStatus::has_records_completed() const {
-  return (_has_bits_[0] & 0x00000004u) != 0;
-}
-inline void FragmentStatus::set_has_records_completed() {
-  _has_bits_[0] |= 0x00000004u;
-}
-inline void FragmentStatus::clear_has_records_completed() {
-  _has_bits_[0] &= ~0x00000004u;
-}
-inline void FragmentStatus::clear_records_completed() {
-  records_completed_ = GOOGLE_LONGLONG(0);
-  clear_has_records_completed();
-}
-inline ::google::protobuf::int64 FragmentStatus::records_completed() const {
-  return records_completed_;
-}
-inline void FragmentStatus::set_records_completed(::google::protobuf::int64 value) {
-  set_has_records_completed();
-  records_completed_ = value;
-}
-
-// optional int32 estimated_completion_percentage = 4;
-inline bool FragmentStatus::has_estimated_completion_percentage() const {
-  return (_has_bits_[0] & 0x00000008u) != 0;
-}
-inline void FragmentStatus::set_has_estimated_completion_percentage() {
-  _has_bits_[0] |= 0x00000008u;
-}
-inline void FragmentStatus::clear_has_estimated_completion_percentage() {
-  _has_bits_[0] &= ~0x00000008u;
-}
-inline void FragmentStatus::clear_estimated_completion_percentage() {
-  estimated_completion_percentage_ = 0;
-  clear_has_estimated_completion_percentage();
-}
-inline ::google::protobuf::int32 FragmentStatus::estimated_completion_percentage() const {
-  return estimated_completion_percentage_;
-}
-inline void FragmentStatus::set_estimated_completion_percentage(::google::protobuf::int32 value) {
-  set_has_estimated_completion_percentage();
-  estimated_completion_percentage_ = value;
-}
-
-// optional .exec.bit.control.FragmentStatus.FragmentState state = 5;
-inline bool FragmentStatus::has_state() const {
-  return (_has_bits_[0] & 0x00000010u) != 0;
-}
-inline void FragmentStatus::set_has_state() {
-  _has_bits_[0] |= 0x00000010u;
-}
-inline void FragmentStatus::clear_has_state() {
-  _has_bits_[0] &= ~0x00000010u;
-}
-inline void FragmentStatus::clear_state() {
-  state_ = 0;
-  clear_has_state();
-}
-inline ::exec::bit::control::FragmentStatus_FragmentState FragmentStatus::state() const {
-  return static_cast< ::exec::bit::control::FragmentStatus_FragmentState >(state_);
-}
-inline void FragmentStatus::set_state(::exec::bit::control::FragmentStatus_FragmentState value) {
-  assert(::exec::bit::control::FragmentStatus_FragmentState_IsValid(value));
-  set_has_state();
-  state_ = value;
+inline void FragmentStatus::clear_profile() {
+  if (profile_ != NULL) profile_->::exec::shared::MinorFragmentProfile::Clear();
+  clear_has_profile();
 }
-
-// optional int64 data_processed = 6;
-inline bool FragmentStatus::has_data_processed() const {
-  return (_has_bits_[0] & 0x00000020u) != 0;
-}
-inline void FragmentStatus::set_has_data_processed() {
-  _has_bits_[0] |= 0x00000020u;
-}
-inline void FragmentStatus::clear_has_data_processed() {
-  _has_bits_[0] &= ~0x00000020u;
+inline const ::exec::shared::MinorFragmentProfile& FragmentStatus::profile() const {
+  return profile_ != NULL ? *profile_ : *default_instance_->profile_;
 }
-inline void FragmentStatus::clear_data_processed() {
-  data_processed_ = GOOGLE_LONGLONG(0);
-  clear_has_data_processed();
+inline ::exec::shared::MinorFragmentProfile* FragmentStatus::mutable_profile() {
+  set_has_profile();
+  if (profile_ == NULL) profile_ = new ::exec::shared::MinorFragmentProfile;
+  return profile_;
 }
-inline ::google::protobuf::int64 FragmentStatus::data_processed() const {
-  return data_processed_;
+inline ::exec::shared::MinorFragmentProfile* FragmentStatus::release_profile() {
+  clear_has_profile();
+  ::exec::shared::MinorFragmentProfile* temp = profile_;
+  profile_ = NULL;
+  return temp;
 }
-inline void FragmentStatus::set_data_processed(::google::protobuf::int64 value) {
-  set_has_data_processed();
-  data_processed_ = value;
+inline void FragmentStatus::set_allocated_profile(::exec::shared::MinorFragmentProfile* profile) {
+  delete profile_;
+  profile_ = profile;
+  if (profile) {
+    set_has_profile();
+  } else {
+    clear_has_profile();
+  }
 }
 
-// optional .exec.bit.FragmentHandle handle = 7;
+// optional .exec.bit.FragmentHandle handle = 2;
 inline bool FragmentStatus::has_handle() const {
-  return (_has_bits_[0] & 0x00000040u) != 0;
+  return (_has_bits_[0] & 0x00000002u) != 0;
 }
 inline void FragmentStatus::set_has_handle() {
-  _has_bits_[0] |= 0x00000040u;
+  _has_bits_[0] |= 0x00000002u;
 }
 inline void FragmentStatus::clear_has_handle() {
-  _has_bits_[0] &= ~0x00000040u;
+  _has_bits_[0] &= ~0x00000002u;
 }
 inline void FragmentStatus::clear_handle() {
   if (handle_ != NULL) handle_->::exec::bit::FragmentHandle::Clear();
@@ -1116,66 +900,6 @@ inline void FragmentStatus::set_allocated_handle(::exec::bit::FragmentHandle* ha
   }
 }
 
-// optional .exec.shared.DrillPBError error = 8;
-inline bool FragmentStatus::has_error() const {
-  return (_has_bits_[0] & 0x00000080u) != 0;
-}
-inline void FragmentStatus::set_has_error() {
-  _has_bits_[0] |= 0x00000080u;
-}
-inline void FragmentStatus::clear_has_error() {
-  _has_bits_[0] &= ~0x00000080u;
-}
-inline void FragmentStatus::clear_error() {
-  if (error_ != NULL) error_->::exec::shared::DrillPBError::Clear();
-  clear_has_error();
-}
-inline const ::exec::shared::DrillPBError& FragmentStatus::error() const {
-  return error_ != NULL ? *error_ : *default_instance_->error_;
-}
-inline ::exec::shared::DrillPBError* FragmentStatus::mutable_error() {
-  set_has_error();
-  if (error_ == NULL) error_ = new ::exec::shared::DrillPBError;
-  return error_;
-}
-inline ::exec::shared::DrillPBError* FragmentStatus::release_error() {
-  clear_has_error();
-  ::exec::shared::DrillPBError* temp = error_;
-  error_ = NULL;
-  return temp;
-}
-inline void FragmentStatus::set_allocated_error(::exec::shared::DrillPBError* error) {
-  delete error_;
-  error_ = error;
-  if (error) {
-    set_has_error();
-  } else {
-    clear_has_error();
-  }
-}
-
-// optional int64 running_time = 9;
-inline bool FragmentStatus::has_running_time() const {
-  return (_has_bits_[0] & 0x00000100u) != 0;
-}
-inline void FragmentStatus::set_has_running_time() {
-  _has_bits_[0] |= 0x00000100u;
-}
-inline void FragmentStatus::clear_has_running_time() {
-  _has_bits_[0] &= ~0x00000100u;
-}
-inline void FragmentStatus::clear_running_time() {
-  running_time_ = GOOGLE_LONGLONG(0);
-  clear_has_running_time();
-}
-inline ::google::protobuf::int64 FragmentStatus::running_time() const {
-  return running_time_;
-}
-inline void FragmentStatus::set_running_time(::google::protobuf::int64 value) {
-  set_has_running_time();
-  running_time_ = value;
-}
-
 // -------------------------------------------------------------------
 
 // PlanFragment
@@ -1768,10 +1492,6 @@ namespace google {
 namespace protobuf {
 
 template <>
-inline const EnumDescriptor* GetEnumDescriptor< ::exec::bit::control::FragmentStatus_FragmentState>() {
-  return ::exec::bit::control::FragmentStatus_FragmentState_descriptor();
-}
-template <>
 inline const EnumDescriptor* GetEnumDescriptor< ::exec::bit::control::RpcType>() {
   return ::exec::bit::control::RpcType_descriptor();
 }
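
For context, the net effect of this change on FragmentStatus is that the
per-fragment scalar counters (memory_use, batches_completed, records_completed,
state, and so on) collapse into a single MinorFragmentProfile child message,
and handle moves from field 7 to field 2. Below is a minimal sketch of building
the reshaped message from the Java side; the protoc outer-class names
(BitControl, ExecProtos, UserBitShared) are assumptions following Drill's usual
conventions, not shown in this diff:

import org.apache.drill.exec.proto.BitControl.FragmentStatus;
import org.apache.drill.exec.proto.ExecProtos.FragmentHandle;
import org.apache.drill.exec.proto.UserBitShared.MinorFragmentProfile;

public class FragmentStatusSketch {
  public static FragmentStatus build() {
    // The counters that used to be scalar fields on FragmentStatus now
    // travel on the profile child message instead.
    MinorFragmentProfile profile = MinorFragmentProfile.newBuilder().build();
    FragmentHandle handle = FragmentHandle.newBuilder()
        .setMajorFragmentId(0)
        .setMinorFragmentId(0)
        .build();
    return FragmentStatus.newBuilder()
        .setProfile(profile)   // field 1, replacing the old scalar counters
        .setHandle(handle)     // field 2, previously field 7
        .build();
  }
}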

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/aaa4db74/contrib/native/client/src/protobuf/CMakeLists.txt
----------------------------------------------------------------------
diff --git a/contrib/native/client/src/protobuf/CMakeLists.txt b/contrib/native/client/src/protobuf/CMakeLists.txt
index 98fbfba..154138d 100644
--- a/contrib/native/client/src/protobuf/CMakeLists.txt
+++ b/contrib/native/client/src/protobuf/CMakeLists.txt
@@ -22,48 +22,48 @@ include_directories(${PROTOBUF_INCLUDE_DIR})
 
 #Generate Protobuf code
 
-set (PROTO_SRC_FILES 
-    ${CMAKE_BINARY_DIR}/protobuf/BitControl.proto 
-    ${CMAKE_BINARY_DIR}/protobuf/Coordination.proto 
-    ${CMAKE_BINARY_DIR}/protobuf/GeneralRPC.proto 
-    ${CMAKE_BINARY_DIR}/protobuf/Types.proto 
-    ${CMAKE_BINARY_DIR}/protobuf/UserBitShared.proto 
-    ${CMAKE_BINARY_DIR}/protobuf/BitData.proto 
-    ${CMAKE_BINARY_DIR}/protobuf/ExecutionProtos.proto 
-    ${CMAKE_BINARY_DIR}/protobuf/SchemaDef.proto 
+set (PROTO_SRC_FILES
+    ${CMAKE_BINARY_DIR}/protobuf/BitControl.proto
+    ${CMAKE_BINARY_DIR}/protobuf/Coordination.proto
+    ${CMAKE_BINARY_DIR}/protobuf/GeneralRPC.proto
+    ${CMAKE_BINARY_DIR}/protobuf/Types.proto
+    ${CMAKE_BINARY_DIR}/protobuf/UserBitShared.proto
+    ${CMAKE_BINARY_DIR}/protobuf/BitData.proto
+    ${CMAKE_BINARY_DIR}/protobuf/ExecutionProtos.proto
+    ${CMAKE_BINARY_DIR}/protobuf/SchemaDef.proto
     ${CMAKE_BINARY_DIR}/protobuf/User.proto
     )
 
 PROTOBUF_GENERATE_CPP(GenProtoSources GenProtoHeaders ${PROTO_SRC_FILES})
 
-set (PROTO_CPPSRC_FILES 
-    ${CMAKE_CURRENT_SOURCE_DIR}/BitControl.pb.cc 
-    ${CMAKE_CURRENT_SOURCE_DIR}/Coordination.pb.cc 
-    ${CMAKE_CURRENT_SOURCE_DIR}/GeneralRPC.pb.cc 
-    ${CMAKE_CURRENT_SOURCE_DIR}/Types.pb.cc 
-    ${CMAKE_CURRENT_SOURCE_DIR}/UserBitShared.pb.cc 
-    ${CMAKE_CURRENT_SOURCE_DIR}/BitData.pb.cc 
-    ${CMAKE_CURRENT_SOURCE_DIR}/ExecutionProtos.pb.cc 
-    ${CMAKE_CURRENT_SOURCE_DIR}/SchemaDef.pb.cc 
+set (PROTO_CPPSRC_FILES
+    ${CMAKE_CURRENT_SOURCE_DIR}/BitControl.pb.cc
+    ${CMAKE_CURRENT_SOURCE_DIR}/Coordination.pb.cc
+    ${CMAKE_CURRENT_SOURCE_DIR}/GeneralRPC.pb.cc
+    ${CMAKE_CURRENT_SOURCE_DIR}/Types.pb.cc
+    ${CMAKE_CURRENT_SOURCE_DIR}/UserBitShared.pb.cc
+    ${CMAKE_CURRENT_SOURCE_DIR}/BitData.pb.cc
+    ${CMAKE_CURRENT_SOURCE_DIR}/ExecutionProtos.pb.cc
+    ${CMAKE_CURRENT_SOURCE_DIR}/SchemaDef.pb.cc
     ${CMAKE_CURRENT_SOURCE_DIR}/User.pb.cc
     )
 
 set (PROTO_HDR_DIR ${CMAKE_CURRENT_SOURCE_DIR})
 
-set (PROTO_CPPHDR_FILES 
-    ${PROTO_HDR_DIR}/BitControl.pb.h 
-    ${PROTO_HDR_DIR}/Coordination.pb.h 
-    ${PROTO_HDR_DIR}/GeneralRPC.pb.h 
-    ${PROTO_HDR_DIR}/UserBitShared.pb.h 
-    ${PROTO_HDR_DIR}/BitData.pb.h 
-    ${PROTO_HDR_DIR}/ExecutionProtos.pb.h 
-    ${PROTO_HDR_DIR}/SchemaDef.pb.h 
+set (PROTO_CPPHDR_FILES
+    ${PROTO_HDR_DIR}/BitControl.pb.h
+    ${PROTO_HDR_DIR}/Coordination.pb.h
+    ${PROTO_HDR_DIR}/GeneralRPC.pb.h
+    ${PROTO_HDR_DIR}/UserBitShared.pb.h
+    ${PROTO_HDR_DIR}/BitData.pb.h
+    ${PROTO_HDR_DIR}/ExecutionProtos.pb.h
+    ${PROTO_HDR_DIR}/SchemaDef.pb.h
     )
 
 set (PROTO_INC_DIR ${CMAKE_CURRENT_SOURCE_DIR}/../include/drill/protobuf)
 
-set (PROTO_CPPINC_FILES 
-    ${PROTO_INC_DIR}/Types.pb.h 
+set (PROTO_CPPINC_FILES
+    ${PROTO_INC_DIR}/Types.pb.h
     ${PROTO_INC_DIR}/User.pb.h
     )
 

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/aaa4db74/contrib/native/client/src/protobuf/User.pb.cc
----------------------------------------------------------------------
diff --git a/contrib/native/client/src/protobuf/User.pb.cc b/contrib/native/client/src/protobuf/User.pb.cc
index d2826ea..a2bd4df 100644
--- a/contrib/native/client/src/protobuf/User.pb.cc
+++ b/contrib/native/client/src/protobuf/User.pb.cc
@@ -47,7 +47,6 @@ const ::google::protobuf::internal::GeneratedMessageReflection*
   QueryResult_reflection_ = NULL;
 const ::google::protobuf::EnumDescriptor* QueryResult_QueryState_descriptor_ = NULL;
 const ::google::protobuf::EnumDescriptor* RpcType_descriptor_ = NULL;
-const ::google::protobuf::EnumDescriptor* QueryType_descriptor_ = NULL;
 const ::google::protobuf::EnumDescriptor* QueryResultsMode_descriptor_ = NULL;
 
 }  // namespace
@@ -200,8 +199,7 @@ void protobuf_AssignDesc_User_2eproto() {
       sizeof(QueryResult));
   QueryResult_QueryState_descriptor_ = QueryResult_descriptor_->enum_type(0);
   RpcType_descriptor_ = file->enum_type(0);
-  QueryType_descriptor_ = file->enum_type(1);
-  QueryResultsMode_descriptor_ = file->enum_type(2);
+  QueryResultsMode_descriptor_ = file->enum_type(1);
 }
 
 namespace {
@@ -273,32 +271,31 @@ void protobuf_AddDesc_User_2eproto() {
     "redentials\022-\n\nproperties\030\005 \001(\0132\031.exec.us"
     "er.UserProperties\"S\n\016RequestResults\022&\n\010q"
     "uery_id\030\001 \001(\0132\024.exec.shared.QueryId\022\031\n\021m"
-    "aximum_responses\030\002 \001(\005\"o\n\010RunQuery\0221\n\014re"
+    "aximum_responses\030\002 \001(\005\"q\n\010RunQuery\0221\n\014re"
     "sults_mode\030\001 \001(\0162\033.exec.user.QueryResult"
-    "sMode\022\"\n\004type\030\002 \001(\0162\024.exec.user.QueryTyp"
-    "e\022\014\n\004plan\030\003 \001(\t\")\n\022BitToUserHandshake\022\023\n"
-    "\013rpc_version\030\002 \001(\005\"7\n\nNodeStatus\022\017\n\007node"
-    "_id\030\001 \001(\005\022\030\n\020memory_footprint\030\002 \001(\003\"\331\003\n\013"
-    "QueryResult\0226\n\013query_state\030\001 \001(\0162!.exec."
-    "user.QueryResult.QueryState\022&\n\010query_id\030"
-    "\002 \001(\0132\024.exec.shared.QueryId\022\025\n\ris_last_c"
-    "hunk\030\003 \001(\010\022\021\n\trow_count\030\004 \001(\005\022\024\n\014records"
-    "_scan\030\005 \001(\003\022\025\n\rrecords_error\030\006 \001(\003\022\027\n\017su"
-    "bmission_time\030\007 \001(\003\022*\n\013node_status\030\010 \003(\013"
-    "2\025.exec.user.NodeStatus\022(\n\005error\030\t \003(\0132\031"
-    ".exec.shared.DrillPBError\022(\n\003def\030\n \001(\0132\033"
-    ".exec.shared.RecordBatchDef\022\026\n\016schema_ch"
-    "anged\030\013 \001(\010\"b\n\nQueryState\022\013\n\007PENDING\020\000\022\013"
-    "\n\007RUNNING\020\001\022\r\n\tCOMPLETED\020\002\022\014\n\010CANCELED\020\003"
-    "\022\n\n\006FAILED\020\004\022\021\n\rUNKNOWN_QUERY\020\005*\270\001\n\007RpcT"
-    "ype\022\r\n\tHANDSHAKE\020\000\022\007\n\003ACK\020\001\022\013\n\007GOODBYE\020\002"
-    "\022\r\n\tRUN_QUERY\020\003\022\020\n\014CANCEL_QUERY\020\004\022\023\n\017REQ"
-    "UEST_RESULTS\020\005\022\020\n\014QUERY_RESULT\020\006\022\020\n\014QUER"
-    "Y_HANDLE\020\007\022\026\n\022REQ_META_FUNCTIONS\020\010\022\026\n\022RE"
-    "SP_FUNCTION_LIST\020\t*/\n\tQueryType\022\007\n\003SQL\020\001"
-    "\022\013\n\007LOGICAL\020\002\022\014\n\010PHYSICAL\020\003*#\n\020QueryResu"
-    "ltsMode\022\017\n\013STREAM_FULL\020\001B+\n\033org.apache.d"
-    "rill.exec.protoB\nUserProtosH\001", 1469);
+    "sMode\022$\n\004type\030\002 \001(\0162\026.exec.shared.QueryT"
+    "ype\022\014\n\004plan\030\003 \001(\t\")\n\022BitToUserHandshake\022"
+    "\023\n\013rpc_version\030\002 \001(\005\"7\n\nNodeStatus\022\017\n\007no"
+    "de_id\030\001 \001(\005\022\030\n\020memory_footprint\030\002 \001(\003\"\331\003"
+    "\n\013QueryResult\0226\n\013query_state\030\001 \001(\0162!.exe"
+    "c.user.QueryResult.QueryState\022&\n\010query_i"
+    "d\030\002 \001(\0132\024.exec.shared.QueryId\022\025\n\ris_last"
+    "_chunk\030\003 \001(\010\022\021\n\trow_count\030\004 \001(\005\022\024\n\014recor"
+    "ds_scan\030\005 \001(\003\022\025\n\rrecords_error\030\006 \001(\003\022\027\n\017"
+    "submission_time\030\007 \001(\003\022*\n\013node_status\030\010 \003"
+    "(\0132\025.exec.user.NodeStatus\022(\n\005error\030\t \003(\013"
+    "2\031.exec.shared.DrillPBError\022(\n\003def\030\n \001(\013"
+    "2\033.exec.shared.RecordBatchDef\022\026\n\016schema_"
+    "changed\030\013 \001(\010\"b\n\nQueryState\022\013\n\007PENDING\020\000"
+    "\022\013\n\007RUNNING\020\001\022\r\n\tCOMPLETED\020\002\022\014\n\010CANCELED"
+    "\020\003\022\n\n\006FAILED\020\004\022\021\n\rUNKNOWN_QUERY\020\005*\270\001\n\007Rp"
+    "cType\022\r\n\tHANDSHAKE\020\000\022\007\n\003ACK\020\001\022\013\n\007GOODBYE"
+    "\020\002\022\r\n\tRUN_QUERY\020\003\022\020\n\014CANCEL_QUERY\020\004\022\023\n\017R"
+    "EQUEST_RESULTS\020\005\022\020\n\014QUERY_RESULT\020\006\022\020\n\014QU"
+    "ERY_HANDLE\020\007\022\026\n\022REQ_META_FUNCTIONS\020\010\022\026\n\022"
+    "RESP_FUNCTION_LIST\020\t*#\n\020QueryResultsMode"
+    "\022\017\n\013STREAM_FULL\020\001B+\n\033org.apache.drill.ex"
+    "ec.protoB\nUserProtosH\001", 1422);
   ::google::protobuf::MessageFactory::InternalRegisterGeneratedFile(
     "User.proto", &protobuf_RegisterTypes);
   Property::default_instance_ = new Property();
@@ -348,21 +345,6 @@ bool RpcType_IsValid(int value) {
   }
 }
 
-const ::google::protobuf::EnumDescriptor* QueryType_descriptor() {
-  protobuf_AssignDescriptorsOnce();
-  return QueryType_descriptor_;
-}
-bool QueryType_IsValid(int value) {
-  switch(value) {
-    case 1:
-    case 2:
-    case 3:
-      return true;
-    default:
-      return false;
-  }
-}
-
 const ::google::protobuf::EnumDescriptor* QueryResultsMode_descriptor() {
   protobuf_AssignDescriptorsOnce();
   return QueryResultsMode_descriptor_;
@@ -1607,7 +1589,7 @@ bool RunQuery::MergePartialFromCodedStream(
         break;
       }
 
-      // optional .exec.user.QueryType type = 2;
+      // optional .exec.shared.QueryType type = 2;
       case 2: {
         if (::google::protobuf::internal::WireFormatLite::GetTagWireType(tag) ==
             ::google::protobuf::internal::WireFormatLite::WIRETYPE_VARINT) {
@@ -1616,8 +1598,8 @@ bool RunQuery::MergePartialFromCodedStream(
           DO_((::google::protobuf::internal::WireFormatLite::ReadPrimitive<
                    int, ::google::protobuf::internal::WireFormatLite::TYPE_ENUM>(
                  input, &value)));
-          if (::exec::user::QueryType_IsValid(value)) {
-            set_type(static_cast< ::exec::user::QueryType >(value));
+          if (::exec::shared::QueryType_IsValid(value)) {
+            set_type(static_cast< ::exec::shared::QueryType >(value));
           } else {
             mutable_unknown_fields()->AddVarint(2, value);
           }
@@ -1669,7 +1651,7 @@ void RunQuery::SerializeWithCachedSizes(
       1, this->results_mode(), output);
   }
 
-  // optional .exec.user.QueryType type = 2;
+  // optional .exec.shared.QueryType type = 2;
   if (has_type()) {
     ::google::protobuf::internal::WireFormatLite::WriteEnum(
       2, this->type(), output);
@@ -1698,7 +1680,7 @@ void RunQuery::SerializeWithCachedSizes(
       1, this->results_mode(), target);
   }
 
-  // optional .exec.user.QueryType type = 2;
+  // optional .exec.shared.QueryType type = 2;
   if (has_type()) {
     target = ::google::protobuf::internal::WireFormatLite::WriteEnumToArray(
       2, this->type(), target);
@@ -1731,7 +1713,7 @@ int RunQuery::ByteSize() const {
         ::google::protobuf::internal::WireFormatLite::EnumSize(this->results_mode());
     }
 
-    // optional .exec.user.QueryType type = 2;
+    // optional .exec.shared.QueryType type = 2;
     if (has_type()) {
       total_size += 1 +
         ::google::protobuf::internal::WireFormatLite::EnumSize(this->type());
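
The client-visible effect of the User.proto change above is that RunQuery.type
now refers to the shared enum exec.shared.QueryType instead of a duplicate
enum in the exec.user namespace. A small sketch from the Java side; the
UserProtos outer-class name is confirmed by the descriptor string above, while
UserBitShared for the shared protos is an assumption:

import org.apache.drill.exec.proto.UserBitShared.QueryType;
import org.apache.drill.exec.proto.UserProtos.QueryResultsMode;
import org.apache.drill.exec.proto.UserProtos.RunQuery;

public class RunQuerySketch {
  public static RunQuery sqlQuery(String sql) {
    return RunQuery.newBuilder()
        .setResultsMode(QueryResultsMode.STREAM_FULL) // the only defined mode
        .setType(QueryType.SQL)                       // now the shared enum
        .setPlan(sql)
        .build();
  }
}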


[02/32] git commit: DRILL-772: Change comparison operators to use the NULL_IF_NULL policy when inputs are nullable types.

Posted by ja...@apache.org.
DRILL-772: Change comparison operators to use the NULL_IF_NULL policy when inputs are nullable types.


Project: http://git-wip-us.apache.org/repos/asf/incubator-drill/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-drill/commit/42057800
Tree: http://git-wip-us.apache.org/repos/asf/incubator-drill/tree/42057800
Diff: http://git-wip-us.apache.org/repos/asf/incubator-drill/diff/42057800

Branch: refs/heads/master
Commit: 42057800b1f2a4f1bdf771760c9c33a822fc435b
Parents: 27d3e71
Author: Jinfeng Ni <jn...@maprtech.com>
Authored: Tue Jun 17 16:31:32 2014 -0700
Committer: Jinfeng Ni <jn...@maprtech.com>
Committed: Wed Jun 18 07:13:56 2014 -0700

----------------------------------------------------------------------
 .../codegen/templates/ComparisonFunctions.java  | 109 +++----------------
 1 file changed, 15 insertions(+), 94 deletions(-)
----------------------------------------------------------------------
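
For a concrete picture of what the template produces after this change, here
is a minimal sketch of the expansion for the required (non-nullable) Int/Int
pair; nullable pairs are no longer generated at all, and the runtime's
NULL_IF_NULL handling supplies the null propagation instead. BitHolder as the
output type is an assumption consistent with Drill's boolean-valued functions;
the template only shows the inputs via ${left}Holder/${right}Holder:

import org.apache.drill.exec.expr.DrillSimpleFunc;
import org.apache.drill.exec.expr.annotations.FunctionTemplate;
import org.apache.drill.exec.expr.annotations.FunctionTemplate.NullHandling;
import org.apache.drill.exec.expr.annotations.Output;
import org.apache.drill.exec.expr.annotations.Param;
import org.apache.drill.exec.expr.holders.BitHolder;
import org.apache.drill.exec.expr.holders.IntHolder;
import org.apache.drill.exec.record.RecordBatch;

@FunctionTemplate(names = {"less_than", "<"},
    scope = FunctionTemplate.FunctionScope.SIMPLE,
    nulls = NullHandling.NULL_IF_NULL)
public class LessThanIntInt implements DrillSimpleFunc {
  @Param IntHolder left;
  @Param IntHolder right;
  @Output BitHolder out;

  public void setup(RecordBatch b) {}

  public void eval() {
    // Fixed-width path: direct comparison, no manual isSet checks; the
    // engine returns NULL whenever either input is NULL (NULL_IF_NULL).
    out.value = left.value < right.value ? 1 : 0;
  }
}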


http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/42057800/exec/java-exec/src/main/codegen/templates/ComparisonFunctions.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/codegen/templates/ComparisonFunctions.java b/exec/java-exec/src/main/codegen/templates/ComparisonFunctions.java
index b57a37f..df60650 100644
--- a/exec/java-exec/src/main/codegen/templates/ComparisonFunctions.java
+++ b/exec/java-exec/src/main/codegen/templates/ComparisonFunctions.java
@@ -96,6 +96,7 @@ package org.apache.drill.exec.expr.fn.impl;
 
 import org.apache.drill.exec.expr.DrillSimpleFunc;
 import org.apache.drill.exec.expr.annotations.FunctionTemplate;
+import org.apache.drill.exec.expr.annotations.FunctionTemplate.NullHandling;
 import org.apache.drill.exec.expr.annotations.Output;
 import org.apache.drill.exec.expr.annotations.Param;
 import org.apache.drill.exec.expr.holders.*;
@@ -117,8 +118,10 @@ public class GCompare${left}${right}{
         <@compareBlock mode=type.mode left=left right=right output="out.value" nullCompare=true />
       }
   }
-  
-  @FunctionTemplate(names = {"less_than", "<"}, scope = FunctionTemplate.FunctionScope.SIMPLE)
+
+  <#if ! left?starts_with("Nullable")  &&  ! right?starts_with("Nullable") >
+
+  @FunctionTemplate(names = {"less_than", "<"}, scope = FunctionTemplate.FunctionScope.SIMPLE, nulls = NullHandling.NULL_IF_NULL)
   public static class LessThan${left}${right} implements DrillSimpleFunc {
 
       @Param ${left}Holder left;
@@ -128,20 +131,7 @@ public class GCompare${left}${right}{
       public void setup(RecordBatch b) {}
 
       public void eval() {
-        sout: {
-        <#if left?starts_with("Nullable")>
-        if(left.isSet ==0){
-          out.value = 0;
-          break sout;
-        }
-        </#if>
-        <#if right?starts_with("Nullable")>
-        if(right.isSet ==0){
-          out.value = 0;
-          break sout;
-        }
-        </#if>
-
+        
         <#if type.mode == "var" >
         int cmp;
         <@compareBlock mode=type.mode left=left right=right output="cmp" nullCompare=false/>
@@ -150,11 +140,10 @@ public class GCompare${left}${right}{
         out.value = left.value < right.value ? 1 : 0;
         </#if>
 
-        }
       }
   }
   
-  @FunctionTemplate(names = {"less_than_or_equal_to", "<="}, scope = FunctionTemplate.FunctionScope.SIMPLE)
+  @FunctionTemplate(names = {"less_than_or_equal_to", "<="}, scope = FunctionTemplate.FunctionScope.SIMPLE, nulls = NullHandling.NULL_IF_NULL)
   public static class LessThanE${left}${right} implements DrillSimpleFunc {
 
       @Param ${left}Holder left;
@@ -164,19 +153,6 @@ public class GCompare${left}${right}{
       public void setup(RecordBatch b) {}
 
       public void eval() {
-        sout: {
-        <#if left?starts_with("Nullable")>
-        if(left.isSet ==0){
-          out.value = 0;
-          break sout;
-        }
-        </#if>
-        <#if right?starts_with("Nullable")>
-        if(right.isSet ==0){
-          out.value = 0;
-          break sout;
-        }
-        </#if>
         
         <#if type.mode == "var" >
         int cmp;
@@ -186,11 +162,10 @@ public class GCompare${left}${right}{
         out.value = left.value <= right.value ? 1 : 0;
         </#if>
 
-        }
     }
   }
   
-  @FunctionTemplate(names = {"greater_than", ">"}, scope = FunctionTemplate.FunctionScope.SIMPLE)
+  @FunctionTemplate(names = {"greater_than", ">"}, scope = FunctionTemplate.FunctionScope.SIMPLE, nulls = NullHandling.NULL_IF_NULL)
   public static class GreaterThan${left}${right} implements DrillSimpleFunc {
 
       @Param ${left}Holder left;
@@ -200,19 +175,6 @@ public class GCompare${left}${right}{
       public void setup(RecordBatch b) {}
 
       public void eval() {
-        sout: {
-        <#if left?starts_with("Nullable")>
-        if(left.isSet ==0){
-          out.value = 0;
-          break sout;
-        }
-        </#if>
-        <#if right?starts_with("Nullable")>
-        if(right.isSet ==0){
-          out.value = 0;
-          break sout;
-        }
-        </#if>
         
         <#if type.mode == "var" >
         int cmp;
@@ -222,11 +184,10 @@ public class GCompare${left}${right}{
         out.value = left.value > right.value ? 1 : 0;
         </#if>
 
-        }
     }
   }
   
-  @FunctionTemplate(names = {"greater_than_or_equal_to", ">="}, scope = FunctionTemplate.FunctionScope.SIMPLE)
+  @FunctionTemplate(names = {"greater_than_or_equal_to", ">="}, scope = FunctionTemplate.FunctionScope.SIMPLE, nulls = NullHandling.NULL_IF_NULL)
   public static class GreaterThanE${left}${right} implements DrillSimpleFunc {
 
       @Param ${left}Holder left;
@@ -236,19 +197,6 @@ public class GCompare${left}${right}{
       public void setup(RecordBatch b) {}
 
       public void eval() {
-        sout: {
-        <#if left?starts_with("Nullable")>
-        if(left.isSet ==0){
-          out.value = 0;
-          break sout;
-        }
-        </#if>
-        <#if right?starts_with("Nullable")>
-        if(right.isSet ==0){
-          out.value = 0;
-          break sout;
-        }
-        </#if>
         
         <#if type.mode == "var" >            
         int cmp;
@@ -258,11 +206,10 @@ public class GCompare${left}${right}{
         out.value = left.value >= right.value ? 1 : 0;
         </#if>
 
-        }
       }
   }
   
-  @FunctionTemplate(names = {"equal","==","="}, scope = FunctionTemplate.FunctionScope.SIMPLE)
+  @FunctionTemplate(names = {"equal","==","="}, scope = FunctionTemplate.FunctionScope.SIMPLE, nulls = NullHandling.NULL_IF_NULL)
   public static class Equals${left}${right} implements DrillSimpleFunc {
 
       @Param ${left}Holder left;
@@ -272,19 +219,6 @@ public class GCompare${left}${right}{
       public void setup(RecordBatch b) {}
 
       public void eval() {
-        sout: {
-          <#if left?starts_with("Nullable")>
-          if(left.isSet ==0){
-            out.value = 0;
-            break sout;
-          }
-          </#if>
-          <#if right?starts_with("Nullable")>
-          if(right.isSet ==0){
-            out.value = 0;
-            break sout;
-          }
-          </#if>
         
           <#if type.mode == "var" >
 outside: 
@@ -310,11 +244,10 @@ outside:
           out.value = left.value == right.value ? 1 : 0;
           </#if>
 
-        }
       }
   }
   
-  @FunctionTemplate(names = {"not_equal","<>","!="}, scope = FunctionTemplate.FunctionScope.SIMPLE)
+  @FunctionTemplate(names = {"not_equal","<>","!="}, scope = FunctionTemplate.FunctionScope.SIMPLE, nulls = NullHandling.NULL_IF_NULL)
   public static class NotEquals${left}${right} implements DrillSimpleFunc {
 
       @Param ${left}Holder left;
@@ -324,19 +257,6 @@ outside:
       public void setup(RecordBatch b) {}
 
       public void eval() {
-        sout: {
-        <#if left?starts_with("Nullable")>
-        if(left.isSet ==0){
-          out.value = 0;
-          break sout;
-        }
-        </#if>
-        <#if right?starts_with("Nullable")>
-        if(right.isSet ==0){
-          out.value = 0;
-          break sout;
-        }
-        </#if>
         
         <#if type.mode == "var" >            
         int cmp;
@@ -345,11 +265,12 @@ outside:
         <#else>
         out.value = left.value != right.value ? 1 : 0;
         </#if>
-        
-        }
-        
+                
       }
   }
+
+  </#if>
+
 }
 </#list>
 </#list>
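
For the variable-width types the simple functions still route through the
shared <@compareBlock/> macro rather than a direct value comparison. The macro
body is not shown in this diff; as a rough, hypothetical illustration of the
byte-wise ordering it stands in for:

public class VarWidthCompareSketch {
  // Hypothetical helper: the real macro reads bytes out of Drill buffers
  // via the holder's start/end offsets rather than byte arrays.
  static int compareVarWidth(byte[] left, byte[] right) {
    int n = Math.min(left.length, right.length);
    for (int i = 0; i < n; i++) {
      // compare as unsigned bytes, matching binary collation
      int cmp = (left[i] & 0xFF) - (right[i] & 0xFF);
      if (cmp != 0) {
        return cmp > 0 ? 1 : -1;
      }
    }
    // equal prefix: the shorter value sorts first
    return Integer.compare(left.length, right.length);
  }
}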


[24/32] git commit: DRILL-1049: Handle a few types missing in ExpressionStringBuilder and ExpressionTreeMaterializer.castEqual

Posted by ja...@apache.org.
DRILL-1049: Handle a few types missing in ExpressionStringBuilder and ExpressionTreeMaterializer.castEqual


Project: http://git-wip-us.apache.org/repos/asf/incubator-drill/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-drill/commit/4d13046d
Tree: http://git-wip-us.apache.org/repos/asf/incubator-drill/tree/4d13046d
Diff: http://git-wip-us.apache.org/repos/asf/incubator-drill/diff/4d13046d

Branch: refs/heads/master
Commit: 4d13046dddef73b960bbcba1958a2a262b1e02d6
Parents: 980dc87
Author: vkorukanti <ve...@gmail.com>
Authored: Fri Jun 20 10:04:11 2014 -0700
Committer: Jacques Nadeau <ja...@apache.org>
Committed: Fri Jun 20 10:56:16 2014 -0700

----------------------------------------------------------------------
 .../expression/ExpressionStringBuilder.java     |  2 ++
 .../exec/expr/ExpressionTreeMaterializer.java   | 28 +++++++++++++-------
 2 files changed, 20 insertions(+), 10 deletions(-)
----------------------------------------------------------------------
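
The castEqual change below boils down to: a cast between identical fixed-width
minor types is trivially redundant, while decimal casts are only redundant
when scale and precision also match. A hypothetical standalone rendering of
that test (the real logic lives inside ExpressionTreeMaterializer.castEqual,
over the full type list shown in the diff):

import org.apache.drill.common.types.TypeProtos.MajorType;

public class CastEqualSketch {
  static boolean castIsTrivial(MajorType from, MajorType to) {
    if (from.getMinorType() != to.getMinorType()) {
      return false;                  // different minor types: a real cast
    }
    switch (to.getMinorType()) {
      case INT: case BIGINT: case BIT: case TINYINT: case SMALLINT:
      case TIME: case TIMESTAMP: case DATE:
      case INTERVAL: case INTERVALDAY: case INTERVALYEAR:
        return true;                 // fixed width: nothing else matters
      case DECIMAL9: case DECIMAL18:
        // decimals also need matching scale and precision
        return to.getScale() == from.getScale()
            && to.getPrecision() == from.getPrecision();
      default:
        return false;                // conservative for everything else
    }
  }
}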


http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/4d13046d/common/src/main/java/org/apache/drill/common/expression/ExpressionStringBuilder.java
----------------------------------------------------------------------
diff --git a/common/src/main/java/org/apache/drill/common/expression/ExpressionStringBuilder.java b/common/src/main/java/org/apache/drill/common/expression/ExpressionStringBuilder.java
index 7d54b6b..edc1a53 100644
--- a/common/src/main/java/org/apache/drill/common/expression/ExpressionStringBuilder.java
+++ b/common/src/main/java/org/apache/drill/common/expression/ExpressionStringBuilder.java
@@ -247,7 +247,9 @@ public class ExpressionStringBuilder extends AbstractExprVisitor<Void, StringBui
     switch(mt.getMinorType()){
     case FLOAT4:
     case FLOAT8:
+    case BIT:
     case INT:
+    case TINYINT:
     case SMALLINT:
     case BIGINT:
     case UINT1:

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/4d13046d/exec/java-exec/src/main/java/org/apache/drill/exec/expr/ExpressionTreeMaterializer.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/expr/ExpressionTreeMaterializer.java b/exec/java-exec/src/main/java/org/apache/drill/exec/expr/ExpressionTreeMaterializer.java
index 4594c43..18cd894 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/expr/ExpressionTreeMaterializer.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/expr/ExpressionTreeMaterializer.java
@@ -510,22 +510,30 @@ public class ExpressionTreeMaterializer {
       case BIGINT:
       case BIT:
       case TINYINT:
+      case SMALLINT:
       case UINT1:
       case UINT2:
       case UINT4:
       case UINT8:
+      case TIME:
+      case TIMESTAMP:
+      case TIMESTAMPTZ:
+      case DATE:
+      case INTERVAL:
+      case INTERVALDAY:
+      case INTERVALYEAR:
         // nothing else matters.
         return true;
-    case DECIMAL9:
-    case DECIMAL18:
-    case DECIMAL28DENSE:
-    case DECIMAL28SPARSE:
-    case DECIMAL38DENSE:
-    case DECIMAL38SPARSE:
-      if (to.getScale() == from.getScale() && to.getPrecision() == from.getPrecision()) {
-        return true;
-      }
-      return false;
+      case DECIMAL9:
+      case DECIMAL18:
+      case DECIMAL28DENSE:
+      case DECIMAL28SPARSE:
+      case DECIMAL38DENSE:
+      case DECIMAL38SPARSE:
+        if (to.getScale() == from.getScale() && to.getPrecision() == from.getPrecision()) {
+          return true;
+        }
+        return false;
 
       case FIXED16CHAR:
       case FIXEDBINARY:


[27/32] DRILL-1024: Move Hive storage code out of 'exec/java-exec' into the 'contrib/storage-hive' module.

Posted by ja...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/980dc87b/exec/java-exec/src/main/java/org/apache/drill/exec/expr/fn/HiveFunctionRegistry.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/expr/fn/HiveFunctionRegistry.java b/exec/java-exec/src/main/java/org/apache/drill/exec/expr/fn/HiveFunctionRegistry.java
deleted file mode 100644
index 72cadc9..0000000
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/expr/fn/HiveFunctionRegistry.java
+++ /dev/null
@@ -1,166 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.drill.exec.expr.fn;
-
-import java.util.HashSet;
-import java.util.Set;
-
-import com.google.common.collect.Sets;
-import org.apache.drill.common.config.DrillConfig;
-import org.apache.drill.common.expression.FunctionCall;
-import org.apache.drill.common.types.TypeProtos.MajorType;
-import org.apache.drill.common.types.Types;
-import org.apache.drill.common.util.PathScanner;
-import org.apache.drill.exec.expr.fn.impl.hive.ObjectInspectorHelper;
-import org.apache.drill.exec.planner.sql.DrillOperatorTable;
-import org.apache.drill.exec.planner.sql.HiveUDFOperator;
-import org.apache.hadoop.hive.ql.exec.Description;
-import org.apache.hadoop.hive.ql.exec.UDF;
-import org.apache.hadoop.hive.ql.exec.UDFArgumentException;
-import org.apache.hadoop.hive.ql.udf.UDFType;
-import org.apache.hadoop.hive.ql.udf.generic.GenericUDF;
-import org.apache.hadoop.hive.ql.udf.generic.GenericUDFBridge;
-import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
-
-import com.google.common.collect.ArrayListMultimap;
-
-public class HiveFunctionRegistry implements PluggableFunctionRegistry{
-  static final org.slf4j.Logger logger = org.slf4j.LoggerFactory.getLogger(HiveFunctionRegistry.class);
-
-  private ArrayListMultimap<String, Class<? extends GenericUDF>> methodsGenericUDF = ArrayListMultimap.create();
-  private ArrayListMultimap<String, Class<? extends UDF>> methodsUDF = ArrayListMultimap.create();
-  private HashSet<Class<?>> nonDeterministicUDFs = new HashSet<>();
-
-  /**
-   * Scan the classpath for implementation of GenericUDF/UDF interfaces,
-   * extracts function annotation and store the
-   * (function name) --> (implementation class) mappings.
-   * @param config
-   */
-  public HiveFunctionRegistry(DrillConfig config){
-    Set<Class<? extends GenericUDF>> genericUDFClasses = PathScanner.scanForImplementations(GenericUDF.class, null);
-    for (Class<? extends GenericUDF> clazz : genericUDFClasses)
-      register(clazz, methodsGenericUDF);
-
-    Set<Class<? extends UDF>> udfClasses = PathScanner.scanForImplementations(UDF.class, null);
-    for (Class<? extends UDF> clazz : udfClasses)
-      register(clazz, methodsUDF);
-  }
-
-  @Override
-  public void register(DrillOperatorTable operatorTable) {
-    for (String name : Sets.union(methodsGenericUDF.asMap().keySet(), methodsUDF.asMap().keySet())) {
-      operatorTable.add(name, new HiveUDFOperator(name.toUpperCase()));
-    }
-  }
-
-  private <C,I> void register(Class<? extends I> clazz, ArrayListMultimap<String,Class<? extends I>> methods) {
-    Description desc = clazz.getAnnotation(Description.class);
-    String[] names;
-    if(desc != null){
-      names = desc.name().split(",");
-      for(int i=0; i<names.length; i++) names[i] = names[i].trim();
-    }else{
-      names = new String[]{clazz.getName().replace('.', '_')};
-    }
-    
-    UDFType type = clazz.getAnnotation(UDFType.class);
-    if (type != null && type.deterministic()) nonDeterministicUDFs.add(clazz);
-
-
-    for(int i=0; i<names.length;i++){
-      methods.put(names[i].toLowerCase(), clazz);
-    }
-  }
-
-  /**
-   * Find the UDF class for given function name and check if it accepts the given input argument
-   * types. If a match is found, create a holder and return
-   * @param call
-   * @return
-   */
-  @Override
-  public HiveFuncHolder getFunction(FunctionCall call){
-    HiveFuncHolder holder;
-    MajorType[] argTypes = new MajorType[call.args.size()];
-    ObjectInspector[] argOIs = new ObjectInspector[call.args.size()];
-    for(int i=0; i<call.args.size(); i++) {
-      argTypes[i] = call.args.get(i).getMajorType();
-      argOIs[i] = ObjectInspectorHelper.getDrillObjectInspector(argTypes[i].getMinorType());
-    }
-
-    String funcName = call.getName().toLowerCase();
-
-    // search in GenericUDF list
-    for(Class<? extends GenericUDF> clazz: methodsGenericUDF.get(funcName)) {
-      holder = matchAndCreateGenericUDFHolder(clazz, argTypes, argOIs);
-      if(holder != null)
-        return holder;
-    }
-
-    // search in UDF list
-    for (Class<? extends UDF> clazz : methodsUDF.get(funcName)) {
-      holder = matchAndCreateUDFHolder(call.getName(), clazz, argTypes, argOIs);
-      if (holder != null)
-        return holder;
-    }
-
-    return null;
-  }
-
-  private HiveFuncHolder matchAndCreateGenericUDFHolder(Class<? extends GenericUDF> udfClazz,
-                                              MajorType[] argTypes,
-                                              ObjectInspector[] argOIs) {
-    // probe UDF to find if the arg types and acceptable
-    // if acceptable create a holder object
-    try {
-      GenericUDF udfInstance = udfClazz.newInstance();
-      ObjectInspector returnOI = udfInstance.initialize(argOIs);
-      return new HiveFuncHolder(
-        udfClazz,
-        argTypes,
-        returnOI,
-        Types.optional(ObjectInspectorHelper.getDrillType(returnOI)),
-        nonDeterministicUDFs.contains(udfClazz));
-    } catch(IllegalAccessException | InstantiationException e) {
-      logger.debug("Failed to instantiate class", e);
-    } catch(Exception e) { /*ignore this*/ }
-
-    return null;
-  }
-
-  private HiveFuncHolder matchAndCreateUDFHolder(String udfName,
-                                                 Class<? extends UDF> udfClazz,
-                                                 MajorType[] argTypes,
-                                                 ObjectInspector[] argOIs) {
-    try {
-      GenericUDF udfInstance = new GenericUDFBridge(udfName, false/* is operator */, udfClazz);
-      ObjectInspector returnOI = udfInstance.initialize(argOIs);
-
-      return new HiveFuncHolder(
-        udfName,
-        udfClazz,
-        argTypes,
-        returnOI,
-        Types.optional(ObjectInspectorHelper.getDrillType(returnOI)),
-        nonDeterministicUDFs.contains(udfClazz));
-    } catch(Exception e) { /*ignore this*/ }
-
-    return null;
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/980dc87b/exec/java-exec/src/main/java/org/apache/drill/exec/expr/fn/impl/hive/AbstractPrimitiveObjectInspector.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/expr/fn/impl/hive/AbstractPrimitiveObjectInspector.java b/exec/java-exec/src/main/java/org/apache/drill/exec/expr/fn/impl/hive/AbstractPrimitiveObjectInspector.java
deleted file mode 100644
index 04b552e..0000000
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/expr/fn/impl/hive/AbstractPrimitiveObjectInspector.java
+++ /dev/null
@@ -1,70 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.drill.exec.expr.fn.impl.hive;
-
-import org.apache.hadoop.hive.serde2.objectinspector.PrimitiveObjectInspector;
-import org.apache.hadoop.hive.serde2.typeinfo.BaseTypeParams;
-
-
-public abstract class AbstractPrimitiveObjectInspector implements PrimitiveObjectInspector {
-
-  @Override
-  public Class<?> getPrimitiveWritableClass() {
-    throw new UnsupportedOperationException();
-  }
-
-  @Override
-  public Object getPrimitiveWritableObject(Object o) {
-    throw new UnsupportedOperationException();
-  }
-
-  @Override
-  public Class<?> getJavaPrimitiveClass() {
-    throw new UnsupportedOperationException();
-  }
-
-  @Override
-  public Object getPrimitiveJavaObject(Object o) {
-    throw new UnsupportedOperationException();
-  }
-
-  @Override
-  public Object copyObject(Object o) {
-    throw new UnsupportedOperationException();
-  }
-
-  @Override
-  public boolean preferWritable() {
-    return false;
-  }
-
-  @Override
-  public BaseTypeParams getTypeParams() {
-    throw new UnsupportedOperationException();
-  }
-
-  @Override
-  public void setTypeParams(BaseTypeParams baseTypeParams) {
-    throw new UnsupportedOperationException();
-  }
-
-  @Override
-  public Category getCategory() {
-    return Category.PRIMITIVE;
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/980dc87b/exec/java-exec/src/main/java/org/apache/drill/exec/expr/fn/impl/hive/DrillDeferredObject.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/expr/fn/impl/hive/DrillDeferredObject.java b/exec/java-exec/src/main/java/org/apache/drill/exec/expr/fn/impl/hive/DrillDeferredObject.java
deleted file mode 100644
index fbc5f05..0000000
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/expr/fn/impl/hive/DrillDeferredObject.java
+++ /dev/null
@@ -1,34 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.drill.exec.expr.fn.impl.hive;
-
-import org.apache.drill.exec.expr.holders.ValueHolder;
-import org.apache.hadoop.hive.ql.metadata.HiveException;
-import org.apache.hadoop.hive.ql.udf.generic.GenericUDF;
-
-public class DrillDeferredObject implements GenericUDF.DeferredObject {
-  public ValueHolder valueHolder;
-
-  @Override
-  public void prepare(int version) throws HiveException {}
-
-  @Override
-  public Object get() throws HiveException {
-    return valueHolder;
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/980dc87b/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/HiveUDFOperator.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/HiveUDFOperator.java b/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/HiveUDFOperator.java
deleted file mode 100644
index 71860c3..0000000
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/HiveUDFOperator.java
+++ /dev/null
@@ -1,70 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.drill.exec.planner.sql;
-
-import org.eigenbase.reltype.RelDataType;
-import org.eigenbase.sql.SqlCall;
-import org.eigenbase.sql.SqlCallBinding;
-import org.eigenbase.sql.SqlFunction;
-import org.eigenbase.sql.SqlFunctionCategory;
-import org.eigenbase.sql.SqlIdentifier;
-import org.eigenbase.sql.SqlOperandCountRange;
-import org.eigenbase.sql.SqlOperator;
-import org.eigenbase.sql.parser.SqlParserPos;
-import org.eigenbase.sql.type.SqlOperandCountRanges;
-import org.eigenbase.sql.type.SqlOperandTypeChecker;
-import org.eigenbase.sql.type.SqlTypeName;
-import org.eigenbase.sql.validate.SqlValidator;
-import org.eigenbase.sql.validate.SqlValidatorScope;
-
-public class HiveUDFOperator extends SqlFunction {
-
-  public HiveUDFOperator(String name) {
-    super(new SqlIdentifier(name, SqlParserPos.ZERO), DynamicReturnType.INSTANCE, null, new ArgChecker(), null,
-        SqlFunctionCategory.USER_DEFINED_FUNCTION);
-  }
-
-  @Override
-  public RelDataType deriveType(SqlValidator validator, SqlValidatorScope scope, SqlCall call) {
-    return validator.getTypeFactory().createSqlType(SqlTypeName.ANY);
-  }
-
-  /** Argument Checker for variable number of arguments */
-  public static class ArgChecker implements SqlOperandTypeChecker {
-
-    public static ArgChecker INSTANCE = new ArgChecker();
-
-    private SqlOperandCountRange range = SqlOperandCountRanges.any();
-
-    @Override
-    public boolean checkOperandTypes(SqlCallBinding callBinding, boolean throwOnFailure) {
-      return true;
-    }
-
-    @Override
-    public SqlOperandCountRange getOperandCountRange() {
-      return range;
-    }
-
-    @Override
-    public String getAllowedSignatures(SqlOperator op, String opName) {
-      return opName + "(HiveUDF - Opaque)";
-    }
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/980dc87b/exec/java-exec/src/main/java/org/apache/drill/exec/record/RecordBatchLoader.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/record/RecordBatchLoader.java b/exec/java-exec/src/main/java/org/apache/drill/exec/record/RecordBatchLoader.java
index 33bcb3b..ce58f3b 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/record/RecordBatchLoader.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/record/RecordBatchLoader.java
@@ -23,8 +23,6 @@ import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
 
-import javax.jdo.metadata.FieldMetadata;
-
 import io.netty.buffer.EmptyByteBuf;
 import org.apache.drill.common.expression.SchemaPath;
 import org.apache.drill.exec.exception.SchemaChangeException;

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/980dc87b/exec/java-exec/src/main/java/org/apache/drill/exec/record/WritableBatch.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/record/WritableBatch.java b/exec/java-exec/src/main/java/org/apache/drill/exec/record/WritableBatch.java
index 480b34b..97f665a 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/record/WritableBatch.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/record/WritableBatch.java
@@ -22,8 +22,6 @@ import io.netty.buffer.CompositeByteBuf;
 
 import java.util.List;
 
-import javax.jdo.metadata.FieldMetadata;
-
 import org.apache.drill.exec.proto.UserBitShared.RecordBatchDef;
 import org.apache.drill.exec.proto.UserBitShared.SerializedField;
 import org.apache.drill.exec.record.BatchSchema.SelectionVectorMode;

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/980dc87b/exec/java-exec/src/main/java/org/apache/drill/exec/store/hive/HiveFieldConverter.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/store/hive/HiveFieldConverter.java b/exec/java-exec/src/main/java/org/apache/drill/exec/store/hive/HiveFieldConverter.java
deleted file mode 100644
index 5095d90..0000000
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/store/hive/HiveFieldConverter.java
+++ /dev/null
@@ -1,200 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.drill.exec.store.hive;
-
-import java.util.Map;
-
-import org.apache.drill.exec.vector.NullableBigIntVector;
-import org.apache.drill.exec.vector.NullableBitVector;
-import org.apache.drill.exec.vector.NullableDateVector;
-import org.apache.drill.exec.vector.NullableFloat4Vector;
-import org.apache.drill.exec.vector.NullableFloat8Vector;
-import org.apache.drill.exec.vector.NullableIntVector;
-import org.apache.drill.exec.vector.NullableSmallIntVector;
-import org.apache.drill.exec.vector.NullableTimeStampVector;
-import org.apache.drill.exec.vector.NullableTinyIntVector;
-import org.apache.drill.exec.vector.NullableVarBinaryVector;
-import org.apache.drill.exec.vector.NullableVarCharVector;
-import org.apache.drill.exec.vector.ValueVector;
-import org.apache.hadoop.hive.common.type.HiveDecimal;
-import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
-import org.apache.hadoop.hive.serde2.objectinspector.PrimitiveObjectInspector.PrimitiveCategory;
-import org.apache.hadoop.hive.serde2.objectinspector.primitive.BinaryObjectInspector;
-import org.apache.hadoop.hive.serde2.objectinspector.primitive.BooleanObjectInspector;
-import org.apache.hadoop.hive.serde2.objectinspector.primitive.ByteObjectInspector;
-import org.apache.hadoop.hive.serde2.objectinspector.primitive.DateObjectInspector;
-import org.apache.hadoop.hive.serde2.objectinspector.primitive.DoubleObjectInspector;
-import org.apache.hadoop.hive.serde2.objectinspector.primitive.FloatObjectInspector;
-import org.apache.hadoop.hive.serde2.objectinspector.primitive.HiveDecimalObjectInspector;
-import org.apache.hadoop.hive.serde2.objectinspector.primitive.HiveVarcharObjectInspector;
-import org.apache.hadoop.hive.serde2.objectinspector.primitive.IntObjectInspector;
-import org.apache.hadoop.hive.serde2.objectinspector.primitive.LongObjectInspector;
-import org.apache.hadoop.hive.serde2.objectinspector.primitive.ShortObjectInspector;
-import org.apache.hadoop.hive.serde2.objectinspector.primitive.StringObjectInspector;
-import org.apache.hadoop.hive.serde2.objectinspector.primitive.TimestampObjectInspector;
-import org.apache.hadoop.hive.serde2.typeinfo.PrimitiveTypeInfo;
-import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo;
-import org.apache.hadoop.io.Text;
-import org.joda.time.DateTime;
-import org.joda.time.DateTimeZone;
-
-import com.google.common.collect.Maps;
-
-public abstract class HiveFieldConverter {
-
-  public abstract boolean setSafeValue(ObjectInspector oi, Object hiveFieldValue, ValueVector outputVV, int outputIndex);
-
-  private static Map<PrimitiveCategory, Class< ? extends HiveFieldConverter>> primMap = Maps.newHashMap();
-
-  static {
-    primMap.put(PrimitiveCategory.BINARY, Binary.class);
-    primMap.put(PrimitiveCategory.BOOLEAN, Boolean.class);
-    primMap.put(PrimitiveCategory.BYTE, Byte.class);
-    primMap.put(PrimitiveCategory.DECIMAL, Decimal.class);
-    primMap.put(PrimitiveCategory.DOUBLE, Double.class);
-    primMap.put(PrimitiveCategory.FLOAT, Float.class);
-    primMap.put(PrimitiveCategory.INT, Int.class);
-    primMap.put(PrimitiveCategory.LONG, Long.class);
-    primMap.put(PrimitiveCategory.SHORT, Short.class);
-    primMap.put(PrimitiveCategory.STRING, String.class);
-    primMap.put(PrimitiveCategory.VARCHAR, VarChar.class);
-    primMap.put(PrimitiveCategory.TIMESTAMP, Timestamp.class);
-    primMap.put(PrimitiveCategory.DATE, Date.class);
-  }
-
-
-  public static HiveFieldConverter create(TypeInfo typeInfo) throws IllegalAccessException, InstantiationException {
-    switch (typeInfo.getCategory()) {
-      case PRIMITIVE:
-        final PrimitiveCategory pCat = ((PrimitiveTypeInfo) typeInfo).getPrimitiveCategory();
-        Class<? extends HiveFieldConverter> clazz = primMap.get(pCat);
-        if (clazz != null)
-          return clazz.newInstance();
-
-        HiveRecordReader.throwUnsupportedHiveDataTypeError(pCat.toString());
-        break;
-
-      case LIST:
-      case MAP:
-      case STRUCT:
-      case UNION:
-      default:
-        HiveRecordReader.throwUnsupportedHiveDataTypeError(typeInfo.getCategory().toString());
-    }
-
-    return null;
-  }
-
-  public static class Binary extends HiveFieldConverter {
-    public boolean setSafeValue(ObjectInspector oi, Object hiveFieldValue, ValueVector outputVV, int outputIndex) {
-      final byte[] value = ((BinaryObjectInspector)oi).getPrimitiveJavaObject(hiveFieldValue);
-      return ((NullableVarBinaryVector) outputVV).getMutator().setSafe(outputIndex, value, 0, value.length);
-    }
-  }
-
-  public static class Boolean extends HiveFieldConverter {
-    public boolean setSafeValue(ObjectInspector oi, Object hiveFieldValue, ValueVector outputVV, int outputIndex) {
-      final boolean value = (boolean) ((BooleanObjectInspector)oi).getPrimitiveJavaObject(hiveFieldValue);
-      return ((NullableBitVector) outputVV).getMutator().setSafe(outputIndex, value ? 1 : 0);
-    }
-  }
-
-  public static class Byte extends HiveFieldConverter {
-    public boolean setSafeValue(ObjectInspector oi, Object hiveFieldValue, ValueVector outputVV, int outputIndex) {
-      final byte value = (byte) ((ByteObjectInspector)oi).getPrimitiveJavaObject(hiveFieldValue);
-      return ((NullableTinyIntVector) outputVV).getMutator().setSafe(outputIndex, value);
-    }
-  }
-
-  public static class Decimal extends HiveFieldConverter {
-    public boolean setSafeValue(ObjectInspector oi, Object hiveFieldValue, ValueVector outputVV, int outputIndex) {
-      final HiveDecimal value = ((HiveDecimalObjectInspector)oi).getPrimitiveJavaObject(hiveFieldValue);
-      final byte[] strBytes = value.toString().getBytes();
-      return ((NullableVarCharVector) outputVV).getMutator().setSafe(outputIndex, strBytes, 0, strBytes.length);
-    }
-  }
-
-  public static class Double extends HiveFieldConverter {
-    public boolean setSafeValue(ObjectInspector oi, Object hiveFieldValue, ValueVector outputVV, int outputIndex) {
-      final double value = (double) ((DoubleObjectInspector)oi).getPrimitiveJavaObject(hiveFieldValue);
-      return ((NullableFloat8Vector) outputVV).getMutator().setSafe(outputIndex, value);
-    }
-  }
-
-  public static class Float extends HiveFieldConverter {
-    public boolean setSafeValue(ObjectInspector oi, Object hiveFieldValue, ValueVector outputVV, int outputIndex) {
-      final float value = (float) ((FloatObjectInspector)oi).getPrimitiveJavaObject(hiveFieldValue);
-      return ((NullableFloat4Vector) outputVV).getMutator().setSafe(outputIndex, value);
-    }
-  }
-
-  public static class Int extends HiveFieldConverter {
-    public boolean setSafeValue(ObjectInspector oi, Object hiveFieldValue, ValueVector outputVV, int outputIndex) {
-      final int value = (int) ((IntObjectInspector)oi).getPrimitiveJavaObject(hiveFieldValue);
-      return ((NullableIntVector) outputVV).getMutator().setSafe(outputIndex, value);
-    }
-  }
-
-  public static class Long extends HiveFieldConverter {
-    public boolean setSafeValue(ObjectInspector oi, Object hiveFieldValue, ValueVector outputVV, int outputIndex) {
-      final long value = (long) ((LongObjectInspector)oi).getPrimitiveJavaObject(hiveFieldValue);
-      return ((NullableBigIntVector) outputVV).getMutator().setSafe(outputIndex, value);
-    }
-  }
-
-  public static class Short extends HiveFieldConverter {
-    public boolean setSafeValue(ObjectInspector oi, Object hiveFieldValue, ValueVector outputVV, int outputIndex) {
-      final short value = (short) ((ShortObjectInspector)oi).getPrimitiveJavaObject(hiveFieldValue);
-      return ((NullableSmallIntVector) outputVV).getMutator().setSafe(outputIndex, value);
-    }
-  }
-
-  public static class String extends HiveFieldConverter {
-    public boolean setSafeValue(ObjectInspector oi, Object hiveFieldValue, ValueVector outputVV, int outputIndex) {
-      final Text value = ((StringObjectInspector)oi).getPrimitiveWritableObject(hiveFieldValue);
-      final byte[] valueBytes = value.getBytes();
-      final int len = value.getLength();
-      return ((NullableVarCharVector) outputVV).getMutator().setSafe(outputIndex, valueBytes, 0, len);
-    }
-  }
-
-  public static class VarChar extends HiveFieldConverter {
-    public boolean setSafeValue(ObjectInspector oi, Object hiveFieldValue, ValueVector outputVV, int outputIndex) {
-      final Text value = ((HiveVarcharObjectInspector)oi).getPrimitiveWritableObject(hiveFieldValue).getTextValue();
-      final byte[] valueBytes = value.getBytes();
-      final int valueLen = value.getLength();
-      return ((NullableVarCharVector) outputVV).getMutator().setSafe(outputIndex, valueBytes, 0, valueLen);
-    }
-  }
-
-  public static class Timestamp extends HiveFieldConverter {
-    public boolean setSafeValue(ObjectInspector oi, Object hiveFieldValue, ValueVector outputVV, int outputIndex) {
-      final java.sql.Timestamp value = ((TimestampObjectInspector)oi).getPrimitiveJavaObject(hiveFieldValue);
-      final DateTime ts = new DateTime(value.getTime()).withZoneRetainFields(DateTimeZone.UTC);
-      return ((NullableTimeStampVector) outputVV).getMutator().setSafe(outputIndex, ts.getMillis());
-    }
-  }
-
-  public static class Date extends HiveFieldConverter {
-    public boolean setSafeValue(ObjectInspector oi, Object hiveFieldValue, ValueVector outputVV, int outputIndex) {
-      final java.sql.Date value = ((DateObjectInspector)oi).getPrimitiveJavaObject(hiveFieldValue);
-      final DateTime date = new DateTime(value.getTime()).withZoneRetainFields(DateTimeZone.UTC);
-      return ((NullableDateVector) outputVV).getMutator().setSafe(outputIndex, date.getMillis());
-    }
-  }
-}
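
For context, a hedged sketch of how a converter produced by this factory is driven; the typeInfo, fieldOI, hiveValue, vector and rowIndex variables are assumed to come from the surrounding reader:

    // Hypothetical wiring, mirroring how HiveRecordReader below uses the converters.
    HiveFieldConverter converter = HiveFieldConverter.create(typeInfo);
    if (hiveValue != null) {
      boolean fits = converter.setSafeValue(fieldOI, hiveValue, vector, rowIndex);
      if (!fits) {
        // Value did not fit in the allocated vector buffer: stop the current
        // batch here and retry this record in the next one.
      }
    }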

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/980dc87b/exec/java-exec/src/main/java/org/apache/drill/exec/store/hive/HiveReadEntry.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/store/hive/HiveReadEntry.java b/exec/java-exec/src/main/java/org/apache/drill/exec/store/hive/HiveReadEntry.java
deleted file mode 100644
index 32f793e..0000000
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/store/hive/HiveReadEntry.java
+++ /dev/null
@@ -1,72 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.drill.exec.store.hive;
-
-import java.util.List;
-
-import net.hydromatic.optiq.Schema.TableType;
-
-import org.apache.hadoop.hive.metastore.api.Partition;
-import org.apache.hadoop.hive.metastore.api.Table;
-
-import com.fasterxml.jackson.annotation.JsonCreator;
-import com.fasterxml.jackson.annotation.JsonIgnore;
-import com.fasterxml.jackson.annotation.JsonProperty;
-import com.google.common.collect.Lists;
-
-public class HiveReadEntry {
-
-  @JsonProperty("table")
-  public HiveTable table;
-  @JsonProperty("partitions")
-  public List<HiveTable.HivePartition> partitions;
-
-  @JsonIgnore
-  private List<Partition> partitionsUnwrapped = Lists.newArrayList();
-
-  @JsonCreator
-  public HiveReadEntry(@JsonProperty("table") HiveTable table, @JsonProperty("partitions") List<HiveTable.HivePartition> partitions) {
-    this.table = table;
-    this.partitions = partitions;
-    if (partitions != null) {
-      for(HiveTable.HivePartition part : partitions) {
-        partitionsUnwrapped.add(part.getPartition());
-      }
-    }
-  }
-
-  @JsonIgnore
-  public Table getTable() {
-    return table.getTable();
-  }
-
-  @JsonIgnore
-  public List<Partition> getPartitions() {
-    return partitionsUnwrapped;
-  }
-
-  @JsonIgnore
-  public TableType getJdbcTableType() {
-    if (table.getTable().getTableType().equals(org.apache.hadoop.hive.metastore.TableType.VIRTUAL_VIEW.toString())) {
-      return TableType.VIEW;
-    }
-
-    return TableType.TABLE;
-  }
-}
-
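
As a rough illustration (all field contents are hypothetical and elided), the JSON shape Jackson produces and consumes for this entry is:

    {
      "table" : { ... serialized HiveTable ... },
      "partitions" : [ { ... serialized HiveTable.HivePartition ... } ]
    }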

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/980dc87b/exec/java-exec/src/main/java/org/apache/drill/exec/store/hive/HiveRecordReader.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/store/hive/HiveRecordReader.java b/exec/java-exec/src/main/java/org/apache/drill/exec/store/hive/HiveRecordReader.java
deleted file mode 100644
index c062f8c..0000000
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/store/hive/HiveRecordReader.java
+++ /dev/null
@@ -1,542 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.drill.exec.store.hive;
-
-import java.io.IOException;
-import java.sql.Timestamp;
-import java.sql.Date;
-import java.util.List;
-import java.util.Map;
-import java.util.Properties;
-
-import org.apache.drill.common.exceptions.DrillRuntimeException;
-import org.apache.drill.common.exceptions.ExecutionSetupException;
-import org.apache.drill.common.expression.SchemaPath;
-import org.apache.drill.common.types.TypeProtos;
-import org.apache.drill.common.types.TypeProtos.MajorType;
-import org.apache.drill.common.types.TypeProtos.MinorType;
-import org.apache.drill.common.types.Types;
-import org.apache.drill.exec.exception.SchemaChangeException;
-import org.apache.drill.exec.expr.TypeHelper;
-import org.apache.drill.exec.ops.FragmentContext;
-import org.apache.drill.exec.physical.impl.OutputMutator;
-import org.apache.drill.exec.record.MaterializedField;
-import org.apache.drill.exec.store.RecordReader;
-import org.apache.drill.exec.vector.BigIntVector;
-import org.apache.drill.exec.vector.BitVector;
-import org.apache.drill.exec.vector.Float4Vector;
-import org.apache.drill.exec.vector.Float8Vector;
-import org.apache.drill.exec.vector.IntVector;
-import org.apache.drill.exec.vector.TimeStampVector;
-import org.apache.drill.exec.vector.DateVector;
-import org.apache.drill.exec.vector.SmallIntVector;
-import org.apache.drill.exec.vector.TinyIntVector;
-import org.apache.drill.exec.vector.ValueVector;
-import org.apache.drill.exec.vector.VarBinaryVector;
-import org.apache.drill.exec.vector.VarCharVector;
-import org.apache.drill.exec.vector.allocator.VectorAllocator;
-import org.apache.hadoop.hive.common.type.HiveDecimal;
-import org.apache.hadoop.hive.metastore.MetaStoreUtils;
-import org.apache.hadoop.hive.metastore.api.FieldSchema;
-import org.apache.hadoop.hive.metastore.api.Partition;
-import org.apache.hadoop.hive.metastore.api.Table;
-import org.apache.hadoop.hive.serde2.ColumnProjectionUtils;
-import org.apache.hadoop.hive.serde2.SerDe;
-import org.apache.hadoop.hive.serde2.SerDeException;
-import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
-import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector.Category;
-import org.apache.hadoop.hive.serde2.objectinspector.PrimitiveObjectInspector.PrimitiveCategory;
-import org.apache.hadoop.hive.serde2.objectinspector.StructObjectInspector;
-import org.apache.hadoop.hive.serde2.typeinfo.PrimitiveTypeInfo;
-import org.apache.hadoop.hive.serde2.typeinfo.StructTypeInfo;
-import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo;
-import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils;
-import org.apache.hadoop.io.Writable;
-import org.apache.hadoop.mapred.InputFormat;
-import org.apache.hadoop.mapred.InputSplit;
-import org.apache.hadoop.mapred.JobConf;
-import org.apache.hadoop.mapred.Reporter;
-
-import org.joda.time.DateTime;
-import org.joda.time.DateTimeZone;
-
-import com.google.common.collect.Lists;
-
-public class HiveRecordReader implements RecordReader {
-
-  protected Table table;
-  protected Partition partition;
-  protected InputSplit inputSplit;
-  protected FragmentContext context;
-  protected List<SchemaPath> projectedColumns;
-  protected List<String> selectedColumnNames;
-  protected List<TypeInfo> selectedColumnTypes = Lists.newArrayList();
-  protected List<ObjectInspector> selectedColumnObjInspectors = Lists.newArrayList();
-  protected List<HiveFieldConverter> selectedColumnFieldConverters = Lists.newArrayList();
-  protected List<String> selectedPartitionNames = Lists.newArrayList();
-  protected List<TypeInfo> selectedPartitionTypes = Lists.newArrayList();
-  protected List<Object> selectedPartitionValues = Lists.newArrayList();
-  protected List<String> tableColumns; // all columns in table (not including partition columns)
-  protected SerDe serde;
-  protected StructObjectInspector sInspector;
-  protected Object key, value;
-  protected org.apache.hadoop.mapred.RecordReader reader;
-  protected List<ValueVector> vectors = Lists.newArrayList();
-  protected List<ValueVector> pVectors = Lists.newArrayList();
-  protected Object redoRecord;
-  protected boolean empty;
-
-  protected static final int TARGET_RECORD_COUNT = 4000;
-  protected static final int FIELD_SIZE = 50;
-
-  public HiveRecordReader(Table table, Partition partition, InputSplit inputSplit, List<SchemaPath> projectedColumns,
-      FragmentContext context) throws ExecutionSetupException {
-    this.table = table;
-    this.partition = partition;
-    this.inputSplit = inputSplit;
-    this.context = context;
-    this.projectedColumns = projectedColumns;
-    this.empty = (inputSplit == null && partition == null);
-    init();
-  }
-
-  private void init() throws ExecutionSetupException {
-    Properties properties;
-    JobConf job = new JobConf();
-    if (partition != null) {
-      properties = MetaStoreUtils.getPartitionMetadata(partition, table);
-
-      // SerDe expects properties from Table, but above call doesn't add Table properties.
-      // Include Table properties in final list in order to not to break SerDes that depend on
-      // Table properties. For example AvroSerDe gets the schema from properties (passed as second argument)
-      for (Map.Entry<String, String> entry : table.getParameters().entrySet()) {
-        if (entry.getKey() != null && entry.getValue() != null) {
-          properties.put(entry.getKey(), entry.getValue());
-        }
-      }
-    } else {
-      properties = MetaStoreUtils.getTableMetadata(table);
-    }
-    for (Object obj : properties.keySet()) {
-      job.set((String) obj, (String) properties.get(obj));
-    }
-    InputFormat format;
-    String sLib = (partition == null) ? table.getSd().getSerdeInfo().getSerializationLib() : partition.getSd().getSerdeInfo().getSerializationLib();
-    String inputFormatName = (partition == null) ? table.getSd().getInputFormat() : partition.getSd().getInputFormat();
-    try {
-      format = (InputFormat) Class.forName(inputFormatName).getConstructor().newInstance();
-      Class c = Class.forName(sLib);
-      serde = (SerDe) c.getConstructor().newInstance();
-      serde.initialize(job, properties);
-    } catch (ReflectiveOperationException | SerDeException e) {
-      throw new ExecutionSetupException("Unable to instantiate InputFormat", e);
-    }
-    job.setInputFormat(format.getClass());
-
-    List<FieldSchema> partitionKeys = table.getPartitionKeys();
-    List<String> partitionNames = Lists.newArrayList();
-    for (FieldSchema field : partitionKeys) {
-      partitionNames.add(field.getName());
-    }
-
-    try {
-      ObjectInspector oi = serde.getObjectInspector();
-      if (oi.getCategory() != ObjectInspector.Category.STRUCT) {
-        throw new UnsupportedOperationException(String.format("%s category not supported", oi.getCategory()));
-      }
-      sInspector = (StructObjectInspector) oi;
-      StructTypeInfo sTypeInfo = (StructTypeInfo) TypeInfoUtils.getTypeInfoFromObjectInspector(sInspector);
-      if (projectedColumns == null) {
-        selectedColumnNames = sTypeInfo.getAllStructFieldNames();
-        tableColumns = selectedColumnNames;
-      } else {
-        tableColumns = sTypeInfo.getAllStructFieldNames();
-        List<Integer> columnIds = Lists.newArrayList();
-        selectedColumnNames = Lists.newArrayList();
-        for (SchemaPath field : projectedColumns) {
-          String columnName = field.getRootSegment().getPath(); //TODO?
-          if (!tableColumns.contains(columnName)) {
-            if (partitionNames.contains(columnName)) {
-              selectedPartitionNames.add(columnName);
-            } else {
-              throw new ExecutionSetupException(String.format("Column %s does not exist", columnName));
-            }
-          } else {
-            columnIds.add(tableColumns.indexOf(columnName));
-            selectedColumnNames.add(columnName);
-          }
-        }
-        ColumnProjectionUtils.appendReadColumnIDs(job, columnIds);
-        ColumnProjectionUtils.appendReadColumnNames(job, selectedColumnNames);
-      }
-
-      for (String columnName : selectedColumnNames) {
-        ObjectInspector fieldOI = sInspector.getStructFieldRef(columnName).getFieldObjectInspector();
-        TypeInfo typeInfo = TypeInfoUtils.getTypeInfoFromTypeString(fieldOI.getTypeName());
-
-        selectedColumnObjInspectors.add(fieldOI);
-        selectedColumnTypes.add(typeInfo);
-        selectedColumnFieldConverters.add(HiveFieldConverter.create(typeInfo));
-      }
-
-      if (projectedColumns == null) {
-        selectedPartitionNames = partitionNames;
-      }
-
-      for (int i = 0; i < table.getPartitionKeys().size(); i++) {
-        FieldSchema field = table.getPartitionKeys().get(i);
-        if (selectedPartitionNames.contains(field.getName())) {
-          TypeInfo pType = TypeInfoUtils.getTypeInfoFromTypeString(field.getType());
-          selectedPartitionTypes.add(pType);
-
-          if (partition != null) {
-            selectedPartitionValues.add(convertPartitionType(pType, partition.getValues().get(i)));
-          }
-        }
-      }
-    } catch (Exception e) {
-      throw new ExecutionSetupException("Failure while initializing HiveRecordReader: " + e.getMessage(), e);
-    }
-
-    if (!empty) {
-      try {
-        reader = format.getRecordReader(inputSplit, job, Reporter.NULL);
-      } catch (IOException e) {
-        throw new ExecutionSetupException("Failed to get o.a.hadoop.mapred.RecordReader from Hive InputFormat", e);
-      }
-      key = reader.createKey();
-      value = reader.createValue();
-    }
-  }
-
-  @Override
-  public void setup(OutputMutator output) throws ExecutionSetupException {
-    try {
-      for (int i = 0; i < selectedColumnNames.size(); i++) {
-        MajorType type = Types.optional(getMinorTypeFromHiveTypeInfo(selectedColumnTypes.get(i)));
-        MaterializedField field = MaterializedField.create(SchemaPath.getSimplePath(selectedColumnNames.get(i)), type);
-        Class vvClass = TypeHelper.getValueVectorClass(type.getMinorType(), type.getMode());
-        vectors.add(output.addField(field, vvClass));
-      }
-
-      for (int i = 0; i < selectedPartitionNames.size(); i++) {
-        MajorType type = Types.required(getMinorTypeFromHiveTypeInfo(selectedPartitionTypes.get(i)));
-        MaterializedField field = MaterializedField.create(SchemaPath.getSimplePath(selectedPartitionNames.get(i)), type);
-        Class vvClass = TypeHelper.getValueVectorClass(field.getType().getMinorType(), field.getDataMode());
-        pVectors.add(output.addField(field, vvClass));
-      }
-    } catch(SchemaChangeException e) {
-      throw new ExecutionSetupException(e);
-    }
-  }
-
-  @Override
-  public int next() {
-    if (empty) {
-      return 0;
-    }
-
-    for (ValueVector vv : vectors) {
-      VectorAllocator.getAllocator(vv, FIELD_SIZE).alloc(TARGET_RECORD_COUNT);
-    }
-
-    try {
-      int recordCount = 0;
-
-      if (redoRecord != null) {
-        // Try writing the record that didn't fit into the last RecordBatch
-        Object deSerializedValue = serde.deserialize((Writable) redoRecord);
-        boolean status = readHiveRecordAndInsertIntoRecordBatch(deSerializedValue, recordCount);
-        if (!status) {
-          throw new DrillRuntimeException("Current record is too big to fit into allocated ValueVector buffer");
-        }
-        redoRecord = null;
-        recordCount++;
-      }
-
-      while (recordCount < TARGET_RECORD_COUNT && reader.next(key, value)) {
-        Object deSerializedValue = serde.deserialize((Writable) value);
-        boolean status = readHiveRecordAndInsertIntoRecordBatch(deSerializedValue, recordCount);
-        if (!status) {
-          redoRecord = value;
-          setValueCountAndPopulatePartitionVectors(recordCount);
-          return recordCount;
-        }
-        recordCount++;
-      }
-
-      setValueCountAndPopulatePartitionVectors(recordCount);
-      return recordCount;
-    } catch (IOException | SerDeException e) {
-      throw new DrillRuntimeException(e);
-    }
-  }
-
-  private boolean readHiveRecordAndInsertIntoRecordBatch(Object deSerializedValue, int outputRecordIndex) {
-    boolean success;
-    for (int i = 0; i < selectedColumnNames.size(); i++) {
-      String columnName = selectedColumnNames.get(i);
-      Object hiveValue = sInspector.getStructFieldData(deSerializedValue, sInspector.getStructFieldRef(columnName));
-
-      if (hiveValue != null) {
-        success = selectedColumnFieldConverters.get(i).setSafeValue(selectedColumnObjInspectors.get(i), hiveValue,
-            vectors.get(i), outputRecordIndex);
-
-        if (!success) {
-          return false;
-        }
-      }
-    }
-
-    return true;
-  }
-
-  private void setValueCountAndPopulatePartitionVectors(int recordCount) {
-    for (ValueVector v : vectors) {
-      v.getMutator().setValueCount(recordCount);
-    }
-
-    if (partition != null) {
-      populatePartitionVectors(recordCount);
-    }
-  }
-
-  @Override
-  public void cleanup() {
-  }
-
-  public static MinorType getMinorTypeFromHivePrimitiveTypeInfo(PrimitiveTypeInfo primitiveTypeInfo) {
-    switch(primitiveTypeInfo.getPrimitiveCategory()) {
-      case BINARY:
-        return TypeProtos.MinorType.VARBINARY;
-      case BOOLEAN:
-        return TypeProtos.MinorType.BIT;
-      case BYTE:
-        return TypeProtos.MinorType.TINYINT;
-      case DECIMAL:
-        return TypeProtos.MinorType.VARCHAR;
-      case DOUBLE:
-        return TypeProtos.MinorType.FLOAT8;
-      case FLOAT:
-        return TypeProtos.MinorType.FLOAT4;
-      case INT:
-        return TypeProtos.MinorType.INT;
-      case LONG:
-        return TypeProtos.MinorType.BIGINT;
-      case SHORT:
-        return TypeProtos.MinorType.SMALLINT;
-      case STRING:
-      case VARCHAR:
-        return TypeProtos.MinorType.VARCHAR;
-      case TIMESTAMP:
-        return TypeProtos.MinorType.TIMESTAMP;
-      case DATE:
-        return TypeProtos.MinorType.DATE;
-    }
-
-    throwUnsupportedHiveDataTypeError(primitiveTypeInfo.getPrimitiveCategory().toString());
-    return null;
-  }
-
-  public static MinorType getMinorTypeFromHiveTypeInfo(TypeInfo typeInfo) {
-    switch (typeInfo.getCategory()) {
-      case PRIMITIVE:
-        return getMinorTypeFromHivePrimitiveTypeInfo(((PrimitiveTypeInfo) typeInfo));
-
-      case LIST:
-      case MAP:
-      case STRUCT:
-      case UNION:
-      default:
-        throwUnsupportedHiveDataTypeError(typeInfo.getCategory().toString());
-    }
-
-    return null;
-  }
-
-  protected void populatePartitionVectors(int recordCount) {
-    for (int i = 0; i < pVectors.size(); i++) {
-      int size = 50;
-      ValueVector vector = pVectors.get(i);
-      Object val = selectedPartitionValues.get(i);
-      PrimitiveCategory pCat = ((PrimitiveTypeInfo)selectedPartitionTypes.get(i)).getPrimitiveCategory();
-      if (pCat == PrimitiveCategory.BINARY || pCat == PrimitiveCategory.STRING || pCat == PrimitiveCategory.VARCHAR) {
-        size = ((byte[]) selectedPartitionValues.get(i)).length;
-      }
-
-      VectorAllocator.getAllocator(vector, size).alloc(recordCount);
-
-      switch(pCat) {
-        case BINARY: {
-          VarBinaryVector v = (VarBinaryVector) vector;
-          byte[] value = (byte[]) val;
-          for (int j = 0; j < recordCount; j++) {
-            v.getMutator().setSafe(j, value);
-          }
-          break;
-        }
-        case BOOLEAN: {
-          BitVector v = (BitVector) vector;
-          Boolean value = (Boolean) val;
-          for (int j = 0; j < recordCount; j++) {
-            v.getMutator().set(j, value ? 1 : 0);
-          }
-          break;
-        }
-        case BYTE: {
-          TinyIntVector v = (TinyIntVector) vector;
-          byte value = (byte) val;
-          for (int j = 0; j < recordCount; j++) {
-            v.getMutator().setSafe(j, value);
-          }
-          break;
-        }
-        case DOUBLE: {
-          Float8Vector v = (Float8Vector) vector;
-          double value = (double) val;
-          for (int j = 0; j < recordCount; j++) {
-            v.getMutator().setSafe(j, value);
-          }
-          break;
-        }
-        case FLOAT: {
-          Float4Vector v = (Float4Vector) vector;
-          float value = (float) val;
-          for (int j = 0; j < recordCount; j++) {
-            v.getMutator().setSafe(j, value);
-          }
-          break;
-        }
-        case INT: {
-          IntVector v = (IntVector) vector;
-          int value = (int) val;
-          for (int j = 0; j < recordCount; j++) {
-            v.getMutator().setSafe(j, value);
-          }
-          break;
-        }
-        case LONG: {
-          BigIntVector v = (BigIntVector) vector;
-          long value = (long) val;
-          for (int j = 0; j < recordCount; j++) {
-            v.getMutator().setSafe(j, value);
-          }
-          break;
-        }
-        case SHORT: {
-          SmallIntVector v = (SmallIntVector) vector;
-          short value = (short) val;
-          for (int j = 0; j < recordCount; j++) {
-            v.getMutator().setSafe(j, value);
-          }
-          break;
-        }
-        case VARCHAR:
-        case STRING: {
-          VarCharVector v = (VarCharVector) vector;
-          byte[] value = (byte[]) val;
-          for (int j = 0; j < recordCount; j++) {
-            v.getMutator().setSafe(j, value);
-          }
-          break;
-        }
-        case TIMESTAMP: {
-          TimeStampVector v = (TimeStampVector) vector;
-          DateTime ts = new DateTime(((Timestamp) val).getTime()).withZoneRetainFields(DateTimeZone.UTC);
-          long value = ts.getMillis();
-          for (int j = 0; j < recordCount; j++) {
-            v.getMutator().setSafe(j, value);
-          }
-          break;
-        }
-        case DATE: {
-          DateVector v = (DateVector) vector;
-          DateTime date = new DateTime(((Date)val).getTime()).withZoneRetainFields(DateTimeZone.UTC);
-          long value = date.getMillis();
-          for (int j = 0; j < recordCount; j++) {
-            v.getMutator().setSafe(j, value);
-          }
-          break;
-        }
-        case DECIMAL: {
-          VarCharVector v = (VarCharVector) vector;
-          byte[] value = ((HiveDecimal) val).toString().getBytes();
-          for (int j = 0; j < recordCount; j++) {
-            v.getMutator().setSafe(j, value);
-          }
-          break;
-        }
-        default:
-          throwUnsupportedHiveDataTypeError(pCat.toString());
-      }
-      vector.getMutator().setValueCount(recordCount);
-    }
-  }
-
-  /** Partition value is received in string format. Convert it into appropriate object based on the type. */
-  private Object convertPartitionType(TypeInfo typeInfo, String value) {
-    if (typeInfo.getCategory() != Category.PRIMITIVE) {
-      // In Hive only primitive types are allowed as partition column types.
-      throw new DrillRuntimeException("Non-Primitive types are not allowed as partition column type in Hive, " +
-          "but received one: " + typeInfo.getCategory());
-    }
-
-    PrimitiveCategory pCat = ((PrimitiveTypeInfo) typeInfo).getPrimitiveCategory();
-    switch (pCat) {
-      case BINARY:
-        return value.getBytes();
-      case BOOLEAN:
-        return Boolean.parseBoolean(value);
-      case BYTE:
-        return Byte.parseByte(value);
-      case DECIMAL:
-        return new HiveDecimal(value);
-      case DOUBLE:
-        return Double.parseDouble(value);
-      case FLOAT:
-        return Float.parseFloat(value);
-      case INT:
-        return Integer.parseInt(value);
-      case LONG:
-        return Long.parseLong(value);
-      case SHORT:
-        return Short.parseShort(value);
-      case STRING:
-      case VARCHAR:
-        return value.getBytes();
-      case TIMESTAMP:
-        return Timestamp.valueOf(value);
-      case DATE:
-        return Date.valueOf(value);
-    }
-
-    throwUnsupportedHiveDataTypeError(pCat.toString());
-    return null;
-  }
-
-  public static void throwUnsupportedHiveDataTypeError(String unsupportedType) {
-    StringBuilder errMsg = new StringBuilder();
-    errMsg.append(String.format("Unsupported Hive data type %s. ", unsupportedType));
-    errMsg.append(System.getProperty("line.separator"));
-    errMsg.append("Following Hive data types are supported in Drill for querying: ");
-    errMsg.append(
-        "BOOLEAN, BYTE, SHORT, INT, LONG, FLOAT, DOUBLE, DATE, TIMESTAMP, BINARY, DECIMAL, STRING, and VARCHAR");
-
-    throw new RuntimeException(errMsg.toString());
-  }
-}
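
A hedged sketch of the consumption contract this reader implements; the driver loop and outputMutator are assumed to be supplied by Drill's ScanBatch:

    // Hypothetical driver; ScanBatch owns the real loop.
    HiveRecordReader reader = new HiveRecordReader(table, partition, split, columns, context);
    reader.setup(outputMutator);          // one ValueVector per projected column
    int count;
    while ((count = reader.next()) > 0) { // up to TARGET_RECORD_COUNT rows per call
      // downstream operators consume the populated vectors here; a record that
      // overflowed the buffers is retried via 'redoRecord' on the next call
    }
    reader.cleanup();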

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/980dc87b/exec/java-exec/src/main/java/org/apache/drill/exec/store/hive/HiveScan.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/store/hive/HiveScan.java b/exec/java-exec/src/main/java/org/apache/drill/exec/store/hive/HiveScan.java
deleted file mode 100644
index 2f217d9..0000000
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/store/hive/HiveScan.java
+++ /dev/null
@@ -1,296 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.drill.exec.store.hive;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.Properties;
-
-import org.apache.commons.codec.binary.Base64;
-import org.apache.drill.common.exceptions.DrillRuntimeException;
-import org.apache.drill.common.exceptions.ExecutionSetupException;
-import org.apache.drill.common.expression.SchemaPath;
-import org.apache.drill.exec.physical.EndpointAffinity;
-import org.apache.drill.exec.physical.base.AbstractGroupScan;
-import org.apache.drill.exec.physical.base.GroupScan;
-import org.apache.drill.exec.physical.base.PhysicalOperator;
-import org.apache.drill.exec.physical.base.ScanStats;
-import org.apache.drill.exec.physical.base.SubScan;
-import org.apache.drill.exec.physical.base.ScanStats.GroupScanProperty;
-import org.apache.drill.exec.proto.CoordinationProtos;
-import org.apache.drill.exec.proto.CoordinationProtos.DrillbitEndpoint;
-import org.apache.drill.exec.store.StoragePluginRegistry;
-import org.apache.drill.exec.store.schedule.CompleteFileWork;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hive.metastore.MetaStoreUtils;
-import org.apache.hadoop.hive.metastore.api.Partition;
-import org.apache.hadoop.hive.metastore.api.Table;
-import org.apache.hadoop.mapred.FileInputFormat;
-import org.apache.hadoop.mapred.InputFormat;
-import org.apache.hadoop.mapred.InputSplit;
-import org.apache.hadoop.mapred.JobConf;
-
-import com.fasterxml.jackson.annotation.JacksonInject;
-import com.fasterxml.jackson.annotation.JsonCreator;
-import com.fasterxml.jackson.annotation.JsonIgnore;
-import com.fasterxml.jackson.annotation.JsonProperty;
-import com.fasterxml.jackson.annotation.JsonTypeName;
-import com.google.common.base.Preconditions;
-import com.google.common.collect.Lists;
-import com.google.common.io.ByteArrayDataOutput;
-import com.google.common.io.ByteStreams;
-
-@JsonTypeName("hive-scan")
-public class HiveScan extends AbstractGroupScan {
-  static final org.slf4j.Logger logger = org.slf4j.LoggerFactory.getLogger(HiveScan.class);
-
-  @JsonProperty("hive-table")
-  public HiveReadEntry hiveReadEntry;
-  @JsonIgnore
-  private final Table table;
-  @JsonIgnore
-  private List<InputSplit> inputSplits = Lists.newArrayList();
-  @JsonIgnore
-  public HiveStoragePlugin storagePlugin;
-  @JsonProperty("storage-plugin")
-  public String storagePluginName;
-
-  @JsonIgnore
-  public List<Partition> partitions;
-  @JsonIgnore
-  private final Collection<DrillbitEndpoint> endpoints;
-
-  @JsonProperty("columns")
-  public List<SchemaPath> columns;
-
-  @JsonIgnore
-  List<List<InputSplit>> mappings;
-
-  @JsonIgnore
-  Map<InputSplit, Partition> partitionMap = new HashMap<>();
-
-  @JsonCreator
-  public HiveScan(@JsonProperty("hive-table") HiveReadEntry hiveReadEntry, @JsonProperty("storage-plugin") String storagePluginName,
-      @JsonProperty("columns") List<SchemaPath> columns,
-      @JacksonInject StoragePluginRegistry pluginRegistry) throws ExecutionSetupException {
-    this.hiveReadEntry = hiveReadEntry;
-    this.table = hiveReadEntry.getTable();
-    this.storagePluginName = storagePluginName;
-    this.storagePlugin = (HiveStoragePlugin) pluginRegistry.getPlugin(storagePluginName);
-    this.columns = columns;
-    this.partitions = hiveReadEntry.getPartitions();
-    getSplits();
-    endpoints = storagePlugin.getContext().getBits();
-  }
-
-  public HiveScan(HiveReadEntry hiveReadEntry, HiveStoragePlugin storagePlugin, List<SchemaPath> columns) throws ExecutionSetupException {
-    this.table = hiveReadEntry.getTable();
-    this.hiveReadEntry = hiveReadEntry;
-    this.columns = columns;
-    this.partitions = hiveReadEntry.getPartitions();
-    getSplits();
-    endpoints = storagePlugin.getContext().getBits();
-    this.storagePluginName = storagePlugin.getName();
-  }
-
-  private HiveScan(HiveScan that) {
-    this.columns = that.columns;
-    this.endpoints = that.endpoints;
-    this.hiveReadEntry = that.hiveReadEntry;
-    this.inputSplits = that.inputSplits;
-    this.mappings = that.mappings;
-    this.partitionMap = that.partitionMap;
-    this.partitions = that.partitions;
-    this.storagePlugin = that.storagePlugin;
-    this.storagePluginName = that.storagePluginName;
-    this.table = that.table;
-  }
-
-  public List<SchemaPath> getColumns() {
-    return columns;
-  }
-
-  private void getSplits() throws ExecutionSetupException {
-    try {
-      if (partitions == null || partitions.size() == 0) {
-        Properties properties = MetaStoreUtils.getTableMetadata(table);
-        JobConf job = new JobConf();
-        for (Object obj : properties.keySet()) {
-          job.set((String) obj, (String) properties.get(obj));
-        }
-        InputFormat<?, ?> format = (InputFormat<?, ?>) Class.forName(table.getSd().getInputFormat()).getConstructor().newInstance();
-        job.setInputFormat(format.getClass());
-        Path path = new Path(table.getSd().getLocation());
-        FileInputFormat.addInputPath(job, path);
-        format = job.getInputFormat();
-        for (InputSplit split : format.getSplits(job, 1)) {
-          inputSplits.add(split);
-        }
-        for (InputSplit split : inputSplits) {
-          partitionMap.put(split, null);
-        }
-      } else {
-        for (Partition partition : partitions) {
-          Properties properties = MetaStoreUtils.getPartitionMetadata(partition, table);
-          JobConf job = new JobConf();
-          for (Object obj : properties.keySet()) {
-            job.set((String) obj, (String) properties.get(obj));
-          }
-          InputFormat<?, ?> format = (InputFormat<?, ?>) Class.forName(partition.getSd().getInputFormat()).getConstructor().newInstance();
-          job.setInputFormat(format.getClass());
-          FileInputFormat.addInputPath(job, new Path(partition.getSd().getLocation()));
-          format = job.getInputFormat();
-          InputSplit[] splits = format.getSplits(job, 1);
-          for (InputSplit split : splits) {
-            inputSplits.add(split);
-            partitionMap.put(split, partition);
-          }
-        }
-      }
-    } catch (ReflectiveOperationException | IOException e) {
-      throw new ExecutionSetupException(e);
-    }
-  }
-
-  @Override
-  public void applyAssignments(List<CoordinationProtos.DrillbitEndpoint> endpoints) {
-    mappings = Lists.newArrayList();
-    for (int i = 0; i < endpoints.size(); i++) {
-      mappings.add(new ArrayList<InputSplit>());
-    }
-    int count = endpoints.size();
-    for (int i = 0; i < inputSplits.size(); i++) {
-      mappings.get(i % count).add(inputSplits.get(i));
-    }
-  }
-
-  public static String serializeInputSplit(InputSplit split) throws IOException {
-    ByteArrayDataOutput byteArrayOutputStream = ByteStreams.newDataOutput();
-    split.write(byteArrayOutputStream);
-    String encoded = Base64.encodeBase64String(byteArrayOutputStream.toByteArray());
-    logger.debug("Encoded split string for split {} : {}", split, encoded);
-    return encoded;
-  }
-
-  @Override
-  public SubScan getSpecificScan(int minorFragmentId) throws ExecutionSetupException {
-    try {
-      List<InputSplit> splits = mappings.get(minorFragmentId);
-      List<Partition> parts = Lists.newArrayList();
-      List<String> encodedInputSplits = Lists.newArrayList();
-      List<String> splitTypes = Lists.newArrayList();
-      for (InputSplit split : splits) {
-        parts.add(partitionMap.get(split));
-        encodedInputSplits.add(serializeInputSplit(split));
-        splitTypes.add(split.getClass().getCanonicalName());
-      }
-      if (parts.contains(null)) parts = null;
-      return new HiveSubScan(encodedInputSplits, hiveReadEntry, splitTypes, columns);
-    } catch (IOException | ReflectiveOperationException e) {
-      throw new ExecutionSetupException(e);
-    }
-  }
-
-  @Override
-  public int getMaxParallelizationWidth() {
-    return inputSplits.size();
-  }
-
-  @Override
-  public List<EndpointAffinity> getOperatorAffinity() {
-    Map<String, DrillbitEndpoint> endpointMap = new HashMap<>();
-    for (DrillbitEndpoint endpoint : endpoints) {
-      endpointMap.put(endpoint.getAddress(), endpoint);
-      logger.debug("endpoing address: {}", endpoint.getAddress());
-    }
-    Map<DrillbitEndpoint, EndpointAffinity> affinityMap = new HashMap<>();
-    try {
-      long totalSize = 0;
-      for (InputSplit split : inputSplits) {
-        totalSize += Math.max(1, split.getLength());
-      }
-      for (InputSplit split : inputSplits) {
-        float affinity = ((float) Math.max(1, split.getLength())) / totalSize;
-        for (String loc : split.getLocations()) {
-          logger.debug("split location: {}", loc);
-          DrillbitEndpoint endpoint = endpointMap.get(loc);
-          if (endpoint != null) {
-            if (affinityMap.containsKey(endpoint)) {
-              affinityMap.get(endpoint).addAffinity(affinity);
-            } else {
-              affinityMap.put(endpoint, new EndpointAffinity(endpoint, affinity));
-            }
-          }
-        }
-      }
-    } catch (IOException e) {
-      throw new DrillRuntimeException(e);
-    }
-    for (DrillbitEndpoint ep : affinityMap.keySet()) {
-      Preconditions.checkNotNull(ep);
-    }
-    for (EndpointAffinity a : affinityMap.values()) {
-      Preconditions.checkNotNull(a.getEndpoint());
-    }
-    return Lists.newArrayList(affinityMap.values());
-  }
-
-  @Override
-  public ScanStats getScanStats() {
-    try {
-      long data = 0;
-      for (InputSplit split : inputSplits) {
-        data += split.getLength();
-      }
-
-      long estRowCount = data / 1024;
-      return new ScanStats(GroupScanProperty.NO_EXACT_ROW_COUNT, estRowCount, 1, data);
-    } catch (IOException e) {
-      throw new DrillRuntimeException(e);
-    }
-  }
-
-  @Override
-  public PhysicalOperator getNewWithChildren(List<PhysicalOperator> children) throws ExecutionSetupException {
-    return new HiveScan(this);
-  }
-
-  @Override
-  public String getDigest() {
-    return toString();
-  }
-
-  @Override
-  public String toString() {
-    return "HiveScan [table=" + table
-        + ", inputSplits=" + inputSplits
-        + ", columns=" + columns + "]";
-  }
-
-  @Override
-  public GroupScan clone(List<SchemaPath> columns) {
-    HiveScan newScan = new HiveScan(this);
-    newScan.columns = columns;
-    return newScan;
-  }
-
-}
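
applyAssignments above distributes splits round-robin over the assigned minor fragments; a small standalone illustration of the same i % count rule (the fragment and split counts are made up):

    import java.util.ArrayList;
    import java.util.List;

    public class RoundRobinDemo {
      public static void main(String[] args) {
        int fragments = 3, splits = 7;           // made-up counts
        List<List<Integer>> mappings = new ArrayList<>();
        for (int i = 0; i < fragments; i++) {
          mappings.add(new ArrayList<Integer>());
        }
        for (int i = 0; i < splits; i++) {
          mappings.get(i % fragments).add(i);    // same rule as applyAssignments
        }
        System.out.println(mappings);            // [[0, 3, 6], [1, 4], [2, 5]]
      }
    }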

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/980dc87b/exec/java-exec/src/main/java/org/apache/drill/exec/store/hive/HiveScanBatchCreator.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/store/hive/HiveScanBatchCreator.java b/exec/java-exec/src/main/java/org/apache/drill/exec/store/hive/HiveScanBatchCreator.java
deleted file mode 100644
index 6e540ad..0000000
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/store/hive/HiveScanBatchCreator.java
+++ /dev/null
@@ -1,72 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.drill.exec.store.hive;
-
-import com.google.common.collect.Lists;
-import org.apache.drill.common.exceptions.ExecutionSetupException;
-import org.apache.drill.exec.ops.FragmentContext;
-import org.apache.drill.exec.physical.impl.BatchCreator;
-import org.apache.drill.exec.physical.impl.ScanBatch;
-import org.apache.drill.exec.record.RecordBatch;
-import org.apache.drill.exec.store.RecordReader;
-import org.apache.hadoop.hive.metastore.api.Partition;
-import org.apache.hadoop.hive.metastore.api.Table;
-import org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe;
-import org.apache.hadoop.mapred.InputSplit;
-import org.apache.hadoop.mapred.TextInputFormat;
-
-import java.util.List;
-
-public class HiveScanBatchCreator implements BatchCreator<HiveSubScan> {
-
-  @Override
-  public RecordBatch getBatch(FragmentContext context, HiveSubScan config, List<RecordBatch> children) throws ExecutionSetupException {
-    List<RecordReader> readers = Lists.newArrayList();
-    Table table = config.getTable();
-    List<InputSplit> splits = config.getInputSplits();
-    List<Partition> partitions = config.getPartitions();
-    boolean hasPartitions = (partitions != null && partitions.size() > 0);
-    int i = 0;
-
-    // Native hive text record reader doesn't handle all types currently. For now use HiveRecordReader which uses
-    // Hive InputFormat and SerDe classes to read the data.
-    //if (table.getSd().getInputFormat().equals(TextInputFormat.class.getCanonicalName()) &&
-    //        table.getSd().getSerdeInfo().getSerializationLib().equals(LazySimpleSerDe.class.getCanonicalName()) &&
-    //        config.getColumns() != null) {
-    //  for (InputSplit split : splits) {
-    //    readers.add(new HiveTextRecordReader(table,
-    //        (hasPartitions ? partitions.get(i++) : null),
-    //        split, config.getColumns(), context));
-    //  }
-    //} else {
-      for (InputSplit split : splits) {
-        readers.add(new HiveRecordReader(table,
-            (hasPartitions ? partitions.get(i++) : null),
-            split, config.getColumns(), context));
-      }
-    //}
-
-    // If there are no readers created (which is possible when the table is empty), create an empty RecordReader to
-    // output the schema
-    if (readers.size() == 0) {
-      readers.add(new HiveRecordReader(table, null, null, config.getColumns(), context));
-    }
-
-    return new ScanBatch(config, context, readers.iterator());
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/980dc87b/exec/java-exec/src/main/java/org/apache/drill/exec/store/hive/HiveStoragePlugin.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/store/hive/HiveStoragePlugin.java b/exec/java-exec/src/main/java/org/apache/drill/exec/store/hive/HiveStoragePlugin.java
deleted file mode 100644
index c5a6e2c..0000000
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/store/hive/HiveStoragePlugin.java
+++ /dev/null
@@ -1,87 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.drill.exec.store.hive;
-
-import java.io.IOException;
-import java.util.List;
-
-import net.hydromatic.optiq.Schema;
-import net.hydromatic.optiq.Schema.TableType;
-import net.hydromatic.optiq.SchemaPlus;
-
-import org.apache.drill.common.JSONOptions;
-import org.apache.drill.common.exceptions.ExecutionSetupException;
-import org.apache.drill.common.expression.SchemaPath;
-import org.apache.drill.exec.rpc.user.UserSession;
-import org.apache.drill.exec.server.DrillbitContext;
-import org.apache.drill.exec.store.AbstractStoragePlugin;
-import org.apache.drill.exec.store.hive.schema.HiveSchemaFactory;
-import org.apache.hadoop.hive.conf.HiveConf;
-
-import com.fasterxml.jackson.core.type.TypeReference;
-import com.fasterxml.jackson.databind.ObjectMapper;
-
-public class HiveStoragePlugin extends AbstractStoragePlugin {
-
-  static final org.slf4j.Logger logger = org.slf4j.LoggerFactory.getLogger(HiveStoragePlugin.class);
-
-  private final HiveStoragePluginConfig config;
-  private final HiveSchemaFactory schemaFactory;
-  private final DrillbitContext context;
-  private final String name;
-
-  public HiveStoragePlugin(HiveStoragePluginConfig config, DrillbitContext context, String name) throws ExecutionSetupException {
-    this.config = config;
-    this.context = context;
-    this.schemaFactory = new HiveSchemaFactory(this, name, config.getHiveConf());
-    this.name = name;
-  }
-
-  public HiveStoragePluginConfig getConfig() {
-    return config;
-  }
-
-  public String getName(){
-    return name;
-  }
-
-  public DrillbitContext getContext() {
-    return context;
-  }
-
-  @Override
-  public HiveScan getPhysicalScan(JSONOptions selection, List<SchemaPath> columns) throws IOException {
-    HiveReadEntry hiveReadEntry = selection.getListWith(new ObjectMapper(), new TypeReference<HiveReadEntry>(){});
-    try {
-      if (hiveReadEntry.getJdbcTableType() == TableType.VIEW) {
-        throw new UnsupportedOperationException("Querying Hive views from Drill is not supported in current version.");
-      }
-
-      return new HiveScan(hiveReadEntry, this, null);
-    } catch (ExecutionSetupException e) {
-      throw new IOException(e);
-    }
-  }
-
-  @Override
-  public void registerSchemas(UserSession session, SchemaPlus parent) {
-    schemaFactory.registerSchemas(session, parent);
-  }
-
-
-}

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/980dc87b/exec/java-exec/src/main/java/org/apache/drill/exec/store/hive/HiveStoragePluginConfig.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/store/hive/HiveStoragePluginConfig.java b/exec/java-exec/src/main/java/org/apache/drill/exec/store/hive/HiveStoragePluginConfig.java
deleted file mode 100644
index cbd7906..0000000
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/store/hive/HiveStoragePluginConfig.java
+++ /dev/null
@@ -1,74 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.drill.exec.store.hive;
-
-import com.fasterxml.jackson.annotation.JsonCreator;
-import com.fasterxml.jackson.annotation.JsonIgnore;
-import com.fasterxml.jackson.annotation.JsonProperty;
-import com.fasterxml.jackson.annotation.JsonTypeName;
-
-import org.apache.drill.common.logical.StoragePluginConfigBase;
-import org.apache.hadoop.hive.conf.HiveConf;
-
-import java.util.Map;
-
-@JsonTypeName(HiveStoragePluginConfig.NAME)
-public class HiveStoragePluginConfig extends StoragePluginConfigBase {
-  @JsonProperty
-  public Map<String, String> configProps;
-  @JsonIgnore
-  private HiveConf hiveConf;
-
-  public static final String NAME = "hive";
-
-  @JsonIgnore
-  public HiveConf getHiveConf() {
-    if (hiveConf == null) {
-      hiveConf = new HiveConf();
-      if (configProps != null) {
-        for (Map.Entry<String, String> entry : configProps.entrySet()) {
-          hiveConf.set(entry.getKey(), entry.getValue());
-        }
-      }
-    }
-
-    return hiveConf;
-  }
-
-  @JsonCreator
-  public HiveStoragePluginConfig(@JsonProperty("config") Map<String, String> props) {
-    this.configProps = props;
-  }
-
-  @Override
-  public int hashCode() {
-    return configProps != null ? configProps.hashCode() : 0;
-  }
-
-  @Override
-  public boolean equals(Object o) {
-    if (this == o) return true;
-    if (o == null || getClass() != o.getClass()) return false;
-
-    HiveStoragePluginConfig that = (HiveStoragePluginConfig) o;
-
-    if (configProps != null ? !configProps.equals(that.configProps) : that.configProps != null) return false;
-
-    return true;
-  }
-}
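
For reference, a minimal storage plugin definition that would deserialize into this config might look like the following; the metastore URI is a placeholder:

    {
      "type" : "hive",
      "config" : {
        "hive.metastore.uris" : "thrift://metastore-host:9083"
      }
    }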

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/980dc87b/exec/java-exec/src/main/java/org/apache/drill/exec/store/hive/HiveSubScan.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/store/hive/HiveSubScan.java b/exec/java-exec/src/main/java/org/apache/drill/exec/store/hive/HiveSubScan.java
deleted file mode 100644
index c0e479a..0000000
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/store/hive/HiveSubScan.java
+++ /dev/null
@@ -1,145 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.drill.exec.store.hive;
-
-import java.io.IOException;
-import java.util.Iterator;
-import java.util.List;
-
-import org.apache.commons.codec.binary.Base64;
-import org.apache.drill.common.exceptions.ExecutionSetupException;
-import org.apache.drill.common.expression.SchemaPath;
-import org.apache.drill.exec.physical.base.AbstractBase;
-import org.apache.drill.exec.physical.base.PhysicalOperator;
-import org.apache.drill.exec.physical.base.PhysicalVisitor;
-import org.apache.drill.exec.physical.base.SubScan;
-import org.apache.drill.exec.proto.UserBitShared.CoreOperatorType;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hive.metastore.api.Partition;
-import org.apache.hadoop.hive.metastore.api.Table;
-import org.apache.hadoop.mapred.FileSplit;
-import org.apache.hadoop.mapred.InputSplit;
-
-import com.fasterxml.jackson.annotation.JsonCreator;
-import com.fasterxml.jackson.annotation.JsonIgnore;
-import com.fasterxml.jackson.annotation.JsonProperty;
-import com.fasterxml.jackson.annotation.JsonTypeName;
-import com.google.common.collect.Iterators;
-import com.google.common.collect.Lists;
-import com.google.common.io.ByteArrayDataInput;
-import com.google.common.io.ByteStreams;
-
-@JsonTypeName("hive-sub-scan")
-public class HiveSubScan extends AbstractBase implements SubScan {
-
-  private List<String> splits;
-
-  private HiveReadEntry hiveReadEntry;
-
-  private List<String> splitClasses;
-
-  private List<SchemaPath> columns;
-
-  @JsonIgnore
-  private List<InputSplit> inputSplits = Lists.newArrayList();
-  @JsonIgnore
-  private Table table;
-  @JsonIgnore
-  private List<Partition> partitions;
-
-  @JsonCreator
-  public HiveSubScan(@JsonProperty("splits") List<String> splits,
-                     @JsonProperty("hiveReadEntry") HiveReadEntry hiveReadEntry,
-                     @JsonProperty("splitClasses") List<String> splitClasses,
-                     @JsonProperty("columns") List<SchemaPath> columns) throws IOException, ReflectiveOperationException {
-    this.hiveReadEntry = hiveReadEntry;
-    this.table = hiveReadEntry.getTable();
-    this.partitions = hiveReadEntry.getPartitions();
-    this.splits = splits;
-    this.splitClasses = splitClasses;
-    this.columns = columns;
-
-    for (int i = 0; i < splits.size(); i++) {
-      inputSplits.add(deserializeInputSplit(splits.get(i), splitClasses.get(i)));
-    }
-  }
-
-  public List<String> getSplits() {
-    return splits;
-  }
-
-  public Table getTable() {
-    return table;
-  }
-
-  public List<Partition> getPartitions() {
-    return partitions;
-  }
-
-  public List<String> getSplitClasses() {
-    return splitClasses;
-  }
-
-  public List<SchemaPath> getColumns() {
-    return columns;
-  }
-
-  public List<InputSplit> getInputSplits() {
-    return inputSplits;
-  }
-
-  public HiveReadEntry getHiveReadEntry() {
-    return hiveReadEntry;
-  }
-
-  public static InputSplit deserializeInputSplit(String base64, String className) throws IOException, ReflectiveOperationException{
-    InputSplit split;
-    if (Class.forName(className) == FileSplit.class) {
-      split = new FileSplit((Path) null, 0, 0, (String[])null);
-    } else {
-      split = (InputSplit) Class.forName(className).getConstructor().newInstance();
-    }
-    ByteArrayDataInput byteArrayDataInput = ByteStreams.newDataInput(Base64.decodeBase64(base64));
-    split.readFields(byteArrayDataInput);
-    return split;
-  }
-
-  @Override
-  public <T, X, E extends Throwable> T accept(PhysicalVisitor<T, X, E> physicalVisitor, X value) throws E {
-    return physicalVisitor.visitSubScan(this, value);
-  }
-
-  @Override
-  public PhysicalOperator getNewWithChildren(List<PhysicalOperator> children) throws ExecutionSetupException {
-    try {
-      return new HiveSubScan(splits, hiveReadEntry, splitClasses, columns);
-    } catch (IOException | ReflectiveOperationException e) {
-      throw new ExecutionSetupException(e);
-    }
-  }
-
-  @Override
-  public Iterator<PhysicalOperator> iterator() {
-    return Iterators.emptyIterator();
-  }
-
-  @Override
-  public int getOperatorType() {
-    return CoreOperatorType.HIVE_SUB_SCAN_VALUE;
-  }
-}
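
The deserializeInputSplit() helper above rebuilds a Hadoop InputSplit from its
Base64-encoded Writable form: instantiate the right split class, then call
readFields() on the decoded bytes. Below is a minimal round-trip sketch of the same
pattern, assuming Hadoop and commons-codec on the classpath; serializeInputSplit is
a hypothetical counterpart added for illustration, not part of this commit.

    import java.io.IOException;

    import org.apache.commons.codec.binary.Base64;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.mapred.FileSplit;

    import com.google.common.io.ByteArrayDataOutput;
    import com.google.common.io.ByteStreams;

    public class InputSplitCodecSketch {
      // Hypothetical counterpart to deserializeInputSplit(): write the split's
      // Writable fields, then Base64-encode them for JSON-friendly transport.
      public static String serializeInputSplit(FileSplit split) throws IOException {
        ByteArrayDataOutput out = ByteStreams.newDataOutput();
        split.write(out);
        return Base64.encodeBase64String(out.toByteArray());
      }

      public static void main(String[] args) throws Exception {
        FileSplit original = new FileSplit(new Path("/tmp/data.csv"), 0, 1024, new String[0]);
        String base64 = serializeInputSplit(original);

        // Same steps as deserializeInputSplit(): empty split, then readFields().
        FileSplit copy = new FileSplit((Path) null, 0, 0, (String[]) null);
        copy.readFields(ByteStreams.newDataInput(Base64.decodeBase64(base64)));
        System.out.println(copy.getPath() + " @" + copy.getStart() + "+" + copy.getLength());
      }
    }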


[28/32] DRILL-1024: Move hive storage code out of 'exec/java-exec' into 'contrib/storage-hive' module.

Posted by ja...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/980dc87b/contrib/storage-hive/core/src/main/java/org/apache/drill/exec/store/hive/schema/HiveSchemaFactory.java
----------------------------------------------------------------------
diff --git a/contrib/storage-hive/core/src/main/java/org/apache/drill/exec/store/hive/schema/HiveSchemaFactory.java b/contrib/storage-hive/core/src/main/java/org/apache/drill/exec/store/hive/schema/HiveSchemaFactory.java
new file mode 100644
index 0000000..7e6b92b
--- /dev/null
+++ b/contrib/storage-hive/core/src/main/java/org/apache/drill/exec/store/hive/schema/HiveSchemaFactory.java
@@ -0,0 +1,292 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.drill.exec.store.hive.schema;
+
+import java.util.Collections;
+import java.util.List;
+import java.util.Set;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.TimeUnit;
+
+import com.google.common.collect.ImmutableList;
+
+import net.hydromatic.optiq.Schema;
+import net.hydromatic.optiq.SchemaPlus;
+
+import org.apache.drill.common.exceptions.ExecutionSetupException;
+import org.apache.drill.exec.planner.logical.DrillTable;
+import org.apache.drill.exec.rpc.user.DrillUser;
+import org.apache.drill.exec.rpc.user.UserSession;
+import org.apache.drill.exec.store.AbstractSchema;
+import org.apache.drill.exec.store.SchemaFactory;
+import org.apache.drill.exec.store.hive.HiveReadEntry;
+import org.apache.drill.exec.store.hive.HiveStoragePlugin;
+import org.apache.drill.exec.store.hive.HiveStoragePluginConfig;
+import org.apache.drill.exec.store.hive.HiveTable;
+import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
+import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.hive.metastore.api.Partition;
+import org.apache.hadoop.hive.metastore.api.Table;
+import org.apache.hadoop.hive.metastore.api.UnknownTableException;
+import org.apache.thrift.TException;
+
+import com.google.common.cache.CacheBuilder;
+import com.google.common.cache.CacheLoader;
+import com.google.common.cache.LoadingCache;
+import com.google.common.collect.Lists;
+import com.google.common.collect.Sets;
+
+public class HiveSchemaFactory implements SchemaFactory {
+  static final org.slf4j.Logger logger = org.slf4j.LoggerFactory.getLogger(HiveSchemaFactory.class);
+
+  private static final String DATABASES = "databases";
+
+  private final HiveMetaStoreClient mClient;
+  private LoadingCache<String, List<String>> databases;
+  private LoadingCache<String, List<String>> tableNameLoader;
+  private LoadingCache<String, LoadingCache<String, HiveReadEntry>> tableLoaders;
+  private HiveStoragePlugin plugin;
+  private final String schemaName;
+
+  public HiveSchemaFactory(HiveStoragePlugin plugin, String name, HiveConf hiveConf) throws ExecutionSetupException {
+    this.schemaName = name;
+    this.plugin = plugin;
+
+    try {
+      this.mClient = new HiveMetaStoreClient(hiveConf);
+    } catch (MetaException e) {
+      throw new ExecutionSetupException("Failure setting up Hive metastore client.", e);
+    }
+
+    databases = CacheBuilder //
+        .newBuilder() //
+        .expireAfterAccess(1, TimeUnit.MINUTES) //
+        .build(new DatabaseLoader());
+
+    tableNameLoader = CacheBuilder //
+        .newBuilder() //
+        .expireAfterAccess(1, TimeUnit.MINUTES) //
+        .build(new TableNameLoader());
+
+    tableLoaders = CacheBuilder //
+        .newBuilder() //
+        .expireAfterAccess(4, TimeUnit.HOURS) //
+        .maximumSize(20) //
+        .build(new TableLoaderLoader());
+  }
+
+  private class TableNameLoader extends CacheLoader<String, List<String>> {
+
+    @Override
+    public List<String> load(String dbName) throws Exception {
+      try {
+        return mClient.getAllTables(dbName);
+      } catch (TException e) {
+        logger.warn("Failure while attempting to get hive tables", e);
+        mClient.reconnect();
+        return mClient.getAllTables(dbName);
+      }
+    }
+
+  }
+
+  private class DatabaseLoader extends CacheLoader<String, List<String>> {
+
+    @Override
+    public List<String> load(String key) throws Exception {
+      if (!DATABASES.equals(key))
+        throw new UnsupportedOperationException();
+      try {
+        return mClient.getAllDatabases();
+      } catch (TException e) {
+        logger.warn("Failure while attempting to get hive tables", e);
+        mClient.reconnect();
+        return mClient.getAllDatabases();
+      }
+    }
+  }
+
+  private class TableLoaderLoader extends CacheLoader<String, LoadingCache<String, HiveReadEntry>> {
+
+    @Override
+    public LoadingCache<String, HiveReadEntry> load(String key) throws Exception {
+      return CacheBuilder.newBuilder().expireAfterAccess(1, TimeUnit.MINUTES).build(new TableLoader(key));
+    }
+
+  }
+
+  private class TableLoader extends CacheLoader<String, HiveReadEntry> {
+
+    private final String dbName;
+
+    public TableLoader(String dbName) {
+      super();
+      this.dbName = dbName;
+    }
+
+    @Override
+    public HiveReadEntry load(String key) throws Exception {
+      Table t = null;
+      try {
+        t = mClient.getTable(dbName, key);
+      } catch (TException e) {
+        mClient.reconnect();
+        t = mClient.getTable(dbName, key);
+      }
+
+      if (t == null)
+        throw new UnknownTableException(String.format("Unable to find table '%s'.", key));
+
+      List<Partition> partitions = null;
+      try {
+        partitions = mClient.listPartitions(dbName, key, Short.MAX_VALUE);
+      } catch (TException e) {
+        mClient.reconnect();
+        partitions = mClient.listPartitions(dbName, key, Short.MAX_VALUE);
+      }
+
+      List<HiveTable.HivePartition> hivePartitions = Lists.newArrayList();
+      for (Partition part : partitions) {
+        hivePartitions.add(new HiveTable.HivePartition(part));
+      }
+
+      if (hivePartitions.size() == 0)
+        hivePartitions = null;
+      return new HiveReadEntry(new HiveTable(t), hivePartitions);
+
+    }
+
+  }
+
+  @Override
+  public void registerSchemas(UserSession session, SchemaPlus parent) {
+    HiveSchema schema = new HiveSchema(schemaName);
+    SchemaPlus hPlus = parent.add(schemaName, schema);
+    schema.setHolder(hPlus);
+  }
+
+  class HiveSchema extends AbstractSchema {
+
+    private HiveDatabaseSchema defaultSchema;
+
+    public HiveSchema(String name) {
+      super(ImmutableList.<String>of(), name);
+      getSubSchema("default");
+    }
+
+    @Override
+    public Schema getSubSchema(String name) {
+      List<String> tables;
+      try {
+        tables = tableNameLoader.get(name);
+        HiveDatabaseSchema schema = new HiveDatabaseSchema(tables, this, name);
+        if(name.equals("default")){
+          this.defaultSchema = schema;
+        }
+        return schema;
+      } catch (ExecutionException e) {
+        logger.warn("Failure while attempting to access HiveDatabase '{}'.", name, e.getCause());
+        return null;
+      }
+
+    }
+
+
+    void setHolder(SchemaPlus plusOfThis){
+      for(String s : getSubSchemaNames()){
+        plusOfThis.add(s, getSubSchema(s));
+      }
+    }
+
+    @Override
+    public boolean showInInformationSchema() {
+      return false;
+    }
+
+    @Override
+    public Set<String> getSubSchemaNames() {
+      try{
+        List<String> dbs = databases.get(DATABASES);
+        return Sets.newHashSet(dbs);
+      }catch(ExecutionException e){
+        logger.warn("Failure while getting Hive database list.", e);
+      }
+      return super.getSubSchemaNames();
+    }
+
+    @Override
+    public net.hydromatic.optiq.Table getTable(String name) {
+      if(defaultSchema == null){
+        return super.getTable(name);
+      }
+      return defaultSchema.getTable(name);
+    }
+
+    @Override
+    public Set<String> getTableNames() {
+      if(defaultSchema == null){
+        return super.getTableNames();
+      }
+      return defaultSchema.getTableNames();
+    }
+
+    List<String> getTableNames(String dbName){
+      try{
+        return tableNameLoader.get(dbName);
+      }catch(ExecutionException e){
+        logger.warn("Failure while loading table names for database '{}'.", dbName, e.getCause());
+        return Collections.emptyList();
+      }
+    }
+
+    DrillTable getDrillTable(String dbName, String t){
+      HiveReadEntry entry = getSelectionBaseOnName(dbName, t);
+      if(entry == null) return null;
+
+      if (entry.getJdbcTableType() == TableType.VIEW) {
+        return new DrillHiveViewTable(schemaName, plugin, entry);
+      } else {
+        return new DrillHiveTable(schemaName, plugin, entry);
+      }
+    }
+
+    HiveReadEntry getSelectionBaseOnName(String dbName, String t) {
+      if(dbName == null) dbName = "default";
+      try{
+        return tableLoaders.get(dbName).get(t);
+      }catch(ExecutionException e){
+        logger.warn("Exception occurred while trying to read table. {}.{}", dbName, t, e.getCause());
+        return null;
+      }
+    }
+
+    @Override
+    public AbstractSchema getDefaultSchema() {
+      return defaultSchema;
+    }
+
+    @Override
+    public String getTypeName() {
+      return HiveStoragePluginConfig.NAME;
+    }
+
+  }
+
+
+}
\ No newline at end of file
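
HiveSchemaFactory above funnels every metastore lookup through a Guava LoadingCache,
so repeated schema queries do not hammer the metastore: database and table names
expire one minute after last access, and the per-database table loaders after four
hours. A self-contained sketch of that expire-after-access pattern, with a
hypothetical fetchTableNames() standing in for the HiveMetaStoreClient round trip:

    import java.util.Arrays;
    import java.util.List;
    import java.util.concurrent.TimeUnit;

    import com.google.common.cache.CacheBuilder;
    import com.google.common.cache.CacheLoader;
    import com.google.common.cache.LoadingCache;

    public class MetadataCacheSketch {
      // Hypothetical stand-in for a metastore round trip.
      static List<String> fetchTableNames(String dbName) {
        System.out.println("loading table names for " + dbName);
        return Arrays.asList("kv", "readtest");
      }

      public static void main(String[] args) throws Exception {
        LoadingCache<String, List<String>> tableNames = CacheBuilder.newBuilder()
            .expireAfterAccess(1, TimeUnit.MINUTES)   // same policy as tableNameLoader
            .build(new CacheLoader<String, List<String>>() {
              @Override
              public List<String> load(String dbName) {
                return fetchTableNames(dbName);       // invoked only on a cache miss
              }
            });

        System.out.println(tableNames.get("default"));  // miss: triggers load()
        System.out.println(tableNames.get("default"));  // hit: served from the cache
      }
    }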

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/980dc87b/contrib/storage-hive/core/src/main/resources/bootstrap-storage-plugins.json
----------------------------------------------------------------------
diff --git a/contrib/storage-hive/core/src/main/resources/bootstrap-storage-plugins.json b/contrib/storage-hive/core/src/main/resources/bootstrap-storage-plugins.json
new file mode 100644
index 0000000..ac61ffd
--- /dev/null
+++ b/contrib/storage-hive/core/src/main/resources/bootstrap-storage-plugins.json
@@ -0,0 +1,4 @@
+{
+  "storage":{
+  }
+}

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/980dc87b/contrib/storage-hive/core/src/main/resources/drill-module.conf
----------------------------------------------------------------------
diff --git a/contrib/storage-hive/core/src/main/resources/drill-module.conf b/contrib/storage-hive/core/src/main/resources/drill-module.conf
new file mode 100644
index 0000000..271395e
--- /dev/null
+++ b/contrib/storage-hive/core/src/main/resources/drill-module.conf
@@ -0,0 +1,5 @@
+//  This file tells Drill to consider this module when class path scanning.
+//  This file can also include any supplementary configuration information.
+//  This file is in HOCON format, see https://github.com/typesafehub/config/blob/master/HOCON.md for more information.
+
+drill.logical.function.packages += "org.apache.hadoop.hive"
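
The "+=" above is HOCON's append operator: each drill-module.conf found during class
path scanning adds its packages to the shared list rather than overwriting it. A
hedged sketch of that merge using the Typesafe Config library directly, where
com.example.udfs stands in for a hypothetical second module:

    import java.util.List;

    import com.typesafe.config.Config;
    import com.typesafe.config.ConfigFactory;

    public class ModuleConfSketch {
      public static void main(String[] args) {
        Config hiveModule = ConfigFactory.parseString(
            "drill.logical.function.packages += \"org.apache.hadoop.hive\"");
        Config otherModule = ConfigFactory.parseString(
            "drill.logical.function.packages += \"com.example.udfs\"");
        // Stack the confs the way class path scanning would, then resolve the appends.
        List<String> packages = otherModule.withFallback(hiveModule).resolve()
            .getStringList("drill.logical.function.packages");
        System.out.println(packages);  // [org.apache.hadoop.hive, com.example.udfs]
      }
    }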

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/980dc87b/contrib/storage-hive/core/src/test/java/org/apache/drill/exec/fn/hive/TestHiveUDFs.java
----------------------------------------------------------------------
diff --git a/contrib/storage-hive/core/src/test/java/org/apache/drill/exec/fn/hive/TestHiveUDFs.java b/contrib/storage-hive/core/src/test/java/org/apache/drill/exec/fn/hive/TestHiveUDFs.java
new file mode 100644
index 0000000..e8c58d1
--- /dev/null
+++ b/contrib/storage-hive/core/src/test/java/org/apache/drill/exec/fn/hive/TestHiveUDFs.java
@@ -0,0 +1,166 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.drill.exec.fn.hive;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
+import org.apache.drill.BaseTestQuery;
+import org.apache.drill.exec.record.RecordBatchLoader;
+import org.apache.drill.exec.rpc.user.QueryResultBatch;
+import org.apache.drill.exec.vector.Float4Vector;
+import org.apache.drill.exec.vector.NullableBigIntVector;
+import org.apache.drill.exec.vector.NullableFloat8Vector;
+import org.apache.drill.exec.vector.NullableIntVector;
+import org.apache.drill.exec.vector.NullableVar16CharVector;
+import org.apache.drill.exec.vector.Var16CharVector;
+import org.apache.drill.exec.vector.VarCharVector;
+import org.junit.Test;
+
+import com.google.common.base.Charsets;
+import com.google.common.io.Resources;
+
+import java.util.List;
+
+public class TestHiveUDFs extends BaseTestQuery {
+
+  @Test
+  public void testGenericUDF() throws Throwable {
+
+    int numRecords = 0;
+    String planString = Resources.toString(Resources.getResource("functions/hive/GenericUDF.json"), Charsets.UTF_8);
+    List<QueryResultBatch> results = testPhysicalWithResults(planString);
+
+    RecordBatchLoader batchLoader = new RecordBatchLoader(getAllocator());
+    for(QueryResultBatch result : results) {
+      batchLoader.load(result.getHeader().getDef(), result.getData());
+      if (batchLoader.getRecordCount() <= 0) {
+        result.release();
+        batchLoader.clear();
+        continue;
+      }
+      // Output columns and types
+      //  1. str1 : Var16Char
+      //  2. upperStr1 : NullableVar16Char
+      //  3. unix_timestamp : NullableBigInt
+      //  4. concat : NullableVar16Char
+      //  5. flt1 : Float4
+      //  6. format_number : NullableVar16Char
+      //  7. nullableStr1 : NullableVar16Char
+      //  8. upperNullableStr1 : NullableVar16Char
+      Var16CharVector str1V = (Var16CharVector) batchLoader.getValueAccessorById(Var16CharVector.class, 0).getValueVector();
+      NullableVar16CharVector upperStr1V = (NullableVar16CharVector) batchLoader.getValueAccessorById(NullableVar16CharVector.class, 1).getValueVector();
+      NullableBigIntVector unix_timestampV = (NullableBigIntVector) batchLoader.getValueAccessorById(NullableBigIntVector.class, 2).getValueVector();
+      NullableVar16CharVector concatV = (NullableVar16CharVector) batchLoader.getValueAccessorById(NullableVar16CharVector.class, 3).getValueVector();
+      Float4Vector flt1V = (Float4Vector) batchLoader.getValueAccessorById(Float4Vector.class, 4).getValueVector();
+      NullableVar16CharVector format_numberV = (NullableVar16CharVector) batchLoader.getValueAccessorById(NullableVar16CharVector.class, 5).getValueVector();
+      NullableVar16CharVector nullableStr1V = (NullableVar16CharVector) batchLoader.getValueAccessorById(NullableVar16CharVector.class, 6).getValueVector();
+      NullableVar16CharVector upperNullableStr1V = (NullableVar16CharVector) batchLoader.getValueAccessorById(NullableVar16CharVector.class, 7).getValueVector();
+
+      for(int i=0; i<batchLoader.getRecordCount(); i++) {
+        String in = new String(str1V.getAccessor().get(i), Charsets.UTF_16);
+        String upper = new String(upperStr1V.getAccessor().get(i), Charsets.UTF_16);
+        assertTrue(in.toUpperCase().equals(upper));
+
+        long unix_timestamp = unix_timestampV.getAccessor().get(i);
+
+        String concat = new String(concatV.getAccessor().get(i), Charsets.UTF_16);
+        assertTrue(concat.equals(in+"-"+in));
+
+        float flt1 = flt1V.getAccessor().get(i);
+        String format_number = new String(format_numberV.getAccessor().get(i), Charsets.UTF_16);
+
+
+        String nullableStr1 = null;
+        if (!nullableStr1V.getAccessor().isNull(i))
+          nullableStr1 = new String(nullableStr1V.getAccessor().get(i), Charsets.UTF_16);
+
+        String upperNullableStr1 = null;
+        if (!upperNullableStr1V.getAccessor().isNull(i))
+          upperNullableStr1 = new String(upperNullableStr1V.getAccessor().get(i), Charsets.UTF_16);
+
+        assertEquals(nullableStr1 != null, upperNullableStr1 != null);
+        if (nullableStr1 != null)
+          assertEquals(nullableStr1.toUpperCase(), upperNullableStr1);
+
+        System.out.println(in + ", " + upper + ", " + unix_timestamp + ", " + concat + ", " +
+          flt1 + ", " + format_number + ", " + nullableStr1 + ", " + upperNullableStr1);
+
+        numRecords++;
+      }
+
+      result.release();
+      batchLoader.clear();
+    }
+
+    System.out.println("Processed " + numRecords + " records");
+  }
+
+  @Test
+  public void testUDF() throws Throwable {
+    int numRecords = 0;
+    String planString = Resources.toString(Resources.getResource("functions/hive/UDF.json"), Charsets.UTF_8);
+    List<QueryResultBatch> results = testPhysicalWithResults(planString);
+
+    RecordBatchLoader batchLoader = new RecordBatchLoader(getAllocator());
+    for(QueryResultBatch result : results) {
+      batchLoader.load(result.getHeader().getDef(), result.getData());
+      if (batchLoader.getRecordCount() <= 0) {
+        result.release();
+        batchLoader.clear();
+        continue;
+      }
+
+      // Output columns and types
+      // 1. str1 : Var16Char
+      // 2. str1Length : Int
+      // 3. str1Ascii : Int
+      // 4. flt1 : Float4
+      // 5. pow : Float8
+      Var16CharVector str1V = (Var16CharVector) batchLoader.getValueAccessorById(Var16CharVector.class, 0).getValueVector();
+      NullableIntVector str1LengthV = (NullableIntVector) batchLoader.getValueAccessorById(NullableIntVector.class, 1).getValueVector();
+      NullableIntVector str1AsciiV = (NullableIntVector) batchLoader.getValueAccessorById(NullableIntVector.class, 2).getValueVector();
+      Float4Vector flt1V = (Float4Vector) batchLoader.getValueAccessorById(Float4Vector.class, 3).getValueVector();
+      NullableFloat8Vector powV = (NullableFloat8Vector) batchLoader.getValueAccessorById(NullableFloat8Vector.class, 4).getValueVector();
+
+      for(int i=0; i<batchLoader.getRecordCount(); i++) {
+        String str1 = new String(str1V.getAccessor().get(i), Charsets.UTF_16);
+        int str1Length = str1LengthV.getAccessor().get(i);
+        assertTrue(str1.length() == str1Length);
+
+        int str1Ascii = str1AsciiV.getAccessor().get(i);
+
+        float flt1 = flt1V.getAccessor().get(i);
+
+        double pow = 0;
+        if (!powV.getAccessor().isNull(i)) {
+          pow = powV.getAccessor().get(i);
+          assertTrue(Math.pow(flt1, 2.0) == pow);
+        }
+
+        System.out.println(str1 + ", " + str1Length + ", " + str1Ascii + ", " + flt1 + ", " + pow);
+        numRecords++;
+      }
+
+      result.release();
+      batchLoader.clear();
+    }
+
+    System.out.println("Processed " + numRecords + " records");
+  }
+}

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/980dc87b/contrib/storage-hive/core/src/test/java/org/apache/drill/exec/store/hive/HiveTestDataGenerator.java
----------------------------------------------------------------------
diff --git a/contrib/storage-hive/core/src/test/java/org/apache/drill/exec/store/hive/HiveTestDataGenerator.java b/contrib/storage-hive/core/src/test/java/org/apache/drill/exec/store/hive/HiveTestDataGenerator.java
new file mode 100644
index 0000000..e051abb
--- /dev/null
+++ b/contrib/storage-hive/core/src/test/java/org/apache/drill/exec/store/hive/HiveTestDataGenerator.java
@@ -0,0 +1,261 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.drill.exec.store.hive;
+
+import java.io.File;
+import java.io.IOException;
+import java.io.PrintWriter;
+import java.sql.Date;
+import java.sql.Timestamp;
+
+import org.apache.commons.io.FileUtils;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.ql.CommandNeedRetryException;
+import org.apache.hadoop.hive.ql.Driver;
+import org.apache.hadoop.hive.ql.processors.CommandProcessorResponse;
+import org.apache.hadoop.hive.ql.session.SessionState;
+
+public class HiveTestDataGenerator {
+  static final org.slf4j.Logger logger = org.slf4j.LoggerFactory.getLogger(HiveTestDataGenerator.class);
+
+  static int RETRIES = 5;
+  private Driver hiveDriver = null;
+  private static final String DB_DIR = "/tmp/drill_hive_db";
+  private static final String WH_DIR = "/tmp/drill_hive_wh";
+  
+  public static void main(String[] args) throws Exception {
+    HiveTestDataGenerator htd = new HiveTestDataGenerator();
+    htd.generateTestData();
+  }
+
+  private void cleanDir(String dir) throws IOException{
+    File f = new File(dir);
+    if(f.exists()){
+      FileUtils.cleanDirectory(f);
+      FileUtils.forceDelete(f);
+    }
+  }
+  
+  public void generateTestData() throws Exception {
+    
+    // remove data from previous runs.
+    cleanDir(DB_DIR);
+    cleanDir(WH_DIR);
+    
+    HiveConf conf = new HiveConf();
+
+    conf.set("javax.jdo.option.ConnectionURL", String.format("jdbc:derby:;databaseName=%s;create=true", DB_DIR));
+    conf.set(FileSystem.FS_DEFAULT_NAME_KEY, "file:///");
+    conf.set("hive.metastore.warehouse.dir", WH_DIR);
+
+    SessionState ss = new SessionState(new HiveConf(SessionState.class));
+    SessionState.start(ss);
+    hiveDriver = new Driver(conf);
+
+    // generate (key, value) test data
+    String testDataFile = generateTestDataFile();
+
+    createTableAndLoadData("default", "kv", testDataFile);
+    executeQuery("CREATE DATABASE IF NOT EXISTS db1");
+    createTableAndLoadData("db1", "kv_db1", testDataFile);
+
+    // Generate data with date and timestamp data type
+    String testDateDataFile = generateTestDataFileWithDate();
+
+    // create table with date and timestamp data type
+    executeQuery("USE default");
+    executeQuery("CREATE TABLE IF NOT EXISTS default.foodate(a DATE, b TIMESTAMP) "+
+        "ROW FORMAT DELIMITED FIELDS TERMINATED BY ',' STORED AS TEXTFILE");
+    executeQuery(String.format("LOAD DATA LOCAL INPATH '%s' OVERWRITE INTO TABLE default.foodate", testDateDataFile));
+
+    // create a table with no data
+    executeQuery("CREATE TABLE IF NOT EXISTS default.empty_table(a INT, b STRING)");
+
+    // create a Hive table whose columns use data types that Drill supports reading.
+    testDataFile = generateAllTypesDataFile();
+    executeQuery(
+        "CREATE TABLE IF NOT EXISTS readtest (" +
+        "  binary_field BINARY," +
+        "  boolean_field BOOLEAN," +
+        "  tinyint_field TINYINT," +
+        "  decimal_field DECIMAL," +
+        "  double_field DOUBLE," +
+        "  float_field FLOAT," +
+        "  int_field INT," +
+        "  bigint_field BIGINT," +
+        "  smallint_field SMALLINT," +
+        "  string_field STRING," +
+        "  varchar_field VARCHAR(50)," +
+        "  timestamp_field TIMESTAMP," +
+        "  date_field DATE" +
+        ") PARTITIONED BY (" +
+        "  binary_part BINARY," +
+        "  boolean_part BOOLEAN," +
+        "  tinyint_part TINYINT," +
+        "  decimal_part DECIMAL," +
+        "  double_part DOUBLE," +
+        "  float_part FLOAT," +
+        "  int_part INT," +
+        "  bigint_part BIGINT," +
+        "  smallint_part SMALLINT," +
+        "  string_part STRING," +
+        "  varchar_part VARCHAR(50)," +
+        "  timestamp_part TIMESTAMP," +
+        "  date_part DATE" +
+        ") ROW FORMAT DELIMITED FIELDS TERMINATED BY ',' STORED AS TEXTFILE"
+    );
+
+    // Add a partition to table 'readtest'
+    executeQuery(
+        "ALTER TABLE readtest ADD IF NOT EXISTS PARTITION ( " +
+        "  binary_part='binary', " +
+        "  boolean_part='true', " +
+        "  tinyint_part='64', " +
+        "  decimal_part='3489423929323435243', " +
+        "  double_part='8.345', " +
+        "  float_part='4.67', " +
+        "  int_part='123456', " +
+        "  bigint_part='234235', " +
+        "  smallint_part='3455', " +
+        "  string_part='string', " +
+        "  varchar_part='varchar', " +
+        "  timestamp_part='2013-07-05 17:01:00', " +
+        "  date_part='2013-07-05')"
+    );
+
+    // Load data into table 'readtest'
+    executeQuery(String.format("LOAD DATA LOCAL INPATH '%s' OVERWRITE INTO TABLE default.readtest PARTITION (" +
+        "  binary_part='binary', " +
+        "  boolean_part='true', " +
+        "  tinyint_part='64', " +
+        "  decimal_part='3489423929323435243', " +
+        "  double_part='8.345', " +
+        "  float_part='4.67', " +
+        "  int_part='123456', " +
+        "  bigint_part='234235', " +
+        "  smallint_part='3455', " +
+        "  string_part='string', " +
+        "  varchar_part='varchar', " +
+        "  timestamp_part='2013-07-05 17:01:00', " +
+        "  date_part='2013-07-05')", testDataFile));
+
+    // create a table that has all Hive types. This is to test how Hive table metadata is populated in
+    // Drill's INFORMATION_SCHEMA.
+    executeQuery("CREATE TABLE IF NOT EXISTS infoschematest(" +
+        "booleanType BOOLEAN, " +
+        "tinyintType TINYINT, " +
+        "smallintType SMALLINT, " +
+        "intType INT, " +
+        "bigintType BIGINT, " +
+        "floatType FLOAT, " +
+        "doubleType DOUBLE, " +
+        "dataType DATE, " +
+        "timestampType TIMESTAMP, " +
+        "binaryType BINARY, " +
+        "decimalType DECIMAL, " +
+        "stringType STRING, " +
+        "varCharType VARCHAR(20), " +
+        "listType ARRAY<STRING>, " +
+        "mapType MAP<STRING,INT>, " +
+        "structType STRUCT<sint:INT,sboolean:BOOLEAN,sstring:STRING>, " +
+        "uniontypeType UNIONTYPE<int, double, array<string>>)"
+    );
+
+    // create a Hive view to test how its metadata is populated in Drill's INFORMATION_SCHEMA
+    executeQuery("CREATE VIEW IF NOT EXISTS hiveview AS SELECT * FROM kv");
+
+    ss.close();
+  }
+
+  private void createTableAndLoadData(String dbName, String tblName, String dataFile) {
+    executeQuery(String.format("USE %s", dbName));
+    executeQuery(String.format("CREATE TABLE IF NOT EXISTS %s.%s(key INT, value STRING) "+
+        "ROW FORMAT DELIMITED FIELDS TERMINATED BY ',' STORED AS TEXTFILE", dbName, tblName));
+    executeQuery(String.format("LOAD DATA LOCAL INPATH '%s' OVERWRITE INTO TABLE %s.%s", dataFile, dbName, tblName));
+  }
+
+  private File getTempFile() throws Exception {
+    File file = null;
+    while (true) {
+      file = File.createTempFile("drill-hive-test", ".txt");
+      if (file.exists()) {
+        boolean success = file.delete();
+        if (success) {
+          break;
+        }
+      }
+      logger.debug("retry creating tmp file");
+    }
+
+    return file;
+  }
+
+  private String generateTestDataFile() throws Exception {
+    File file = getTempFile();
+
+    PrintWriter printWriter = new PrintWriter(file);
+    for (int i=1; i<=5; i++)
+      printWriter.println (String.format("%d, key_%d", i, i));
+    printWriter.close();
+
+    return file.getPath();
+  }
+
+  private String generateTestDataFileWithDate() throws Exception {
+    File file = getTempFile();
+
+    PrintWriter printWriter = new PrintWriter(file);
+    for (int i=1; i<=5; i++) {
+      Date date = new Date(System.currentTimeMillis());
+      Timestamp ts = new Timestamp(System.currentTimeMillis());
+      printWriter.println (String.format("%s,%s", date.toString(), ts.toString()));
+    }
+    printWriter.close();
+
+    return file.getPath();
+  }
+
+  private String generateAllTypesDataFile() throws Exception {
+    File file = getTempFile();
+
+    PrintWriter printWriter = new PrintWriter(file);
+    printWriter.println("YmluYXJ5ZmllbGQ=,false,34,3489423929323435243,8.345,4.67,123456,234235,3455,stringfield,varcharfield,2013-07-05 17:01:00,2013-07-05");
+    printWriter.close();
+
+    return file.getPath();
+  }
+
+  private void executeQuery(String query) {
+    CommandProcessorResponse response = null;
+    int retryCount = RETRIES;
+
+    // Hive occasionally asks callers to retry a command; loop until the query
+    // runs or the retry budget is exhausted, so 'response' is never null below.
+    while (true) {
+      try {
+        response = hiveDriver.run(query);
+        break;
+      } catch(CommandNeedRetryException ex) {
+        if (--retryCount == 0)
+          throw new RuntimeException(String.format("Failed to execute command '%s' after %d retries", query, RETRIES), ex);
+      }
+    }
+
+    if (response.getResponseCode() != 0)
+      throw new RuntimeException(String.format("Failed to execute command '%s', errorMsg = '%s'",
+        query, response.getErrorMessage()));
+  }
+}

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/980dc87b/contrib/storage-hive/core/src/test/resources/functions/hive/GenericUDF.json
----------------------------------------------------------------------
diff --git a/contrib/storage-hive/core/src/test/resources/functions/hive/GenericUDF.json b/contrib/storage-hive/core/src/test/resources/functions/hive/GenericUDF.json
new file mode 100644
index 0000000..48731b8
--- /dev/null
+++ b/contrib/storage-hive/core/src/test/resources/functions/hive/GenericUDF.json
@@ -0,0 +1,45 @@
+{
+    head:{
+        type:"APACHE_DRILL_PHYSICAL",
+        version:"1",
+        generator:{
+            type:"manual"
+        }
+    },
+    graph:[
+        {
+            @id:1,
+            pop:"mock-scan",
+            url: "http://apache.org",
+            entries:[
+                {records: 100, types: [
+                   {name: "str1", type: "VAR16CHAR", mode: "REQUIRED"},
+                   {name: "str2", type: "VAR16CHAR", mode: "REQUIRED"},
+                   {name: "str3", type: "VAR16CHAR", mode: "REQUIRED"},
+                   {name: "nullableStr1", type: "VAR16CHAR", mode: "OPTIONAL"},
+                   {name: "flt1", type: "FLOAT4", mode: "REQUIRED"}
+                ]}
+            ]
+        },
+        {
+            @id:2,
+            child: 1,
+            pop:"project",
+            exprs: [
+                { ref: "str1", expr: "str1" },
+                { ref: "upperStr1", expr: "ucase(str1)" },
+                { ref: "unix_timestamp", expr: "unix_timestamp()" },
+                { ref: "concat", expr: "concat_ws('-', str2, str3)" },
+                { ref: "flt1", expr: "flt1" },
+                { ref: "format_number", expr: "format_number(cast(flt1 as float8), cast(2 as int))" },
+                { ref: "nullableStr1", expr: "nullableStr1" },
+                { ref: "upperNulableStr1", expr: "upper(nullableStr1)" }
+            ]
+        },
+        {
+            @id: 3,
+            child: 2,
+            pop: "screen"
+        }
+    ]
+}

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/980dc87b/contrib/storage-hive/core/src/test/resources/functions/hive/UDF.json
----------------------------------------------------------------------
diff --git a/contrib/storage-hive/core/src/test/resources/functions/hive/UDF.json b/contrib/storage-hive/core/src/test/resources/functions/hive/UDF.json
new file mode 100644
index 0000000..c2c7169
--- /dev/null
+++ b/contrib/storage-hive/core/src/test/resources/functions/hive/UDF.json
@@ -0,0 +1,39 @@
+{
+    head:{
+        type:"APACHE_DRILL_PHYSICAL",
+        version:"1",
+        generator:{
+            type:"manual"
+        }
+    },
+    graph:[
+        {
+            @id:1,
+            pop:"mock-scan",
+            url: "http://apache.org",
+            entries:[
+                {records: 100, types: [
+                   {name: "str1", type: "VAR16CHAR", mode: "REQUIRED"},
+                   {name: "flt1", type: "FLOAT4", mode: "REQUIRED"}
+                ]}
+            ]
+        },
+        {
+            @id:2,
+            child: 1,
+            pop:"project",
+            exprs: [
+                { ref: "str1", expr: "str1" },
+                { ref: "str1Length", expr: "length(str1)" },
+                { ref: "str1Ascii", expr: "ascii(str1)" },
+                { ref: "flt1", expr: "flt1" },
+                { ref: "pow", expr: "pow(cast(flt1 as float8), 2.0)" }
+            ]
+        },
+        {
+            @id: 3,
+            child: 2,
+            pop: "screen"
+        }
+    ]
+}

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/980dc87b/contrib/storage-hive/core/src/test/resources/logback.xml
----------------------------------------------------------------------
diff --git a/contrib/storage-hive/core/src/test/resources/logback.xml b/contrib/storage-hive/core/src/test/resources/logback.xml
new file mode 100644
index 0000000..385b80f
--- /dev/null
+++ b/contrib/storage-hive/core/src/test/resources/logback.xml
@@ -0,0 +1,64 @@
+<?xml version="1.0" encoding="UTF-8" ?>
+<!--
+ Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements.  See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License.  You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+<configuration>
+  <timestamp key="bySecond" datePattern="yyyyMMdd'T'HHmmss"/>
+
+  <appender name="SOCKET"
+    class="de.huxhorn.lilith.logback.appender.ClassicMultiplexSocketAppender">
+    <Compressing>true</Compressing>
+    <ReconnectionDelay>10000</ReconnectionDelay>
+    <IncludeCallerData>true</IncludeCallerData>
+    <RemoteHosts>${LILITH_HOSTNAME:-localhost}</RemoteHosts>
+  </appender>
+
+  <appender name="FILE" class="ch.qos.logback.core.FileAppender">
+    <!-- The property 'logback.log.dir' is defined in pom.xml --> 
+    <file>${logback.log.dir:-./target/surefire-reports}/hive-tests-${bySecond}.log</file>
+    <append>false</append>
+    <encoder>
+      <pattern>%d{HH:mm:ss.SSS} [%thread] %-5level %logger{36} - %msg%n</pattern>
+    </encoder>
+   </appender>
+
+  <appender name="STDOUT" class="ch.qos.logback.core.ConsoleAppender">
+    <encoder>
+      <pattern>%d{HH:mm:ss.SSS} [%thread] %-5level %logger{36} - %msg%n</pattern>
+    </encoder>
+  </appender>
+
+  <logger name="org.apache.drill" additivity="false">
+    <level value="info" />
+    <appender-ref ref="FILE" />
+  </logger>
+
+  <logger name="org.apache.drill" additivity="false">
+    <level value="debug" />
+    <appender-ref ref="SOCKET" />
+  </logger>
+
+  <logger name="org.apache.hadoop" additivity="false">
+    <level value="info" />
+    <appender-ref ref="FILE" />
+  </logger>
+
+  <root>
+    <level value="error" />
+    <appender-ref ref="STDOUT" />
+  </root>
+
+</configuration>

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/980dc87b/contrib/storage-hive/hive-exec-shade/pom.xml
----------------------------------------------------------------------
diff --git a/contrib/storage-hive/hive-exec-shade/pom.xml b/contrib/storage-hive/hive-exec-shade/pom.xml
new file mode 100644
index 0000000..105f7cd
--- /dev/null
+++ b/contrib/storage-hive/hive-exec-shade/pom.xml
@@ -0,0 +1,169 @@
+<?xml version="1.0"?>
+<!--
+ Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements.  See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License.  You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+<project
+  xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd"
+  xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
+  <modelVersion>4.0.0</modelVersion>
+
+  <parent>
+    <groupId>org.apache.drill.contrib.storage-hive</groupId>
+    <artifactId>drill-contrib-storage-hive-parent</artifactId>
+    <version>1.0.0-m2-incubating-SNAPSHOT</version>
+  </parent>
+
+  <artifactId>drill-hive-exec-shaded</artifactId>
+  <packaging>jar</packaging>
+  <name>contrib/hive-storage-plugin/hive-exec-shaded</name>
+
+  <dependencies>
+    <dependency>
+      <groupId>org.apache.hive</groupId>
+      <artifactId>hive-exec</artifactId>
+      <version>0.12.0</version>
+      <scope>compile</scope>
+      <exclusions>
+        <exclusion>
+          <groupId>commons-logging</groupId>
+          <artifactId>commons-logging</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>commons-logging</groupId>
+          <artifactId>commons-logging-api</artifactId>
+        </exclusion>
+        <exclusion>
+          <artifactId>slf4j-log4j12</artifactId>
+          <groupId>org.slf4j</groupId>
+        </exclusion>
+        <exclusion>
+          <groupId>jline</groupId>
+          <artifactId>jline</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>com.google.guava</groupId>
+          <artifactId>guava</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>com.google.protobuf</groupId>
+          <artifactId>protobuf-java</artifactId>
+        </exclusion>
+      </exclusions>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hive</groupId>
+      <artifactId>hive-metastore</artifactId>
+      <version>0.12.0</version>
+      <exclusions>
+        <exclusion>
+          <groupId>commons-logging</groupId>
+          <artifactId>commons-logging</artifactId>
+        </exclusion>
+        <exclusion>
+          <artifactId>slf4j-log4j12</artifactId>
+          <groupId>org.slf4j</groupId>
+        </exclusion>
+        <exclusion>
+          <groupId>commons-logging</groupId>
+          <artifactId>commons-logging-api</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>org.mortbay.jetty</groupId>
+          <artifactId>servlet-api</artifactId>
+        </exclusion>
+      </exclusions>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hive</groupId>
+      <artifactId>hive-hbase-handler</artifactId>
+      <version>0.12.0</version>
+      <exclusions>
+        <exclusion>
+          <groupId>org.slf4j</groupId>
+          <artifactId>slf4j-log4j12</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>org.apache.hbase</groupId>
+          <artifactId>hbase</artifactId>
+        </exclusion>
+      </exclusions>
+    </dependency>
+  </dependencies>
+
+  <build>
+    <plugins>
+      <plugin>
+        <groupId>org.apache.maven.plugins</groupId>
+        <artifactId>maven-shade-plugin</artifactId>
+        <version>2.1</version>
+        <executions>
+          <execution>
+            <phase>package</phase>
+            <goals>
+              <goal>shade</goal>
+            </goals>
+            <configuration>
+              <artifactSet>
+                <includes>
+                  <include>org.apache.hive:hive-exec</include>
+                </includes>
+              </artifactSet>
+              <createDependencyReducedPom>false</createDependencyReducedPom>
+              <promoteTransitiveDependencies>true</promoteTransitiveDependencies>
+              <relocations>
+                <relocation>
+                  <pattern>com.google.</pattern>
+                  <shadedPattern>com.google.hive12.</shadedPattern>
+                </relocation>
+              </relocations>
+            </configuration>
+          </execution>
+        </executions>
+      </plugin>
+      <plugin>
+        <groupId>org.apache.maven.plugins</groupId>
+        <artifactId>maven-dependency-plugin</artifactId>
+        <version>2.8</version>
+        <executions>
+          <execution>
+            <id>unpack</id>
+            <phase>package</phase>
+            <goals>
+              <goal>unpack</goal>
+            </goals>
+            <configuration>
+              <artifactItems>
+                <artifactItem>
+                  <groupId>org.apache.drill.contrib.storage-hive</groupId>
+                  <artifactId>drill-hive-exec-shaded</artifactId>
+                  <version>${project.version}</version>
+                  <type>jar</type>
+                  <overWrite>true</overWrite>
+                  <outputDirectory>${project.build.directory}/classes</outputDirectory>
+                  <includes>**/**</includes>
+                </artifactItem>
+              </artifactItems>
+            </configuration>
+          </execution>
+        </executions>
+      </plugin>
+    </plugins>
+  </build>
+</project>
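
The relocation above rewrites every com.google.* reference inside hive-exec to
com.google.hive12.*, so the Guava that hive-exec bundles can coexist with the Guava
version Drill itself depends on. A small runtime probe, assuming both
drill-hive-exec-shaded and an unshaded Guava are on the classpath (illustrative
only, not part of the build):

    public class ShadeProbe {
      public static void main(String[] args) throws Exception {
        // Drill's own (unshaded) Guava:
        System.out.println(Class.forName("com.google.common.collect.Lists"));
        // Hive's relocated copy inside the shaded jar:
        System.out.println(Class.forName("com.google.hive12.common.collect.Lists"));
      }
    }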

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/980dc87b/contrib/storage-hive/hive-exec-shade/src/main/resources/drill-module.conf
----------------------------------------------------------------------
diff --git a/contrib/storage-hive/hive-exec-shade/src/main/resources/drill-module.conf b/contrib/storage-hive/hive-exec-shade/src/main/resources/drill-module.conf
new file mode 100644
index 0000000..271395e
--- /dev/null
+++ b/contrib/storage-hive/hive-exec-shade/src/main/resources/drill-module.conf
@@ -0,0 +1,5 @@
+//  This file tells Drill to consider this module when class path scanning.
+//  This file can also include any supplementary configuration information.
+//  This file is in HOCON format, see https://github.com/typesafehub/config/blob/master/HOCON.md for more information.
+
+drill.logical.function.packages += "org.apache.hadoop.hive"

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/980dc87b/contrib/storage-hive/pom.xml
----------------------------------------------------------------------
diff --git a/contrib/storage-hive/pom.xml b/contrib/storage-hive/pom.xml
index 6e416e4..569db29 100644
--- a/contrib/storage-hive/pom.xml
+++ b/contrib/storage-hive/pom.xml
@@ -15,151 +15,24 @@
  See the License for the specific language governing permissions and
  limitations under the License.
 -->
-<project
-  xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd"
-  xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
   <modelVersion>4.0.0</modelVersion>
-
   <parent>
-    <artifactId>drill-contrib-parent</artifactId>
     <groupId>org.apache.drill.contrib</groupId>
+    <artifactId>drill-contrib-parent</artifactId>
     <version>1.0.0-m2-incubating-SNAPSHOT</version>
   </parent>
 
-  <artifactId>storage-hive</artifactId>
-  <packaging>jar</packaging>
-  <name>contrib/storage-hive</name>
+  <groupId>org.apache.drill.contrib.storage-hive</groupId>
+  <artifactId>drill-contrib-storage-hive-parent</artifactId>
+  <name>contrib/hive-storage-plugin/Parent Pom</name>
+  <packaging>pom</packaging>
 
   <dependencies>
-    <dependency>
-      <groupId>org.apache.hive</groupId>
-      <artifactId>hive-exec</artifactId>
-      <version>0.12.0</version>
-      <scope>compile</scope>
-      <exclusions>
-        <exclusion>
-          <groupId>commons-logging</groupId>
-          <artifactId>commons-logging</artifactId>
-        </exclusion>
-        <exclusion>
-          <groupId>commons-logging</groupId>
-          <artifactId>commons-logging-api</artifactId>
-        </exclusion>
-        <exclusion>
-          <artifactId>slf4j-log4j12</artifactId>
-          <groupId>org.slf4j</groupId>
-        </exclusion>
-        <exclusion>
-          <groupId>jline</groupId>
-          <artifactId>jline</artifactId>
-        </exclusion>
-        <exclusion>
-          <groupId>com.google.guava</groupId>
-          <artifactId>guava</artifactId>
-        </exclusion>
-        <exclusion>
-          <groupId>jline</groupId>
-          <artifactId>jline</artifactId>
-        </exclusion>
-      </exclusions>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.hive</groupId>
-      <artifactId>hive-metastore</artifactId>
-      <version>0.12.0</version>
-      <exclusions>
-        <exclusion>
-          <groupId>commons-logging</groupId>
-          <artifactId>commons-logging</artifactId>
-        </exclusion>
-        <exclusion>
-          <artifactId>slf4j-log4j12</artifactId>
-          <groupId>org.slf4j</groupId>
-        </exclusion>
-        <exclusion>
-          <groupId>commons-logging</groupId>
-          <artifactId>commons-logging-api</artifactId>
-        </exclusion>
-        <exclusion>
-          <groupId>org.mortbay.jetty</groupId>
-          <artifactId>servlet-api</artifactId>
-        </exclusion>
-      </exclusions>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.hive</groupId>
-      <artifactId>hive-hbase-handler</artifactId>
-      <version>0.12.0</version>
-      <exclusions>
-        <exclusion>
-          <groupId>org.slf4j</groupId>
-          <artifactId>slf4j-log4j12</artifactId>
-        </exclusion>
-        <exclusion>
-          <groupId>org.apache.hbase</groupId>
-          <artifactId>hbase</artifactId>
-        </exclusion>
-      </exclusions>
-    </dependency>
   </dependencies>
 
-  <build>
-    <plugins>
-      <plugin>
-        <groupId>org.apache.maven.plugins</groupId>
-        <artifactId>maven-shade-plugin</artifactId>
-        <version>2.1</version>
-        <executions>
-          <execution>
-            <phase>package</phase>
-            <goals>
-              <goal>shade</goal>
-            </goals>
-            <configuration>
-              <artifactSet>
-                <includes>
-                  <include>org.apache.hive:hive-exec</include>
-                </includes>
-              </artifactSet>
-              <createDependencyReducedPom>false</createDependencyReducedPom>
-              <promoteTransitiveDependencies>true</promoteTransitiveDependencies>
-              <relocations>
-                <relocation>
-                  <pattern>com.google.</pattern>
-                  <shadedPattern>com.google.hive12.</shadedPattern>
-                </relocation>
-              </relocations>
-            </configuration>
-          </execution>
-        </executions>
-      </plugin>
-      <plugin>
-        <groupId>org.apache.maven.plugins</groupId>
-        <artifactId>maven-dependency-plugin</artifactId>
-        <version>2.8</version>
-        <executions>
-          <execution>
-            <id>unpack</id>
-            <phase>package</phase>
-            <goals>
-              <goal>unpack</goal>
-            </goals>
-            <configuration>
-              <artifactItems>
-                <artifactItem>
-                  <groupId>org.apache.drill.contrib</groupId>
-                  <artifactId>storage-hive</artifactId>
-                  <version>${project.version}</version>
-                  <type>jar</type>
-                  <overWrite>true</overWrite>
-                  <outputDirectory>${project.build.directory}/classes</outputDirectory>
-                  <includes>**/**</includes>
-                </artifactItem>
-              </artifactItems>
-            </configuration>
-          </execution>
-        </executions>
-      </plugin>
-    </plugins>
-  </build>
+  <modules>
+    <module>hive-exec-shade</module>
+    <module>core</module>
+  </modules>
 </project>

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/980dc87b/contrib/storage-hive/src/main/resources/drill-module.conf
----------------------------------------------------------------------
diff --git a/contrib/storage-hive/src/main/resources/drill-module.conf b/contrib/storage-hive/src/main/resources/drill-module.conf
deleted file mode 100644
index 271395e..0000000
--- a/contrib/storage-hive/src/main/resources/drill-module.conf
+++ /dev/null
@@ -1,5 +0,0 @@
-//  This file tells Drill to consider this module when class path scanning.
-//  This file can also include any supplementary configuration information.
-//  This file is in HOCON format, see https://github.com/typesafehub/config/blob/master/HOCON.md for more information.
-
-drill.logical.function.packages += "org.apache.hadoop.hive"

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/980dc87b/distribution/pom.xml
----------------------------------------------------------------------
diff --git a/distribution/pom.xml b/distribution/pom.xml
index 0a6b887..b395a7b 100644
--- a/distribution/pom.xml
+++ b/distribution/pom.xml
@@ -29,17 +29,6 @@
       <artifactId>sqlline</artifactId>
     </dependency>
     <dependency>
-      <groupId>org.apache.drill.contrib</groupId>
-      <artifactId>storage-hive</artifactId>
-      <version>${project.version}</version>
-      <exclusions>
-        <exclusion>
-          <groupId>org.apache.hive</groupId>
-          <artifactId>hive-exec</artifactId>
-        </exclusion>
-      </exclusions>
-    </dependency>
-    <dependency>
       <groupId>org.apache.drill</groupId>
       <artifactId>drill-protocol</artifactId>
       <version>${project.version}</version>
@@ -83,6 +72,11 @@
       <version>${project.version}</version>
     </dependency>
     <dependency>
+      <groupId>org.apache.drill.contrib.storage-hive</groupId>
+      <artifactId>drill-storage-hive-core</artifactId>
+      <version>${project.version}</version>
+    </dependency>
+    <dependency>
       <groupId>com.google.protobuf</groupId>
       <artifactId>protobuf-java</artifactId>
       <version>2.4.1</version>

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/980dc87b/distribution/src/assemble/bin.xml
----------------------------------------------------------------------
diff --git a/distribution/src/assemble/bin.xml b/distribution/src/assemble/bin.xml
index e39073f..65bc6e0 100644
--- a/distribution/src/assemble/bin.xml
+++ b/distribution/src/assemble/bin.xml
@@ -46,10 +46,14 @@
         <include>org.apache.drill:drill-protocol:jar:rebuffed</include>
         <include>org.apache.drill:drill-common:jar:rebuffed</include>
         <include>org.apache.drill.exec:drill-java-exec:jar:rebuffed</include>
-        <include>org.apache.drill.contrib:storage-hive:jar</include>
+        <include>org.apache.drill.contrib.storage-hive:drill-storage-hive-core</include>
+        <include>org.apache.drill.contrib.storage-hive:drill-hive-exec-shaded</include>
         <include>org.apache.drill.contrib.data:tpch-sample-data:jar</include>
         <include>org.apache.drill.contrib:drill-storage-hbase</include>
       </includes>
+      <excludes>
+        <exclude>org.apache.drill.contrib.storage-hive:drill-storage-hive-core:jar:tests</exclude>
+      </excludes>
       <outputDirectory>jars</outputDirectory>
       <useProjectArtifact>false</useProjectArtifact>
     </dependencySet>  
@@ -63,6 +67,7 @@
         <exclude>org.apache.drill</exclude>
         <exclude>org.apache.drill.exec</exclude>
         <exclude>org.apache.drill.contrib</exclude>
+        <exclude>org.apache.drill.contrib.storage-hive</exclude>
         <exclude>org.hsqldb:hsqldb</exclude>
         <!-- exclude or sqlline has problems -->
         <exclude>hsqldb:hsqldb</exclude>

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/980dc87b/exec/java-exec/pom.xml
----------------------------------------------------------------------
diff --git a/exec/java-exec/pom.xml b/exec/java-exec/pom.xml
index 3c245b8..81104e1 100644
--- a/exec/java-exec/pom.xml
+++ b/exec/java-exec/pom.xml
@@ -67,11 +67,6 @@
       <version>2.6</version>
     </dependency>
     <dependency>
-      <groupId>org.codehaus.janino</groupId>
-      <artifactId>janino</artifactId>
-      <version>2.7.3</version>
-    </dependency>
-    <dependency>
       <groupId>org.eclipse.jetty</groupId>
       <artifactId>jetty-server</artifactId>
       <version>9.1.5.v20140505</version>
@@ -167,17 +162,6 @@
       <version>${project.version}</version>
     </dependency>
     <dependency>
-      <groupId>org.apache.drill.contrib</groupId>
-      <artifactId>storage-hive</artifactId>
-      <version>${project.version}</version>
-      <exclusions>
-        <exclusion>
-          <groupId>org.apache.hive</groupId>
-          <artifactId>hive-exec</artifactId>
-        </exclusion>
-      </exclusions>
-    </dependency>
-    <dependency>
       <groupId>org.apache.drill</groupId>
       <artifactId>drill-common</artifactId>
       <version>${project.version}</version>

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/980dc87b/exec/java-exec/src/main/codegen/config.fmpp
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/codegen/config.fmpp b/exec/java-exec/src/main/codegen/config.fmpp
index d00e24a..e9293a8 100644
--- a/exec/java-exec/src/main/codegen/config.fmpp
+++ b/exec/java-exec/src/main/codegen/config.fmpp
@@ -20,7 +20,6 @@ data: {
     cast: tdd(../data/Casts.tdd),
     MathFunctionTypes: tdd(../data/MathFunctionTypes.tdd),
     mathFunc:tdd(../data/MathFunc.tdd),
-    drillOI:tdd(../data/HiveTypes.tdd),
     aggrtypes1: tdd(../data/AggrTypes1.tdd),
     decimalaggrtypes1: tdd(../data/DecimalAggrTypes1.tdd),
     aggrtypes2: tdd(../data/AggrTypes2.tdd),
@@ -38,4 +37,4 @@ data: {
 }
 freemarkerLinks: {
     includes: includes/
-}
\ No newline at end of file
+}

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/980dc87b/exec/java-exec/src/main/codegen/data/HiveTypes.tdd
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/codegen/data/HiveTypes.tdd b/exec/java-exec/src/main/codegen/data/HiveTypes.tdd
deleted file mode 100644
index c23f981..0000000
--- a/exec/java-exec/src/main/codegen/data/HiveTypes.tdd
+++ /dev/null
@@ -1,100 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-# http:# www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-{
-  map: [
-    {
-      hiveType: "BOOLEAN",
-      hiveOI: "BooleanObjectInspector",
-      serdeConstant: "BOOLEAN_TYPE_NAME",
-      javaType: "boolean",
-      minorType: "BIT",
-      holder: "Bit"
-    },
-    {
-      hiveType: "BYTE",
-      hiveOI: "ByteObjectInspector",
-      serdeConstant: "TINYINT_TYPE_NAME",
-      javaType: "byte",
-      minorType: "TINYINT",
-      holder: "TinyInt"
-    },
-    {
-      hiveType: "SHORT",
-      hiveOI: "ShortObjectInspector",
-      serdeConstant: "SMALLINT_TYPE_NAME",
-      javaType: "short",
-      minorType: "SMALLINT",
-      holder: "SmallInt"
-    },
-    {
-      hiveType: "INT",
-      hiveOI: "IntObjectInspector",
-      serdeConstant: "INT_TYPE_NAME",
-      javaType: "int",
-      minorType: "INT",
-      holder: "Int"
-    },
-    {
-      hiveType: "LONG",
-      hiveOI: "LongObjectInspector",
-      serdeConstant: "BIGINT_TYPE_NAME",
-      javaType: "long",
-      minorType: "BIGINT",
-      holder: "BigInt"
-    },
-    {
-      hiveType: "FLOAT",
-      hiveOI: "FloatObjectInspector",
-      serdeConstant: "FLOAT_TYPE_NAME",
-      javaType: "float",
-      minorType: "FLOAT4",
-      holder: "Float4"
-    },
-    {
-      hiveType: "DOUBLE",
-      hiveOI: "DoubleObjectInspector",
-      serdeConstant: "DOUBLE_TYPE_NAME",
-      javaType: "double",
-      minorType: "FLOAT8",
-      holder: "Float8"
-    },
-    {
-      hiveType: "VARCHAR",
-      hiveOI: "HiveVarcharObjectInspector",
-      serdeConstant: "VARCHAR_TYPE_NAME",
-      javaType: "",
-      minorType: "VARCHAR",
-      holder: "VarChar"
-    },
-    {
-      hiveType: "STRING",
-      hiveOI: "StringObjectInspector",
-      serdeConstant: "STRING_TYPE_NAME",
-      javaType: "",
-      minorType: "VAR16CHAR",
-      holder: "Var16Char"
-    },
-    {
-      hiveType: "BINARY",
-      hiveOI: "BinaryObjectInspector",
-      serdeConstant: "BINARY_TYPE_NAME",
-      javaType: "",
-      minorType: "VARBINARY",
-      holder: "VarBinary"
-    }
-  ]
-}

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/980dc87b/exec/java-exec/src/main/codegen/templates/ObjectInspectorHelper.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/codegen/templates/ObjectInspectorHelper.java b/exec/java-exec/src/main/codegen/templates/ObjectInspectorHelper.java
deleted file mode 100644
index 22a9eb2..0000000
--- a/exec/java-exec/src/main/codegen/templates/ObjectInspectorHelper.java
+++ /dev/null
@@ -1,192 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-<@pp.dropOutputFile />
-<@pp.changeOutputFile name="/org/apache/drill/exec/expr/fn/impl/hive/ObjectInspectorHelper.java" />
-
-<#include "/@includes/license.ftl" />
-
-package org.apache.drill.exec.expr.fn.impl.hive;
-
-import com.sun.codemodel.*;
-
-import org.apache.drill.common.types.TypeProtos;
-import org.apache.drill.common.types.TypeProtos.MinorType;
-import org.apache.drill.exec.expr.DirectExpression;
-import org.apache.drill.exec.expr.TypeHelper;
-import org.apache.drill.exec.expr.holders.*;
-import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
-import org.apache.hadoop.hive.serde2.objectinspector.PrimitiveObjectInspector;
-import org.apache.hadoop.hive.serde2.objectinspector.PrimitiveObjectInspector.PrimitiveCategory;
-import org.apache.hadoop.hive.serde2.objectinspector.primitive.*;
-
-import java.util.HashMap;
-import java.util.Map;
-
-public class ObjectInspectorHelper {
-  static final org.slf4j.Logger logger = org.slf4j.LoggerFactory.getLogger(ObjectInspectorHelper.class);
-
-  private static Map<MinorType, Class> OIMAP = new HashMap<>();
-  static {
-<#list drillOI.map as entry>
-    OIMAP.put(MinorType.${entry.minorType}, Drill${entry.holder}ObjectInspector.class);
-</#list>
-  }
-
-  public static ObjectInspector getDrillObjectInspector(MinorType drillType) {
-    if (OIMAP.containsKey(drillType)) {
-      try {
-        return (ObjectInspector)OIMAP.get(drillType).newInstance();
-      } catch(InstantiationException | IllegalAccessException e) {
-        throw new RuntimeException("Failed to instantiate ObjectInspector", e);
-      }
-    }
-
-    throw new UnsupportedOperationException(drillType.toString());
-  }
-
-  public static JBlock initReturnValueHolder(JCodeModel m, JVar returnValueHolder, ObjectInspector oi, MinorType returnType) {
-    JBlock block = new JBlock(false, false);
-    switch(oi.getCategory()) {
-      case PRIMITIVE: {
-        PrimitiveObjectInspector poi = (PrimitiveObjectInspector)oi;
-        switch(poi.getPrimitiveCategory()) {
-<#list drillOI.map as entry>
-          case ${entry.hiveType}:{
-            JType holderClass = TypeHelper.getHolderType(m, returnType, TypeProtos.DataMode.OPTIONAL);
-            block.assign(returnValueHolder, JExpr._new(holderClass));
-
-          <#if entry.hiveType == "VARCHAR" || entry.hiveType == "STRING" || entry.hiveType == "BINARY">
-            block.assign(returnValueHolder.ref("buffer"),
-              m.directClass(io.netty.buffer.Unpooled.class.getCanonicalName())
-                .staticInvoke("wrappedBuffer")
-                .arg(JExpr.newArray(m._ref(byte.class), JExpr.lit(1000))));
-          </#if>
-            return block;
-          }
-</#list>
-          default:
-            throw new UnsupportedOperationException(String.format("Received unknown/unsupported type '%s'", poi.getPrimitiveCategory().toString()));
-        }
-      }
-
-      case MAP:
-      case LIST:
-      case STRUCT:
-      default:
-        throw new UnsupportedOperationException(String.format("Received unknown/unsupported type '%s'", oi.getCategory().toString()));
-    }
-  }
-
-  private static Map<PrimitiveCategory, MinorType> TYPE_HIVE2DRILL = new HashMap<>();
-  static {
-<#list drillOI.map as entry>
-    TYPE_HIVE2DRILL.put(PrimitiveCategory.${entry.hiveType}, MinorType.${entry.minorType});
-</#list>
-  }
-
-  public static MinorType getDrillType(ObjectInspector oi) {
-    switch(oi.getCategory()) {
-      case PRIMITIVE: {
-        PrimitiveObjectInspector poi = (PrimitiveObjectInspector)oi;
-        if (TYPE_HIVE2DRILL.containsKey(poi.getPrimitiveCategory())) {
-          return TYPE_HIVE2DRILL.get(poi.getPrimitiveCategory());
-        }
-        throw new UnsupportedOperationException();
-      }
-
-      case MAP:
-      case LIST:
-      case STRUCT:
-      default:
-        throw new UnsupportedOperationException();
-    }
-  }
-
-  public static JBlock getDrillObject(JCodeModel m, ObjectInspector oi,
-    JVar returnOI, JVar returnValueHolder, JVar returnValue) {
-    JBlock block = new JBlock(false, false);
-    switch(oi.getCategory()) {
-      case PRIMITIVE: {
-        PrimitiveObjectInspector poi = (PrimitiveObjectInspector)oi;
-        switch(poi.getPrimitiveCategory()) {
-<#list drillOI.map as entry>
-          case ${entry.hiveType}:{
-            JConditional jc = block._if(returnValue.eq(JExpr._null()));
-            jc._then().assign(returnValueHolder.ref("isSet"), JExpr.lit(0));
-            jc._else().assign(returnValueHolder.ref("isSet"), JExpr.lit(1));
-            JVar castedOI = jc._else().decl(
-              m.directClass(${entry.hiveOI}.class.getCanonicalName()), "castOI", JExpr._null());
-            jc._else().assign(castedOI,
-              JExpr.cast(m.directClass(${entry.hiveOI}.class.getCanonicalName()), returnOI));
-
-          <#if entry.hiveType == "BOOLEAN">
-            JConditional booleanJC = jc._else()._if(castedOI.invoke("get").arg(returnValue));
-            booleanJC._then().assign(returnValueHolder.ref("value"), JExpr.lit(1));
-            booleanJC._else().assign(returnValueHolder.ref("value"), JExpr.lit(0));
-
-          <#elseif entry.hiveType == "VARCHAR">
-            JVar data = jc._else().decl(m.directClass(byte[].class.getCanonicalName()), "data",
-              castedOI.invoke("getPrimitiveJavaObject").arg(returnValue)
-                      .invoke("getValue")
-                      .invoke("getBytes"));
-
-            jc._else().add(returnValueHolder.ref("buffer")
-              .invoke("setBytes").arg(JExpr.lit(0)).arg(data));
-
-
-            jc._else().assign(returnValueHolder.ref("start"), JExpr.lit(0));
-            jc._else().assign(returnValueHolder.ref("end"), data.ref("length"));
-
-          <#elseif entry.hiveType == "STRING">
-            JVar data = jc._else().decl(m.directClass(byte[].class.getCanonicalName()), "data",
-              castedOI.invoke("getPrimitiveJavaObject").arg(returnValue)
-                      .invoke("getBytes").arg(DirectExpression.direct("com.google.common.base.Charsets.UTF_16")));
-            jc._else().add(returnValueHolder.ref("buffer")
-              .invoke("setBytes").arg(JExpr.lit(0)).arg(data));
-            jc._else().assign(returnValueHolder.ref("start"), JExpr.lit(0));
-            jc._else().assign(returnValueHolder.ref("end"), data.ref("length"));
-          <#elseif entry.hiveType == "BINARY">
-
-            JVar data = jc._else().decl(m.directClass(byte[].class.getCanonicalName()), "data",
-              castedOI.invoke("getPrimitiveJavaObject").arg(returnValue));
-            jc._else().add(returnValueHolder.ref("buffer")
-                .invoke("setBytes").arg(JExpr.lit(0)).arg(data));
-            jc._else().assign(returnValueHolder.ref("start"), JExpr.lit(0));
-            jc._else().assign(returnValueHolder.ref("end"), data.ref("length"));
-
-          <#else>
-            jc._else().assign(returnValueHolder.ref("value"),
-              castedOI.invoke("get").arg(returnValue));
-          </#if>
-            return block;
-          }
-
-</#list>
-          default:
-            throw new UnsupportedOperationException(String.format("Received unknown/unsupported type '%s'", poi.getPrimitiveCategory().toString()));
-        }
-      }
-
-      case MAP:
-      case LIST:
-      case STRUCT:
-      default:
-        throw new UnsupportedOperationException(String.format("Received unknown/unsupported type '%s'", oi.getCategory().toString()));
-    }
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/980dc87b/exec/java-exec/src/main/codegen/templates/ObjectInspectors.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/codegen/templates/ObjectInspectors.java b/exec/java-exec/src/main/codegen/templates/ObjectInspectors.java
deleted file mode 100644
index 9a8c837..0000000
--- a/exec/java-exec/src/main/codegen/templates/ObjectInspectors.java
+++ /dev/null
@@ -1,114 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-<@pp.dropOutputFile />
-
-<#list drillOI.map as entry>
-<@pp.changeOutputFile name="/org/apache/drill/exec/expr/fn/impl/hive/Drill${entry.holder}ObjectInspector.java" />
-
-<#include "/@includes/license.ftl" />
-
-package org.apache.drill.exec.expr.fn.impl.hive;
-
-import org.apache.drill.exec.expr.holders.*;
-import org.apache.hadoop.hive.common.type.HiveVarchar;
-import org.apache.hadoop.hive.serde.serdeConstants;
-import org.apache.hadoop.hive.serde2.io.HiveVarcharWritable;
-import org.apache.hadoop.hive.serde2.objectinspector.primitive.*;
-import org.apache.hadoop.io.Text;
-
-public class Drill${entry.holder}ObjectInspector extends AbstractPrimitiveObjectInspector
-  implements ${entry.hiveOI} {
-
-  @Override
-  public String getTypeName() {
-    return serdeConstants.${entry.serdeConstant};
-  }
-
-<#if entry.minorType == "VARCHAR">
-  @Override
-  public HiveVarcharWritable getPrimitiveWritableObject(Object o) {
-    HiveVarcharWritable valW = new HiveVarcharWritable();
-    valW.set(getPrimitiveJavaObject(o));
-    return valW;
-  }
-
-  @Override
-  public HiveVarchar getPrimitiveJavaObject(Object o) {
-    String val = ((VarCharHolder)o).toString();
-    return new HiveVarchar(val, HiveVarchar.MAX_VARCHAR_LENGTH);
-  }
-<#elseif entry.minorType == "VAR16CHAR">
-@Override
-  public Text getPrimitiveWritableObject(Object o) {
-    throw new UnsupportedOperationException();
-  }
-
-  @Override
-  public String getPrimitiveJavaObject(Object o) {
-    if (o instanceof Var16CharHolder)
-    return ((Var16CharHolder)o).toString();
-    else
-    return ((NullableVar16CharHolder)o).toString();
-  }
-<#elseif entry.minorType == "VARBINARY">  
-@Override
-public org.apache.hadoop.io.BytesWritable getPrimitiveWritableObject(Object o) {
-  throw new UnsupportedOperationException();
-}
-
-@Override
-public byte[] getPrimitiveJavaObject(Object o) {
-  if (o instanceof VarBinaryHolder){
-    VarBinaryHolder h = (VarBinaryHolder)o;
-    byte[] buf = new byte[h.end-h.start];
-    h.buffer.getBytes(h.start, buf, 0, h.end-h.start);
-    return buf;
-  }else{
-    NullableVarBinaryHolder h = (NullableVarBinaryHolder)o;
-    byte[] buf = new byte[h.end-h.start];
-    h.buffer.getBytes(h.start, buf, 0, h.end-h.start);
-    return buf;
-    
-  }
-}
-<#elseif entry.minorType == "BIT">
-  @Override
-  public boolean get(Object o) {
-    if (o instanceof BitHolder)
-    return ((BitHolder)o).value == 0 ? false : true;
-    else
-    return ((NullableBitHolder)o).value == 0 ? false : true;
-  }
-<#else>
-  @Override
-  public ${entry.javaType} get(Object o) {
-    if (o instanceof ${entry.holder}Holder)
-    return ((${entry.holder}Holder)o).value;
-    else
-    return ((Nullable${entry.holder}Holder)o).value;
-  }
-</#if>
-
-  @Override
-  public PrimitiveCategory getPrimitiveCategory() {
-    return PrimitiveCategory.${entry.hiveType};
-  }
-}
-
-</#list>
-

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/980dc87b/exec/java-exec/src/main/java/org/apache/drill/exec/expr/HiveFuncHolderExpr.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/expr/HiveFuncHolderExpr.java b/exec/java-exec/src/main/java/org/apache/drill/exec/expr/HiveFuncHolderExpr.java
deleted file mode 100644
index fd19e3d..0000000
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/expr/HiveFuncHolderExpr.java
+++ /dev/null
@@ -1,76 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.drill.exec.expr;
-
-import java.util.Iterator;
-import java.util.List;
-
-import org.apache.drill.common.expression.ExpressionPosition;
-import org.apache.drill.common.expression.FunctionHolderExpression;
-import org.apache.drill.common.expression.LogicalExpression;
-import org.apache.drill.common.expression.fn.FuncHolder;
-import org.apache.drill.common.expression.visitors.ExprVisitor;
-import org.apache.drill.common.types.TypeProtos.MajorType;
-
-import org.apache.drill.exec.expr.fn.HiveFuncHolder;
-
-public class HiveFuncHolderExpr extends FunctionHolderExpression implements Iterable<LogicalExpression>{
-  static final org.slf4j.Logger logger = org.slf4j.LoggerFactory.getLogger(DrillFuncHolderExpr.class);
-  private HiveFuncHolder holder;
-
-  public HiveFuncHolderExpr(String nameUsed, HiveFuncHolder holder, List<LogicalExpression> args, ExpressionPosition pos) {
-    super(nameUsed, pos, args);
-    this.holder = holder;
-  }
-
-  @Override
-  public MajorType getMajorType() {
-    return holder.getReturnType();
-  }
-
-  @Override
-  public Iterator<LogicalExpression> iterator() {
-    return args.iterator();
-  }
-
-  public FuncHolder getHolder() {
-    return holder;
-  }
-
-  @Override
-  public boolean isAggregating() {
-    return holder.isAggregating();
-  }
-
-  @Override
-  public boolean argConstantOnly(int i) {
-    // looks like hive UDF has no notion of constant argument input
-    return false;
-  }
-
-  @Override
-  public boolean isRandom() {
-    return holder.isRandom();
-  }
-  
-  @Override
-  public HiveFuncHolderExpr copy(List<LogicalExpression> args) {
-    return new HiveFuncHolderExpr(this.nameUsed, this.holder, args, this.getPosition());
-  }
-  
-}

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/980dc87b/exec/java-exec/src/main/java/org/apache/drill/exec/expr/fn/HiveFuncHolder.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/expr/fn/HiveFuncHolder.java b/exec/java-exec/src/main/java/org/apache/drill/exec/expr/fn/HiveFuncHolder.java
deleted file mode 100644
index 813d4c5..0000000
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/expr/fn/HiveFuncHolder.java
+++ /dev/null
@@ -1,272 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.drill.exec.expr.fn;
-
-import com.sun.codemodel.JBlock;
-import com.sun.codemodel.JCatchBlock;
-import com.sun.codemodel.JClass;
-import com.sun.codemodel.JCodeModel;
-import com.sun.codemodel.JConditional;
-import com.sun.codemodel.JExpr;
-import com.sun.codemodel.JInvocation;
-import com.sun.codemodel.JTryBlock;
-import com.sun.codemodel.JVar;
-import org.apache.drill.common.expression.ExpressionPosition;
-import org.apache.drill.common.expression.FunctionHolderExpression;
-import org.apache.drill.common.expression.LogicalExpression;
-import org.apache.drill.common.types.TypeProtos;
-import org.apache.drill.common.types.TypeProtos.MajorType;
-import org.apache.drill.exec.expr.ClassGenerator;
-import org.apache.drill.exec.expr.ClassGenerator.HoldingContainer;
-import org.apache.drill.exec.expr.HiveFuncHolderExpr;
-import org.apache.drill.exec.expr.TypeHelper;
-import org.apache.drill.exec.expr.fn.impl.hive.DrillDeferredObject;
-import org.apache.drill.exec.expr.fn.impl.hive.ObjectInspectorHelper;
-import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
-import org.apache.hadoop.hive.ql.exec.UDF;
-import org.apache.hadoop.hive.ql.udf.generic.GenericUDFBridge;
-import org.apache.hadoop.hive.ql.udf.generic.GenericUDF;
-
-import java.util.List;
-
-public class HiveFuncHolder extends AbstractFuncHolder {
-  static final org.slf4j.Logger logger = org.slf4j.LoggerFactory.getLogger(FunctionImplementationRegistry.class);
-
-  private MajorType[] argTypes;
-  private ObjectInspector returnOI;
-  private MajorType returnType;
-  private Class<? extends GenericUDF> genericUdfClazz;
-  private boolean isGenericUDF = true;
-  private Class<? extends UDF> udfClazz = null;
-  private String udfName = "";
-  private boolean isRandom;
-
-
-  /**
-   * Create holder for GenericUDF
-   * @param genericUdfClazz implementation class
-   * @param argTypes
-   * @param returnOI
-   * @param returnType
-   */
-  public HiveFuncHolder(Class<? extends GenericUDF> genericUdfClazz, MajorType[] argTypes,
-                        ObjectInspector returnOI, MajorType returnType, boolean isRandom) {
-    this.genericUdfClazz = genericUdfClazz;
-    this.argTypes = argTypes;
-    this.returnOI = returnOI;
-    this.returnType = returnType;
-    this.isRandom = isRandom;
-  }
-
-  /**
-   * Create holder for UDF
-   * @param udfName name of the UDF class
-   * @param udfClazz UDF implementation class
-   * @param argTypes
-   * @param returnOI
-   * @param returnType
-   */
-  public HiveFuncHolder(String udfName, Class< ? extends UDF> udfClazz, MajorType[] argTypes,
-                        ObjectInspector returnOI, MajorType returnType, boolean isRandom) {
-    this(GenericUDFBridge.class, argTypes, returnOI, returnType, isRandom);
-    this.isGenericUDF = false;
-    this.udfClazz = udfClazz;
-    this.udfName = udfName;
-  }
-
-  /**
-   * UDF return type
-   */
-  public MajorType getReturnType() {
-    return returnType;
-  }
-
-  /**
-   * Aggregate function
-   */
-  public boolean isAggregating() {
-    // currently only simple UDFS are supported
-    return false;
-  }
-
-  /**
-   * is the function non-deterministic?
-   */
-  public boolean isRandom() {
-    return isRandom;
-  }
-
-  /**
-   * Start generating code
-   * @return workspace variables
-   */
-  @Override
-  public JVar[] renderStart(ClassGenerator<?> g, HoldingContainer[] inputVariables){
-    JVar[] workspaceJVars = new JVar[5];
-
-    workspaceJVars[0] = g.declareClassField("returnOI", g.getModel()._ref(ObjectInspector.class));
-    workspaceJVars[1] = g.declareClassField("udfInstance", g.getModel()._ref(GenericUDF.class));
-    workspaceJVars[2] = g.declareClassField("deferredObjects", g.getModel()._ref(DrillDeferredObject[].class));
-    workspaceJVars[3] = g.declareClassField("arguments", g.getModel()._ref(DrillDeferredObject[].class));
-    workspaceJVars[4] = g.declareClassField("returnValueHolder",
-      TypeHelper.getHolderType(g.getModel(), returnType.getMinorType(), TypeProtos.DataMode.OPTIONAL));
-
-    return workspaceJVars;
-  }
-
-  /**
-   * Complete code generation
-   * @param g
-   * @param inputVariables
-   * @param workspaceJVars
-   * @return HoldingContainer for return value
-   */
-  @Override
-  public HoldingContainer renderEnd(ClassGenerator<?> g, HoldingContainer[] inputVariables, JVar[]  workspaceJVars) {
-    generateSetup(g, workspaceJVars);
-    return generateEval(g, inputVariables, workspaceJVars);
-  }
-
-  private JInvocation getUDFInstance(JCodeModel m) {
-    if (isGenericUDF) {
-      return JExpr._new(m.directClass(genericUdfClazz.getCanonicalName()));
-    } else {
-      return JExpr._new(m.directClass(GenericUDFBridge.class.getCanonicalName()))
-        .arg(JExpr.lit(udfName))
-        .arg(JExpr.lit(false))
-        .arg(JExpr.dotclass(m.directClass(udfClazz.getCanonicalName())));
-    }
-  }
-
-  @Override
-  public FunctionHolderExpression getExpr(String name, List<LogicalExpression> args, ExpressionPosition pos) {
-    return new HiveFuncHolderExpr(name, this, args, pos);
-  }
-
-  private void generateSetup(ClassGenerator<?> g, JVar[] workspaceJVars) {
-    JCodeModel m = g.getModel();
-    JBlock sub = new JBlock(true, true);
-
-    // declare and instantiate argument ObjectInspector's
-    JVar oiArray = sub.decl(
-      m._ref(ObjectInspector[].class),
-      "argOIs",
-      JExpr.newArray(m._ref(ObjectInspector.class), argTypes.length));
-
-    JClass oih = m.directClass(ObjectInspectorHelper.class.getCanonicalName());
-    JClass mt = m.directClass(TypeProtos.MinorType.class.getCanonicalName());
-    for(int i=0; i<argTypes.length; i++) {
-      sub.assign(
-        oiArray.component(JExpr.lit(i)),
-        oih.staticInvoke("getDrillObjectInspector")
-          .arg(mt.staticInvoke("valueOf")
-            .arg(JExpr.lit(argTypes[i].getMinorType().getNumber()))));
-    }
-
-    // declare and instantiate DeferredObject array
-    sub.assign(workspaceJVars[2], JExpr.newArray(m._ref(DrillDeferredObject.class), argTypes.length));
-
-    for(int i=0; i<argTypes.length; i++) {
-      sub.assign(
-        workspaceJVars[2].component(JExpr.lit(i)),
-        JExpr._new(m.directClass(DrillDeferredObject.class.getCanonicalName())));
-    }
-
-    // declare empty array for argument deferred objects
-    sub.assign(workspaceJVars[3], JExpr.newArray(m._ref(DrillDeferredObject.class), argTypes.length));
-
-    // create new instance of the UDF class
-    sub.assign(workspaceJVars[1], getUDFInstance(m));
-
-    // create try..catch block to initialize the UDF instance with argument OIs
-    JTryBlock udfInitTry = sub._try();
-    udfInitTry.body().assign(
-      workspaceJVars[0],
-      workspaceJVars[1].invoke("initialize")
-      .arg(oiArray));
-
-    JCatchBlock udfInitCatch = udfInitTry._catch(m.directClass(Exception.class.getCanonicalName()));
-    JVar exVar = udfInitCatch.param("ex");
-    udfInitCatch.body()
-      ._throw(JExpr._new(m.directClass(RuntimeException.class.getCanonicalName()))
-        .arg(JExpr.lit(String.format("Failed to initialize GenericUDF"))).arg(exVar));
-
-    sub.add(ObjectInspectorHelper.initReturnValueHolder(m, workspaceJVars[4], returnOI, returnType.getMinorType()));
-
-    // now add it to the doSetup block in Generated class
-    JBlock setup = g.getBlock(ClassGenerator.BlockType.SETUP);
-    setup.directStatement(String.format("/** start %s for function %s **/ ",
-      ClassGenerator.BlockType.SETUP.name(), genericUdfClazz.getName() + (!isGenericUDF ? "("+udfName+")" : "")));
-
-    setup.add(sub);
-
-    setup.directStatement(String.format("/** end %s for function %s **/ ",
-      ClassGenerator.BlockType.SETUP.name(), genericUdfClazz.getName() + (!isGenericUDF ? "("+udfName+")" : "")));
-  }
-
-  private HoldingContainer generateEval(ClassGenerator<?> g, HoldingContainer[] inputVariables, JVar[] workspaceJVars) {
-
-    HoldingContainer out = g.declare(returnType);
-
-    JCodeModel m = g.getModel();
-    JBlock sub = new JBlock(true, true);
-
-    // initialize DeferredObject's. For an optional type, assign the value holder only if it is not null
-    for(int i=0; i<argTypes.length; i++) {
-      if (inputVariables[i].isOptional()) {
-        JBlock conditionalBlock = new JBlock(false, false);
-        JConditional jc = conditionalBlock._if(inputVariables[i].getIsSet().ne(JExpr.lit(0)));
-        jc._then().assign(workspaceJVars[3].component(JExpr.lit(i)), workspaceJVars[2].component(JExpr.lit(i)));
-        jc._then().assign(JExpr.ref(workspaceJVars[3].component(JExpr.lit(i)), "valueHolder"), inputVariables[i].getHolder());
-        jc._else().assign(workspaceJVars[3].component(JExpr.lit(i)), JExpr._null());
-        sub.add(conditionalBlock);
-      } else {
-        sub.assign(workspaceJVars[3].component(JExpr.lit(i)), workspaceJVars[2].component(JExpr.lit(i)));
-        sub.assign(JExpr.ref(workspaceJVars[3].component(JExpr.lit(i)), "valueHolder"), inputVariables[i].getHolder());
-      }
-    }
-
-    // declare generic object for storing return value from GenericUDF.evaluate
-    JVar retVal = sub.decl(m._ref(Object.class), "ret");
-
-    // create try..catch block to call the GenericUDF instance with given input
-    JTryBlock udfEvalTry = sub._try();
-    udfEvalTry.body().assign(retVal,
-      workspaceJVars[1].invoke("evaluate").arg(workspaceJVars[3]));
-
-    JCatchBlock udfEvalCatch = udfEvalTry._catch(m.directClass(Exception.class.getCanonicalName()));
-    JVar exVar = udfEvalCatch.param("ex");
-    udfEvalCatch.body()
-      ._throw(JExpr._new(m.directClass(RuntimeException.class.getCanonicalName()))
-        .arg(JExpr.lit(String.format("GenericUDF.evaluate method failed"))).arg(exVar));
-
-    // get the ValueHolder from retVal and return ObjectInspector
-    sub.add(ObjectInspectorHelper.getDrillObject(m, returnOI, workspaceJVars[0], workspaceJVars[4], retVal));
-    sub.assign(out.getHolder(), workspaceJVars[4]);
-
-    // now add it to the doEval block in Generated class
-    JBlock setup = g.getBlock(ClassGenerator.BlockType.EVAL);
-    setup.directStatement(String.format("/** start %s for function %s **/ ",
-      ClassGenerator.BlockType.EVAL.name(), genericUdfClazz.getName() + (!isGenericUDF ? "("+udfName+")" : "")));
-    setup.add(sub);
-    setup.directStatement(String.format("/** end %s for function %s **/ ",
-      ClassGenerator.BlockType.EVAL.name(), genericUdfClazz.getName() + (!isGenericUDF ? "("+udfName+")" : "")));
-
-    return out;
-  }
-}


[25/32] git commit: DRILL-1025: Refactoring - Define function holder and registry interfaces

Posted by ja...@apache.org.
DRILL-1025: Refactoring - Define function holder and registry interfaces

This change is needed in order to pull the Hive storage/function source code out of the
'exec/java-exec' module into the 'contrib/storage-hive' module. Currently 'exec/java-exec'
depends on 'contrib/storage-hive' (which today just builds a shaded hive-exec jar).
Ideally 'contrib/storage-hive' should contain the Hive storage/function source code so
that there is a clear separation between the core execution engine and the Hive storage
engine code.
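
To make the shape of this refactoring concrete, the following is a minimal,
self-contained Java sketch of the interface split the commit introduces: a FuncHolder
marker interface in 'common', an AbstractFuncHolder base class in 'exec/java-exec', and
callers that dispatch polymorphically instead of branching on instanceof. The
String-based rendering and the Simple*Holder classes are illustrative stand-ins, not
Drill APIs; the real render methods work with com.sun.codemodel types.

import java.util.Arrays;
import java.util.List;

/* Marker interface; in Drill this lives in the 'common' module. */
interface FuncHolder {}

/* Base class; in Drill this lives in 'exec/java-exec' and uses codemodel types. */
abstract class AbstractFuncHolder implements FuncHolder {
  abstract String renderStart(List<String> inputs);

  void renderMiddle(List<String> inputs) {
    // default implementation adds no code
  }

  abstract String renderEnd(List<String> inputs);

  boolean isNested() {
    return false;
  }
}

/* Hypothetical concrete holders standing in for DrillFuncHolder / HiveFuncHolder. */
class SimpleDrillHolder extends AbstractFuncHolder {
  String renderStart(List<String> inputs) { return "declare workspace vars"; }
  String renderEnd(List<String> inputs)   { return "drill eval" + inputs; }
}

class SimpleHiveHolder extends AbstractFuncHolder {
  String renderStart(List<String> inputs) { return "init UDF instance"; }
  String renderEnd(List<String> inputs)   { return "hive eval" + inputs; }
}

public class HolderDispatchSketch {
  /* After the refactoring, visitors such as EvaluationVisitor no longer need
     per-implementation instanceof branches; they cast once and dispatch. */
  static String visit(FuncHolder h, List<String> args) {
    AbstractFuncHolder holder = (AbstractFuncHolder) h;
    holder.renderStart(args);
    holder.renderMiddle(args);
    return holder.renderEnd(args);
  }

  public static void main(String[] args) {
    List<String> in = Arrays.asList("a", "b");
    System.out.println(visit(new SimpleDrillHolder(), in));
    System.out.println(visit(new SimpleHiveHolder(), in));
  }
}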


Project: http://git-wip-us.apache.org/repos/asf/incubator-drill/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-drill/commit/026d51a2
Tree: http://git-wip-us.apache.org/repos/asf/incubator-drill/tree/026d51a2
Diff: http://git-wip-us.apache.org/repos/asf/incubator-drill/diff/026d51a2

Branch: refs/heads/master
Commit: 026d51a2755faed38df1b0c7ecd84c5c1dab687b
Parents: 136614f
Author: vkorukanti <ve...@gmail.com>
Authored: Sun Jun 15 23:49:00 2014 -0700
Committer: Jacques Nadeau <ja...@apache.org>
Committed: Fri Jun 20 10:56:16 2014 -0700

----------------------------------------------------------------------
 .../expression/FunctionHolderExpression.java    |   5 +-
 .../drill/common/expression/fn/FuncHolder.java  |  23 +++
 .../drill/exec/expr/DrillFuncHolderExpr.java    |   9 +-
 .../drill/exec/expr/EvaluationVisitor.java      |  51 ++----
 .../exec/expr/ExpressionTreeMaterializer.java   |  32 ++--
 .../drill/exec/expr/HiveFuncHolderExpr.java     |   3 +-
 .../drill/exec/expr/fn/AbstractFuncHolder.java  |  45 +++++
 .../drill/exec/expr/fn/DrillFuncHolder.java     |  14 +-
 .../fn/DrillFunctionImplementationRegistry.java |  65 --------
 .../exec/expr/fn/DrillFunctionRegistry.java     |  86 ++++++++++
 .../exec/expr/fn/FunctionGenerationHelper.java  |  12 +-
 .../expr/fn/FunctionImplementationRegistry.java | 107 +++++++++++-
 .../drill/exec/expr/fn/HiveFuncHolder.java      |  16 +-
 .../fn/HiveFunctionImplementationRegistry.java  | 164 ------------------
 .../exec/expr/fn/HiveFunctionRegistry.java      | 166 +++++++++++++++++++
 .../exec/expr/fn/PluggableFunctionRegistry.java |  39 +++++
 .../exec/planner/sql/DrillOperatorTable.java    |  29 +---
 .../exec/planner/sql/DrillSqlAggOperator.java   |   2 +-
 .../exec/planner/sql/DrillSqlOperator.java      |   2 +-
 .../exec/physical/impl/TestSimpleFunctions.java |   2 +-
 20 files changed, 535 insertions(+), 337 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/026d51a2/common/src/main/java/org/apache/drill/common/expression/FunctionHolderExpression.java
----------------------------------------------------------------------
diff --git a/common/src/main/java/org/apache/drill/common/expression/FunctionHolderExpression.java b/common/src/main/java/org/apache/drill/common/expression/FunctionHolderExpression.java
index eb87522..aa14e4d 100644
--- a/common/src/main/java/org/apache/drill/common/expression/FunctionHolderExpression.java
+++ b/common/src/main/java/org/apache/drill/common/expression/FunctionHolderExpression.java
@@ -19,6 +19,7 @@ package org.apache.drill.common.expression;
 
 import com.google.common.collect.ImmutableList;
 import com.google.common.collect.Lists;
+import org.apache.drill.common.expression.fn.FuncHolder;
 import org.apache.drill.common.expression.visitors.ExprVisitor;
 
 import java.util.List;
@@ -72,5 +73,7 @@ public abstract class FunctionHolderExpression extends LogicalExpressionBase {
    * @ return a copy of FunctionHolderExpression, with passed in argument list.
    */
   public abstract FunctionHolderExpression copy(List<LogicalExpression> args);
-  
+
+  /** Return the underlying function implementation holder. */
+  public abstract FuncHolder getHolder();
 }

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/026d51a2/common/src/main/java/org/apache/drill/common/expression/fn/FuncHolder.java
----------------------------------------------------------------------
diff --git a/common/src/main/java/org/apache/drill/common/expression/fn/FuncHolder.java b/common/src/main/java/org/apache/drill/common/expression/fn/FuncHolder.java
new file mode 100644
index 0000000..a8824e0
--- /dev/null
+++ b/common/src/main/java/org/apache/drill/common/expression/fn/FuncHolder.java
@@ -0,0 +1,23 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.drill.common.expression.fn;
+
+/** This should be removed once common and exec/java-exec modules are merged (DRILL-507). */
+public interface FuncHolder {
+
+}

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/026d51a2/exec/java-exec/src/main/java/org/apache/drill/exec/expr/DrillFuncHolderExpr.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/expr/DrillFuncHolderExpr.java b/exec/java-exec/src/main/java/org/apache/drill/exec/expr/DrillFuncHolderExpr.java
index 0341c45..785a8a6 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/expr/DrillFuncHolderExpr.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/expr/DrillFuncHolderExpr.java
@@ -20,13 +20,10 @@ package org.apache.drill.exec.expr;
 import java.util.Iterator;
 import java.util.List;
 
-import com.google.common.collect.ImmutableList;
-import com.google.common.collect.Lists;
-
 import org.apache.drill.common.expression.ExpressionPosition;
 import org.apache.drill.common.expression.FunctionHolderExpression;
 import org.apache.drill.common.expression.LogicalExpression;
-import org.apache.drill.common.expression.visitors.ExprVisitor;
+import org.apache.drill.common.expression.fn.FuncHolder;
 import org.apache.drill.common.types.TypeProtos.MajorType;
 import org.apache.drill.exec.expr.fn.DrillComplexWriterFuncHolder;
 import org.apache.drill.exec.expr.fn.DrillFuncHolder;
@@ -50,7 +47,8 @@ public class DrillFuncHolderExpr extends FunctionHolderExpression implements Ite
     return args.iterator();
   }
 
-  public DrillFuncHolder getHolder() {
+  @Override
+  public FuncHolder getHolder() {
     return holder;
   }
 
@@ -64,6 +62,7 @@ public class DrillFuncHolderExpr extends FunctionHolderExpression implements Ite
     return holder.isRandom();
   }
 
+  @Override
   public boolean argConstantOnly(int i) {
     return holder.isConstant(i);
   }

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/026d51a2/exec/java-exec/src/main/java/org/apache/drill/exec/expr/EvaluationVisitor.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/expr/EvaluationVisitor.java b/exec/java-exec/src/main/java/org/apache/drill/exec/expr/EvaluationVisitor.java
index f2019b8..9cefaf3 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/expr/EvaluationVisitor.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/expr/EvaluationVisitor.java
@@ -20,7 +20,6 @@ package org.apache.drill.exec.expr;
 import java.util.List;
 import java.util.Set;
 
-import org.apache.commons.io.input.NullReader;
 import org.apache.drill.common.expression.BooleanOperator;
 import org.apache.drill.common.expression.CastExpression;
 import org.apache.drill.common.expression.ConvertExpression;
@@ -48,21 +47,17 @@ import org.apache.drill.common.expression.ValueExpressions.QuotedString;
 import org.apache.drill.common.expression.ValueExpressions.TimeExpression;
 import org.apache.drill.common.expression.ValueExpressions.TimeStampExpression;
 import org.apache.drill.common.expression.visitors.AbstractExprVisitor;
-import org.apache.drill.common.types.TypeProtos.DataMode;
 import org.apache.drill.common.types.TypeProtos.MajorType;
 import org.apache.drill.common.types.TypeProtos.MinorType;
 import org.apache.drill.common.types.Types;
 import org.apache.drill.exec.compile.sig.ConstantExpressionIdentifier;
 import org.apache.drill.exec.expr.ClassGenerator.BlockType;
 import org.apache.drill.exec.expr.ClassGenerator.HoldingContainer;
-import org.apache.drill.exec.expr.fn.DrillFuncHolder;
-import org.apache.drill.exec.expr.fn.DrillBooleanOPHolder;
+import org.apache.drill.exec.expr.fn.AbstractFuncHolder;
 import org.apache.drill.exec.expr.fn.FunctionImplementationRegistry;
-import org.apache.drill.exec.expr.fn.HiveFuncHolder;
 import org.apache.drill.exec.physical.impl.filter.ReturnValueExpression;
 import org.apache.drill.exec.vector.ValueHolderHelper;
 import org.apache.drill.exec.vector.complex.reader.FieldReader;
-import org.apache.drill.exec.vector.complex.writer.FieldWriter;
 
 import com.google.common.collect.Lists;
 import com.sun.codemodel.JBlock;
@@ -112,45 +107,25 @@ public class EvaluationVisitor {
     @Override
     public HoldingContainer visitFunctionHolderExpression(FunctionHolderExpression holderExpr,
         ClassGenerator<?> generator) throws RuntimeException {
-      // TODO: hack: (Drill/Hive)FuncHolderExpr reference classes in exec so
-      // code generate methods can't be superclass FunctionHolderExpression
-      // which is defined in common
 
-      if (holderExpr instanceof DrillFuncHolderExpr) {
-        DrillFuncHolder holder = ((DrillFuncHolderExpr) holderExpr).getHolder();
-        
-
-        JVar[] workspaceVars = holder.renderStart(generator, null);
-
-        if (holder.isNested())
-          generator.getMappingSet().enterChild();
-
-        HoldingContainer[] args = new HoldingContainer[holderExpr.args.size()];
-        for (int i = 0; i < holderExpr.args.size(); i++) {
-          args[i] = holderExpr.args.get(i).accept(this, generator);
-        }
+      AbstractFuncHolder holder = (AbstractFuncHolder) holderExpr.getHolder();
 
-        holder.renderMiddle(generator, args, workspaceVars);
+      JVar[] workspaceVars = holder.renderStart(generator, null);
 
-        if (holder.isNested())
-          generator.getMappingSet().exitChild();
+      if (holder.isNested())
+        generator.getMappingSet().enterChild();
 
-        return holder.renderEnd(generator, args, workspaceVars);
-
-      } else if (holderExpr instanceof HiveFuncHolderExpr) {
+      HoldingContainer[] args = new HoldingContainer[holderExpr.args.size()];
+      for (int i = 0; i < holderExpr.args.size(); i++) {
+        args[i] = holderExpr.args.get(i).accept(this, generator);
+      }
 
-        HiveFuncHolder holder = ((HiveFuncHolderExpr) holderExpr).getHolder();
+      holder.renderMiddle(generator, args, workspaceVars);
 
-        HoldingContainer[] args = new HoldingContainer[holderExpr.args.size()];
-        for (int i = 0; i < holderExpr.args.size(); i++) {
-          args[i] = holderExpr.args.get(i).accept(this, generator);
-        }
-
-        return holder.renderEnd(generator, args, holder.renderStart(generator, null));
-      }
+      if (holder.isNested())
+        generator.getMappingSet().exitChild();
 
-      throw new UnsupportedOperationException(String.format("Unknown expression '%s'", holderExpr.getClass()
-          .getCanonicalName()));
+      return holder.renderEnd(generator, args, workspaceVars);
     }
 
     @Override

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/026d51a2/exec/java-exec/src/main/java/org/apache/drill/exec/expr/ExpressionTreeMaterializer.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/expr/ExpressionTreeMaterializer.java b/exec/java-exec/src/main/java/org/apache/drill/exec/expr/ExpressionTreeMaterializer.java
index 4e10d20..4594c43 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/expr/ExpressionTreeMaterializer.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/expr/ExpressionTreeMaterializer.java
@@ -20,7 +20,6 @@ package org.apache.drill.exec.expr;
 import java.util.List;
 
 import com.google.common.base.Function;
-import com.google.common.base.Joiner;
 import com.google.common.base.Optional;
 import com.google.common.base.Predicate;
 import com.google.common.collect.ImmutableList;
@@ -37,7 +36,6 @@ import org.apache.drill.common.expression.IfExpression;
 import org.apache.drill.common.expression.IfExpression.IfCondition;
 import org.apache.drill.common.expression.LogicalExpression;
 import org.apache.drill.common.expression.NullExpression;
-import org.apache.drill.common.expression.PathSegment;
 import org.apache.drill.common.expression.SchemaPath;
 import org.apache.drill.common.expression.TypedNullConstant;
 import org.apache.drill.common.expression.ValueExpressions;
@@ -66,10 +64,10 @@ import org.apache.drill.common.types.TypeProtos.MajorType;
 import org.apache.drill.common.types.TypeProtos.MinorType;
 import org.apache.drill.common.types.Types;
 import org.apache.drill.exec.expr.annotations.FunctionTemplate;
+import org.apache.drill.exec.expr.fn.AbstractFuncHolder;
 import org.apache.drill.exec.expr.fn.DrillComplexWriterFuncHolder;
 import org.apache.drill.exec.expr.fn.DrillFuncHolder;
 import org.apache.drill.exec.expr.fn.FunctionImplementationRegistry;
-import org.apache.drill.exec.expr.fn.HiveFuncHolder;
 import org.apache.drill.exec.record.TypedFieldId;
 import org.apache.drill.exec.record.VectorAccessible;
 import org.apache.drill.exec.resolver.FunctionResolver;
@@ -157,12 +155,8 @@ public class ExpressionTreeMaterializer {
       //replace with a new function call, since its argument could be changed.
       call = new FunctionCall(call.getName(), args, call.getPosition());
 
-      // First try to resolve as Drill function, if that fails try resolving as hive function
-      // TODO: Need to refactor the resolver to have generic interface for Drill and Hive functions
-
       FunctionResolver resolver = FunctionResolverFactory.getResolver(call);
-      DrillFuncHolder matchedFuncHolder =
-        resolver.getBestMatch(registry.getDrillRegistry().getMethods(call.getName()), call);
+      DrillFuncHolder matchedFuncHolder = registry.findDrillFunction(resolver, call);
 
       if (matchedFuncHolder instanceof DrillComplexWriterFuncHolder && ! allowComplexWriter) {
         errorCollector.addGeneralError(call.getPosition(), "Only ProjectRecordBatch could have complex writer function. You are using complex writer function " + call.getName() + " in a non-project operation!");
@@ -211,25 +205,26 @@ public class ExpressionTreeMaterializer {
             }
 
             FunctionCall castCall = new FunctionCall(castFuncName, castArgs, ExpressionPosition.UNKNOWN);
-            DrillFuncHolder matchedCastFuncHolder = resolver.getBestMatch(
-              registry.getDrillRegistry().getMethods(castFuncName), castCall);
+            DrillFuncHolder matchedCastFuncHolder = registry.findDrillFunction(resolver, castCall);
 
             if (matchedCastFuncHolder == null) {
               logFunctionResolutionError(errorCollector, castCall);
               return NullExpression.INSTANCE;
             }
 
-            argsWithCast.add(new DrillFuncHolderExpr(call.getName(), matchedCastFuncHolder, castArgs, ExpressionPosition.UNKNOWN));
+            argsWithCast.add(matchedCastFuncHolder.getExpr(call.getName(), castArgs, ExpressionPosition.UNKNOWN));
 
           }
         }
-        return new DrillFuncHolderExpr(call.getName(), matchedFuncHolder, argsWithCast, call.getPosition());
+
+        return matchedFuncHolder.getExpr(call.getName(), argsWithCast, call.getPosition());
       }
 
-      // as no drill func is found, search for the function in hive
-      HiveFuncHolder matchedHiveHolder = registry.getHiveRegistry().getFunction(call);
-      if (matchedHiveHolder != null)
-        return new HiveFuncHolderExpr(call.getName(), matchedHiveHolder, call.args, call.getPosition());
+      // as no drill func is found, search for a non-Drill function.
+      AbstractFuncHolder matchedNonDrillFuncHolder = registry.findNonDrillFunction(call);
+      if (matchedNonDrillFuncHolder != null) {
+        return matchedNonDrillFuncHolder.getExpr(call.getName(), call.args, call.getPosition());
+      }
 
       logFunctionResolutionError(errorCollector, call);
       return NullExpression.INSTANCE;
@@ -345,15 +340,14 @@ public class ExpressionTreeMaterializer {
       FunctionCall funcCall = new FunctionCall(funcName, args, ExpressionPosition.UNKNOWN);
       FunctionResolver resolver = FunctionResolverFactory.getResolver(funcCall);
 
-      DrillFuncHolder matchedConvertToNullableFuncHolder =
-          resolver.getBestMatch(registry.getDrillRegistry().getMethods(funcName), funcCall);
+      DrillFuncHolder matchedConvertToNullableFuncHolder = registry.findDrillFunction(resolver, funcCall);
 
       if (matchedConvertToNullableFuncHolder == null) {
         logFunctionResolutionError(errorCollector, funcCall);
         return NullExpression.INSTANCE;
       }
 
-      return new DrillFuncHolderExpr(funcName, matchedConvertToNullableFuncHolder, args, ExpressionPosition.UNKNOWN);
+      return matchedConvertToNullableFuncHolder.getExpr(funcName, args, ExpressionPosition.UNKNOWN);
     }
 
     private LogicalExpression rewriteNullExpression(LogicalExpression expr, MajorType type) {
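
The materializer change above makes the lookup order explicit: the registry facade is
asked for a Drill function first, and pluggable (for example Hive) registries are
consulted only when that fails. Below is a minimal sketch of that two-stage resolution,
under simplified assumptions: the Call class and the hard-coded lookups are illustrative
only, and the real findDrillFunction also takes a FunctionResolver.

class Call {
  final String name;
  Call(String name) { this.name = name; }
}

public class RegistrySketch {
  String findDrillFunction(Call call) {
    // stand-in for resolver.getBestMatch over the Drill function registry
    return call.name.equals("add") ? "drill:add" : null;
  }

  String findNonDrillFunction(Call call) {
    // stand-in for the pluggable (e.g. Hive) registry lookup
    return call.name.equals("my_hive_udf") ? "hive:my_hive_udf" : null;
  }

  String resolve(Call call) {
    String drill = findDrillFunction(call);
    if (drill != null) {
      return drill;
    }
    String other = findNonDrillFunction(call);
    if (other != null) {
      return other;
    }
    // the real code logs a resolution error and returns NullExpression.INSTANCE
    return "unresolved";
  }

  public static void main(String[] args) {
    RegistrySketch registry = new RegistrySketch();
    System.out.println(registry.resolve(new Call("add")));          // drill:add
    System.out.println(registry.resolve(new Call("my_hive_udf")));  // hive:my_hive_udf
  }
}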

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/026d51a2/exec/java-exec/src/main/java/org/apache/drill/exec/expr/HiveFuncHolderExpr.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/expr/HiveFuncHolderExpr.java b/exec/java-exec/src/main/java/org/apache/drill/exec/expr/HiveFuncHolderExpr.java
index c765d39..fd19e3d 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/expr/HiveFuncHolderExpr.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/expr/HiveFuncHolderExpr.java
@@ -23,6 +23,7 @@ import java.util.List;
 import org.apache.drill.common.expression.ExpressionPosition;
 import org.apache.drill.common.expression.FunctionHolderExpression;
 import org.apache.drill.common.expression.LogicalExpression;
+import org.apache.drill.common.expression.fn.FuncHolder;
 import org.apache.drill.common.expression.visitors.ExprVisitor;
 import org.apache.drill.common.types.TypeProtos.MajorType;
 
@@ -47,7 +48,7 @@ public class HiveFuncHolderExpr extends FunctionHolderExpression implements Iter
     return args.iterator();
   }
 
-  public HiveFuncHolder getHolder() {
+  public FuncHolder getHolder() {
     return holder;
   }
 

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/026d51a2/exec/java-exec/src/main/java/org/apache/drill/exec/expr/fn/AbstractFuncHolder.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/expr/fn/AbstractFuncHolder.java b/exec/java-exec/src/main/java/org/apache/drill/exec/expr/fn/AbstractFuncHolder.java
new file mode 100644
index 0000000..bf6ec56
--- /dev/null
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/expr/fn/AbstractFuncHolder.java
@@ -0,0 +1,45 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.drill.exec.expr.fn;
+
+import com.sun.codemodel.JVar;
+import org.apache.drill.common.expression.ExpressionPosition;
+import org.apache.drill.common.expression.FunctionHolderExpression;
+import org.apache.drill.common.expression.LogicalExpression;
+import org.apache.drill.common.expression.fn.FuncHolder;
+import org.apache.drill.exec.expr.ClassGenerator;
+import org.apache.drill.exec.expr.ClassGenerator.HoldingContainer;
+
+import java.util.List;
+
+public abstract class AbstractFuncHolder implements FuncHolder {
+
+  public abstract JVar[] renderStart(ClassGenerator<?> g, HoldingContainer[] inputVariables);
+
+  public void renderMiddle(ClassGenerator<?> g, HoldingContainer[] inputVariables, JVar[] workspaceJVars) {
+    // default implementation is add no code
+  }
+
+  public abstract HoldingContainer renderEnd(ClassGenerator<?> g, HoldingContainer[] inputVariables, JVar[] workspaceJVars);
+
+  public boolean isNested() {
+    return false;
+  }
+
+  public abstract FunctionHolderExpression getExpr(String name, List<LogicalExpression> args, ExpressionPosition pos);
+}

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/026d51a2/exec/java-exec/src/main/java/org/apache/drill/exec/expr/fn/DrillFuncHolder.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/expr/fn/DrillFuncHolder.java b/exec/java-exec/src/main/java/org/apache/drill/exec/expr/fn/DrillFuncHolder.java
index f3c1e13..2906705 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/expr/fn/DrillFuncHolder.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/expr/fn/DrillFuncHolder.java
@@ -21,6 +21,8 @@ import java.util.Arrays;
 import java.util.List;
 import java.util.Map;
 
+import org.apache.drill.common.expression.ExpressionPosition;
+import org.apache.drill.common.expression.FunctionHolderExpression;
 import org.apache.drill.common.expression.LogicalExpression;
 import org.apache.drill.common.types.TypeProtos;
 import org.apache.drill.common.types.TypeProtos.MajorType;
@@ -29,6 +31,7 @@ import org.apache.drill.common.types.Types;
 import org.apache.drill.exec.expr.ClassGenerator;
 import org.apache.drill.exec.expr.ClassGenerator.BlockType;
 import org.apache.drill.exec.expr.ClassGenerator.HoldingContainer;
+import org.apache.drill.exec.expr.DrillFuncHolderExpr;
 import org.apache.drill.exec.expr.TypeHelper;
 import org.apache.drill.exec.expr.annotations.FunctionTemplate;
 import org.apache.drill.exec.expr.annotations.FunctionTemplate.FunctionCostCategory;
@@ -46,7 +49,7 @@ import com.sun.codemodel.JExpr;
 import com.sun.codemodel.JType;
 import com.sun.codemodel.JVar;
 
-public abstract class DrillFuncHolder {
+public abstract class DrillFuncHolder extends AbstractFuncHolder {
 
   static final org.slf4j.Logger logger = org.slf4j.LoggerFactory.getLogger(FunctionImplementationRegistry.class);
 
@@ -83,18 +86,27 @@ public abstract class DrillFuncHolder {
     return imports;
   }
 
+  @Override
   public JVar[] renderStart(ClassGenerator<?> g, HoldingContainer[] inputVariables) {
     return declareWorkspaceVariables(g);
   };
 
+  @Override
   public void renderMiddle(ClassGenerator<?> g, HoldingContainer[] inputVariables, JVar[] workspaceJVars) {
   };
 
+  @Override
   public abstract HoldingContainer renderEnd(ClassGenerator<?> g, HoldingContainer[] inputVariables,
       JVar[] workspaceJVars);
 
+  @Override
   public abstract boolean isNested();
 
+  @Override
+  public FunctionHolderExpression getExpr(String name, List<LogicalExpression> args, ExpressionPosition pos) {
+    return new DrillFuncHolderExpr(name, this, args, pos);
+  }
+
   public boolean isAggregating() {
     return false;
   }

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/026d51a2/exec/java-exec/src/main/java/org/apache/drill/exec/expr/fn/DrillFunctionImplementationRegistry.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/expr/fn/DrillFunctionImplementationRegistry.java b/exec/java-exec/src/main/java/org/apache/drill/exec/expr/fn/DrillFunctionImplementationRegistry.java
deleted file mode 100644
index 8db2abd..0000000
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/expr/fn/DrillFunctionImplementationRegistry.java
+++ /dev/null
@@ -1,65 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.drill.exec.expr.fn;
-
-import java.util.List;
-import java.util.Set;
-
-import org.apache.drill.common.config.DrillConfig;
-import org.apache.drill.common.util.PathScanner;
-import org.apache.drill.exec.ExecConstants;
-import org.apache.drill.exec.expr.DrillFunc;
-
-import com.google.common.collect.ArrayListMultimap;
-
-public class DrillFunctionImplementationRegistry {
-  static final org.slf4j.Logger logger = org.slf4j.LoggerFactory.getLogger(DrillFunctionImplementationRegistry.class);
-
-  private ArrayListMultimap<String, DrillFuncHolder> methods = ArrayListMultimap.create();
-
-  public DrillFunctionImplementationRegistry(DrillConfig config){
-    FunctionConverter converter = new FunctionConverter();
-    Set<Class<? extends DrillFunc>> providerClasses = PathScanner.scanForImplementations(DrillFunc.class, config.getStringList(ExecConstants.FUNCTION_PACKAGES));
-    for (Class<? extends DrillFunc> clazz : providerClasses) {
-      DrillFuncHolder holder = converter.getHolder(clazz);
-      if(holder != null){
-        // register handle for each name the function can be referred to
-        String[] names = holder.getRegisteredNames();
-        for(String name : names) methods.put(name.toLowerCase(), holder);
-      }else{
-        logger.warn("Unable to initialize function for class {}", clazz.getName());
-      }
-    }
-    if (logger.isTraceEnabled()) {
-      StringBuilder allFunctions = new StringBuilder();
-      for (DrillFuncHolder method: methods.values()) {
-        allFunctions.append(method.toString()).append("\n");
-      }
-      logger.trace("Registered functions: [\n{}]", allFunctions);
-    }
-  }
-
-  public ArrayListMultimap<String, DrillFuncHolder> getMethods() {
-    return this.methods;
-  }
-
-  /** Returns functions with given name. Function name is case insensitive. */
-  public List<DrillFuncHolder> getMethods(String name) {
-    return this.methods.get(name.toLowerCase());
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/026d51a2/exec/java-exec/src/main/java/org/apache/drill/exec/expr/fn/DrillFunctionRegistry.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/expr/fn/DrillFunctionRegistry.java b/exec/java-exec/src/main/java/org/apache/drill/exec/expr/fn/DrillFunctionRegistry.java
new file mode 100644
index 0000000..2e7a104
--- /dev/null
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/expr/fn/DrillFunctionRegistry.java
@@ -0,0 +1,86 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.drill.exec.expr.fn;
+
+import java.util.Collection;
+import java.util.List;
+import java.util.Map.Entry;
+import java.util.Set;
+
+import com.google.common.collect.Sets;
+import org.apache.drill.common.config.DrillConfig;
+import org.apache.drill.common.util.PathScanner;
+import org.apache.drill.exec.ExecConstants;
+import org.apache.drill.exec.expr.DrillFunc;
+
+import com.google.common.collect.ArrayListMultimap;
+import org.apache.drill.exec.planner.sql.DrillOperatorTable;
+import org.apache.drill.exec.planner.sql.DrillSqlAggOperator;
+import org.apache.drill.exec.planner.sql.DrillSqlOperator;
+import org.eigenbase.sql.SqlOperator;
+
+public class DrillFunctionRegistry {
+  static final org.slf4j.Logger logger = org.slf4j.LoggerFactory.getLogger(DrillFunctionRegistry.class);
+
+  private ArrayListMultimap<String, DrillFuncHolder> methods = ArrayListMultimap.create();
+
+  public DrillFunctionRegistry(DrillConfig config){
+    FunctionConverter converter = new FunctionConverter();
+    Set<Class<? extends DrillFunc>> providerClasses = PathScanner.scanForImplementations(DrillFunc.class, config.getStringList(ExecConstants.FUNCTION_PACKAGES));
+    for (Class<? extends DrillFunc> clazz : providerClasses) {
+      DrillFuncHolder holder = converter.getHolder(clazz);
+      if(holder != null){
+        // register the holder under each name the function can be referred to by
+        String[] names = holder.getRegisteredNames();
+        for(String name : names) methods.put(name.toLowerCase(), holder);
+      }else{
+        logger.warn("Unable to initialize function for class {}", clazz.getName());
+      }
+    }
+    if (logger.isTraceEnabled()) {
+      StringBuilder allFunctions = new StringBuilder();
+      for (DrillFuncHolder method: methods.values()) {
+        allFunctions.append(method.toString()).append("\n");
+      }
+      logger.trace("Registered functions: [\n{}]", allFunctions);
+    }
+  }
+
+  /** Returns all functions registered under the given name. Function names are case-insensitive. */
+  public List<DrillFuncHolder> getMethods(String name) {
+    return this.methods.get(name.toLowerCase());
+  }
+
+  public void register(DrillOperatorTable operatorTable) {
+    SqlOperator op;
+    for (Entry<String, Collection<DrillFuncHolder>> function : methods.asMap().entrySet()) {
+      Set<Integer> argCounts = Sets.newHashSet();
+      String name = function.getKey().toUpperCase();
+      for (DrillFuncHolder f : function.getValue()) {
+        if (argCounts.add(f.getParamCount())) {
+          if (f.isAggregating()) {
+            op = new DrillSqlAggOperator(name, f.getParamCount());
+          } else {
+            op = new DrillSqlOperator(name, f.getParamCount());
+          }
+          operatorTable.add(function.getKey(), op);
+        }
+      }
+    }
+  }
+}
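
The register(...) loop above adds one SQL operator per (name, parameter count) pair, so overloads that differ only in argument types share a single operator entry. A minimal standalone sketch of that dedup rule (the function names and counts are made up):

import java.util.Arrays;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.TreeMap;

// Standalone sketch of the dedup rule in register(): one operator per distinct
// parameter count, regardless of how many type-overloads exist for that count.
public class RegisterSketch {
  public static void main(String[] args) {
    Map<String, List<Integer>> overloads = new TreeMap<String, List<Integer>>();
    overloads.put("substr", Arrays.asList(2, 3, 2)); // two 2-arg overloads, one 3-arg
    overloads.put("now", Arrays.asList(0));
    for (Map.Entry<String, List<Integer>> e : overloads.entrySet()) {
      Set<Integer> argCounts = new HashSet<Integer>();
      for (int paramCount : e.getValue()) {
        if (argCounts.add(paramCount)) { // first overload with this arity wins
          System.out.println("register operator " + e.getKey().toUpperCase() + "/" + paramCount);
        }
      }
    }
    // prints NOW/0, SUBSTR/2, SUBSTR/3 -- three operators for four overloads
  }
}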

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/026d51a2/exec/java-exec/src/main/java/org/apache/drill/exec/expr/fn/FunctionGenerationHelper.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/expr/fn/FunctionGenerationHelper.java b/exec/java-exec/src/main/java/org/apache/drill/exec/expr/fn/FunctionGenerationHelper.java
index 25eff90..4a63aac 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/expr/fn/FunctionGenerationHelper.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/expr/fn/FunctionGenerationHelper.java
@@ -28,6 +28,7 @@ import org.apache.drill.common.types.TypeProtos.MajorType;
 import org.apache.drill.common.types.TypeProtos.MinorType;
 import org.apache.drill.common.types.Types;
 import org.apache.drill.exec.expr.ClassGenerator.HoldingContainer;
+import org.apache.drill.exec.expr.DrillFunc;
 import org.apache.drill.exec.expr.DrillFuncHolderExpr;
 import org.apache.drill.exec.expr.HoldingContainerExpression;
 
@@ -57,13 +58,12 @@ public class FunctionGenerationHelper {
       argTypes.add(c.getMajorType());
       argExpressions.add(new HoldingContainerExpression(c));
     }
-    
-    for (DrillFuncHolder h : registry.getDrillRegistry().getMethods(name)) {
-      if (h.matches(returnType, argTypes)) {
-        return new DrillFuncHolderExpr(name, h, argExpressions, ExpressionPosition.UNKNOWN);
-      }
+
+    DrillFuncHolder holder = registry.findExactMatchingDrillFunction(name, argTypes, returnType);
+    if (holder != null) {
+      return holder.getExpr(name, argExpressions, ExpressionPosition.UNKNOWN);
     }
-    
+
     StringBuilder sb = new StringBuilder();
     sb.append("Failure finding function that runtime code generation expected.  Signature: ");
     sb.append(name);

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/026d51a2/exec/java-exec/src/main/java/org/apache/drill/exec/expr/fn/FunctionImplementationRegistry.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/expr/fn/FunctionImplementationRegistry.java b/exec/java-exec/src/main/java/org/apache/drill/exec/expr/fn/FunctionImplementationRegistry.java
index 7a28a2f..d7bc36b 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/expr/fn/FunctionImplementationRegistry.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/expr/fn/FunctionImplementationRegistry.java
@@ -17,22 +17,113 @@
  */
 package org.apache.drill.exec.expr.fn;
 
+import com.google.common.collect.Lists;
 import org.apache.drill.common.config.DrillConfig;
+import org.apache.drill.common.expression.FunctionCall;
+import org.apache.drill.common.types.TypeProtos.MajorType;
+import org.apache.drill.common.util.PathScanner;
+import org.apache.drill.exec.ExecConstants;
+import org.apache.drill.exec.planner.sql.DrillOperatorTable;
+import org.apache.drill.exec.resolver.FunctionResolver;
+
+import java.lang.reflect.Constructor;
+import java.lang.reflect.InvocationTargetException;
+import java.util.List;
+import java.util.Set;
 
 public class FunctionImplementationRegistry {
-  private DrillFunctionImplementationRegistry drillFuncRegistry;
-  private HiveFunctionImplementationRegistry hiveFuncRegistry;
+  static final org.slf4j.Logger logger = org.slf4j.LoggerFactory.getLogger(FunctionImplementationRegistry.class);
+
+  private DrillFunctionRegistry drillFuncRegistry;
+  private List<PluggableFunctionRegistry> pluggableFuncRegistries = Lists.newArrayList();
 
   public FunctionImplementationRegistry(DrillConfig config){
-    drillFuncRegistry = new DrillFunctionImplementationRegistry(config);
-    hiveFuncRegistry = new HiveFunctionImplementationRegistry(config);
+    drillFuncRegistry = new DrillFunctionRegistry(config);
+
+    Set<Class<? extends PluggableFunctionRegistry>> registryClasses = PathScanner.scanForImplementations(
+        PluggableFunctionRegistry.class, config.getStringList(ExecConstants.FUNCTION_PACKAGES));
+
+    for (Class<? extends PluggableFunctionRegistry> clazz : registryClasses) {
+      for (Constructor<?> c : clazz.getConstructors()) {
+        Class<?>[] params = c.getParameterTypes();
+        if (params.length != 1 || params[0] != DrillConfig.class) {
+          logger.warn("Skipping PluggableFunctionRegistry constructor {} for class {} since it doesn't implement a " +
+              "[constructor(DrillConfig)]", c, clazz);
+          continue;
+        }
+
+        try {
+          PluggableFunctionRegistry registry = (PluggableFunctionRegistry)c.newInstance(config);
+          pluggableFuncRegistries.add(registry);
+        } catch(InstantiationException | IllegalAccessException | IllegalArgumentException | InvocationTargetException e) {
+          logger.warn("Unable to instantiate PluggableFunctionRegistry class '{}'. Skipping it.", clazz, e);
+        }
+
+        break;
+      }
+    }
   }
 
-  public DrillFunctionImplementationRegistry getDrillRegistry() {
-    return drillFuncRegistry;
+  /**
+   * Registers all known function implementations in the given operator table.
+   * @param operatorTable operator table to populate
+   */
+  public void register(DrillOperatorTable operatorTable) {
+    // Register Drill functions first, then the pluggable function registries.
+    drillFuncRegistry.register(operatorTable);
+
+    for(PluggableFunctionRegistry registry : pluggableFuncRegistries) {
+      registry.register(operatorTable);
+    }
   }
 
-  public HiveFunctionImplementationRegistry getHiveRegistry() {
-    return hiveFuncRegistry;
+  /**
+   * Uses the given <code>functionResolver</code> to find the Drill function implementation for the given
+   * <code>functionCall</code>.
+   *
+   * @param functionResolver resolver used to pick the best match among the candidate implementations
+   * @param functionCall function call to resolve
+   * @return the best-matching Drill function holder, or null if the resolver finds no match
+   */
+  public DrillFuncHolder findDrillFunction(FunctionResolver functionResolver, FunctionCall functionCall) {
+    return functionResolver.getBestMatch(drillFuncRegistry.getMethods(functionCall.getName()), functionCall);
+  }
+
+  /**
+   * Finds the Drill function implementation that exactly matches the given name, argument types and return type.
+   * @param name function name (case insensitive)
+   * @param argTypes major types of the arguments
+   * @param returnType expected return type
+   * @return the matching holder, or null if no exact match exists
+   */
+  public DrillFuncHolder findExactMatchingDrillFunction(String name, List<MajorType> argTypes, MajorType returnType) {
+    for (DrillFuncHolder h : drillFuncRegistry.getMethods(name)) {
+      if (h.matches(returnType, argTypes)) {
+        return h;
+      }
+    }
+
+    return null;
+  }
+
+  /**
+   * Finds a function implementation for the given <code>functionCall</code> in non-Drill function registries,
+   * such as the Hive UDF registry.
+   *
+   * Note: the search order is the order in which {@link org.apache.drill.exec.expr.fn.PluggableFunctionRegistry}
+   * implementations were found on the classpath.
+   *
+   * @param functionCall function call to resolve
+   * @return the first matching holder, or null if no pluggable registry contains a match
+   */
+  public AbstractFuncHolder findNonDrillFunction(FunctionCall functionCall) {
+    for(PluggableFunctionRegistry registry : pluggableFuncRegistries) {
+      AbstractFuncHolder h = registry.getFunction(functionCall);
+      if (h != null) {
+        return h;
+      }
+    }
+
+    return null;
   }
 }
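
The constructor scan above imposes a simple protocol on third-party registries: expose a public constructor taking exactly one DrillConfig. A standalone sketch of that reflective check, where Config and EchoRegistry are hypothetical stand-ins for DrillConfig and a scanned registry class:

import java.lang.reflect.Constructor;

// Standalone sketch of the constructor check performed above; Config and
// EchoRegistry are hypothetical stand-ins, not Drill classes.
public class ScanSketch {
  static class Config {}

  public static class EchoRegistry {
    public EchoRegistry(Config config) {}            // the required shape
    public EchoRegistry(Config config, int extra) {} // would be skipped
  }

  public static void main(String[] args) throws Exception {
    for (Constructor<?> c : EchoRegistry.class.getConstructors()) {
      Class<?>[] params = c.getParameterTypes();
      if (params.length != 1 || params[0] != Config.class) {
        System.out.println("skipping " + c);
        continue;
      }
      Object registry = c.newInstance(new Config());
      System.out.println("instantiated " + registry.getClass().getSimpleName());
      break; // as above: stop after the first usable constructor
    }
  }
}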

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/026d51a2/exec/java-exec/src/main/java/org/apache/drill/exec/expr/fn/HiveFuncHolder.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/expr/fn/HiveFuncHolder.java b/exec/java-exec/src/main/java/org/apache/drill/exec/expr/fn/HiveFuncHolder.java
index 61c3e54..813d4c5 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/expr/fn/HiveFuncHolder.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/expr/fn/HiveFuncHolder.java
@@ -26,20 +26,25 @@ import com.sun.codemodel.JExpr;
 import com.sun.codemodel.JInvocation;
 import com.sun.codemodel.JTryBlock;
 import com.sun.codemodel.JVar;
+import org.apache.drill.common.expression.ExpressionPosition;
+import org.apache.drill.common.expression.FunctionHolderExpression;
+import org.apache.drill.common.expression.LogicalExpression;
 import org.apache.drill.common.types.TypeProtos;
 import org.apache.drill.common.types.TypeProtos.MajorType;
 import org.apache.drill.exec.expr.ClassGenerator;
 import org.apache.drill.exec.expr.ClassGenerator.HoldingContainer;
+import org.apache.drill.exec.expr.HiveFuncHolderExpr;
 import org.apache.drill.exec.expr.TypeHelper;
 import org.apache.drill.exec.expr.fn.impl.hive.DrillDeferredObject;
 import org.apache.drill.exec.expr.fn.impl.hive.ObjectInspectorHelper;
-import org.apache.drill.exec.expr.holders.ValueHolder;
 import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
 import org.apache.hadoop.hive.ql.exec.UDF;
 import org.apache.hadoop.hive.ql.udf.generic.GenericUDFBridge;
 import org.apache.hadoop.hive.ql.udf.generic.GenericUDF;
 
-public class HiveFuncHolder {
+import java.util.List;
+
+public class HiveFuncHolder extends AbstractFuncHolder {
   static final org.slf4j.Logger logger = org.slf4j.LoggerFactory.getLogger(FunctionImplementationRegistry.class);
 
   private MajorType[] argTypes;
@@ -110,6 +115,7 @@ public class HiveFuncHolder {
    * Start generating code
    * @return workspace variables
    */
+  @Override
   public JVar[] renderStart(ClassGenerator<?> g, HoldingContainer[] inputVariables){
     JVar[] workspaceJVars = new JVar[5];
 
@@ -130,6 +136,7 @@ public class HiveFuncHolder {
    * @param workspaceJVars
    * @return HoldingContainer for return value
    */
+  @Override
   public HoldingContainer renderEnd(ClassGenerator<?> g, HoldingContainer[] inputVariables, JVar[]  workspaceJVars) {
     generateSetup(g, workspaceJVars);
     return generateEval(g, inputVariables, workspaceJVars);
@@ -146,6 +153,11 @@ public class HiveFuncHolder {
     }
   }
 
+  @Override
+  public FunctionHolderExpression getExpr(String name, List<LogicalExpression> args, ExpressionPosition pos) {
+    return new HiveFuncHolderExpr(name, this, args, pos);
+  }
+
   private void generateSetup(ClassGenerator<?> g, JVar[] workspaceJVars) {
     JCodeModel m = g.getModel();
     JBlock sub = new JBlock(true, true);

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/026d51a2/exec/java-exec/src/main/java/org/apache/drill/exec/expr/fn/HiveFunctionImplementationRegistry.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/expr/fn/HiveFunctionImplementationRegistry.java b/exec/java-exec/src/main/java/org/apache/drill/exec/expr/fn/HiveFunctionImplementationRegistry.java
deleted file mode 100644
index a72b7de..0000000
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/expr/fn/HiveFunctionImplementationRegistry.java
+++ /dev/null
@@ -1,164 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.drill.exec.expr.fn;
-
-import java.util.HashSet;
-import java.util.Set;
-
-import org.apache.drill.common.config.DrillConfig;
-import org.apache.drill.common.expression.FunctionCall;
-import org.apache.drill.common.types.TypeProtos;
-import org.apache.drill.common.types.TypeProtos.MajorType;
-import org.apache.drill.common.types.Types;
-import org.apache.drill.common.util.PathScanner;
-import org.apache.drill.exec.expr.fn.impl.hive.ObjectInspectorHelper;
-import org.apache.hadoop.hive.ql.exec.Description;
-import org.apache.hadoop.hive.ql.exec.UDF;
-import org.apache.hadoop.hive.ql.exec.UDFArgumentException;
-import org.apache.hadoop.hive.ql.udf.UDFType;
-import org.apache.hadoop.hive.ql.udf.generic.GenericUDF;
-import org.apache.hadoop.hive.ql.udf.generic.GenericUDFBridge;
-import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
-
-import com.google.common.collect.ArrayListMultimap;
-
-public class HiveFunctionImplementationRegistry {
-  static final org.slf4j.Logger logger = org.slf4j.LoggerFactory.getLogger(HiveFunctionImplementationRegistry.class);
-
-  private ArrayListMultimap<String, Class<? extends GenericUDF>> methodsGenericUDF = ArrayListMultimap.create();
-  private ArrayListMultimap<String, Class<? extends UDF>> methodsUDF = ArrayListMultimap.create();
-  private HashSet<Class<?>> nonDeterministicUDFs = new HashSet<>();
-
-  /**
-   * Scan the classpath for implementation of GenericUDF/UDF interfaces,
-   * extracts function annotation and store the
-   * (function name) --> (implementation class) mappings.
-   * @param config
-   */
-  public HiveFunctionImplementationRegistry(DrillConfig config){
-    Set<Class<? extends GenericUDF>> genericUDFClasses = PathScanner.scanForImplementations(GenericUDF.class, null);
-    for (Class<? extends GenericUDF> clazz : genericUDFClasses)
-      register(clazz, methodsGenericUDF);
-
-    Set<Class<? extends UDF>> udfClasses = PathScanner.scanForImplementations(UDF.class, null);
-    for (Class<? extends UDF> clazz : udfClasses)
-      register(clazz, methodsUDF);
-  }
-
-  private <C,I> void register(Class<? extends I> clazz, ArrayListMultimap<String,Class<? extends I>> methods) {
-    Description desc = clazz.getAnnotation(Description.class);
-    String[] names;
-    if(desc != null){
-      names = desc.name().split(",");
-      for(int i=0; i<names.length; i++) names[i] = names[i].trim();
-    }else{
-      names = new String[]{clazz.getName().replace('.', '_')};
-    }
-    
-    UDFType type = clazz.getAnnotation(UDFType.class);
-    if (type != null && type.deterministic()) nonDeterministicUDFs.add(clazz);
-
-
-    for(int i=0; i<names.length;i++){
-      methods.put(names[i].toLowerCase(), clazz);
-    }
-  }
-
-  public ArrayListMultimap<String, Class<? extends GenericUDF>> getGenericUDFs() {
-    return methodsGenericUDF;
-  }
-
-  public ArrayListMultimap<String, Class<? extends UDF>> getUDFs() {
-    return methodsUDF;
-  }
-
-  /**
-   * Find the UDF class for given function name and check if it accepts the given input argument
-   * types. If a match is found, create a holder and return
-   * @param call
-   * @return
-   */
-  public HiveFuncHolder getFunction(FunctionCall call){
-    HiveFuncHolder holder;
-    MajorType[] argTypes = new MajorType[call.args.size()];
-    ObjectInspector[] argOIs = new ObjectInspector[call.args.size()];
-    for(int i=0; i<call.args.size(); i++) {
-      argTypes[i] = call.args.get(i).getMajorType();
-      argOIs[i] = ObjectInspectorHelper.getDrillObjectInspector(argTypes[i].getMinorType());
-    }
-
-    String funcName = call.getName().toLowerCase();
-
-    // search in GenericUDF list
-    for(Class<? extends GenericUDF> clazz: methodsGenericUDF.get(funcName)) {
-      holder = matchAndCreateGenericUDFHolder(clazz, argTypes, argOIs);
-      if(holder != null)
-        return holder;
-    }
-
-    // search in UDF list
-    for (Class<? extends UDF> clazz : methodsUDF.get(funcName)) {
-      holder = matchAndCreateUDFHolder(call.getName(), clazz, argTypes, argOIs);
-      if (holder != null)
-        return holder;
-    }
-
-    return null;
-  }
-
-  private HiveFuncHolder matchAndCreateGenericUDFHolder(Class<? extends GenericUDF> udfClazz,
-                                              MajorType[] argTypes,
-                                              ObjectInspector[] argOIs) {
-    // probe UDF to find if the arg types and acceptable
-    // if acceptable create a holder object
-    try {
-      GenericUDF udfInstance = udfClazz.newInstance();
-      ObjectInspector returnOI = udfInstance.initialize(argOIs);
-      return new HiveFuncHolder(
-        udfClazz,
-        argTypes,
-        returnOI,
-        Types.optional(ObjectInspectorHelper.getDrillType(returnOI)),
-        nonDeterministicUDFs.contains(udfClazz));
-    } catch(IllegalAccessException | InstantiationException e) {
-      logger.debug("Failed to instantiate class", e);
-    } catch(Exception e) { /*ignore this*/ }
-
-    return null;
-  }
-
-  private HiveFuncHolder matchAndCreateUDFHolder(String udfName,
-                                                 Class<? extends UDF> udfClazz,
-                                                 MajorType[] argTypes,
-                                                 ObjectInspector[] argOIs) {
-    try {
-      GenericUDF udfInstance = new GenericUDFBridge(udfName, false/* is operator */, udfClazz);
-      ObjectInspector returnOI = udfInstance.initialize(argOIs);
-
-      return new HiveFuncHolder(
-        udfName,
-        udfClazz,
-        argTypes,
-        returnOI,
-        Types.optional(ObjectInspectorHelper.getDrillType(returnOI)),
-        nonDeterministicUDFs.contains(udfClazz));
-    } catch(Exception e) { /*ignore this*/ }
-
-    return null;
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/026d51a2/exec/java-exec/src/main/java/org/apache/drill/exec/expr/fn/HiveFunctionRegistry.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/expr/fn/HiveFunctionRegistry.java b/exec/java-exec/src/main/java/org/apache/drill/exec/expr/fn/HiveFunctionRegistry.java
new file mode 100644
index 0000000..72cadc9
--- /dev/null
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/expr/fn/HiveFunctionRegistry.java
@@ -0,0 +1,166 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.drill.exec.expr.fn;
+
+import java.util.HashSet;
+import java.util.Set;
+
+import com.google.common.collect.Sets;
+import org.apache.drill.common.config.DrillConfig;
+import org.apache.drill.common.expression.FunctionCall;
+import org.apache.drill.common.types.TypeProtos.MajorType;
+import org.apache.drill.common.types.Types;
+import org.apache.drill.common.util.PathScanner;
+import org.apache.drill.exec.expr.fn.impl.hive.ObjectInspectorHelper;
+import org.apache.drill.exec.planner.sql.DrillOperatorTable;
+import org.apache.drill.exec.planner.sql.HiveUDFOperator;
+import org.apache.hadoop.hive.ql.exec.Description;
+import org.apache.hadoop.hive.ql.exec.UDF;
+import org.apache.hadoop.hive.ql.exec.UDFArgumentException;
+import org.apache.hadoop.hive.ql.udf.UDFType;
+import org.apache.hadoop.hive.ql.udf.generic.GenericUDF;
+import org.apache.hadoop.hive.ql.udf.generic.GenericUDFBridge;
+import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
+
+import com.google.common.collect.ArrayListMultimap;
+
+public class HiveFunctionRegistry implements PluggableFunctionRegistry{
+  static final org.slf4j.Logger logger = org.slf4j.LoggerFactory.getLogger(HiveFunctionRegistry.class);
+
+  private ArrayListMultimap<String, Class<? extends GenericUDF>> methodsGenericUDF = ArrayListMultimap.create();
+  private ArrayListMultimap<String, Class<? extends UDF>> methodsUDF = ArrayListMultimap.create();
+  private HashSet<Class<?>> nonDeterministicUDFs = new HashSet<>();
+
+  /**
+   * Scans the classpath for implementations of the GenericUDF/UDF interfaces,
+   * extracts the function annotations and stores the
+   * (function name) --> (implementation class) mappings.
+   * @param config Drill configuration (not currently used by this scan)
+   */
+  public HiveFunctionRegistry(DrillConfig config){
+    Set<Class<? extends GenericUDF>> genericUDFClasses = PathScanner.scanForImplementations(GenericUDF.class, null);
+    for (Class<? extends GenericUDF> clazz : genericUDFClasses)
+      register(clazz, methodsGenericUDF);
+
+    Set<Class<? extends UDF>> udfClasses = PathScanner.scanForImplementations(UDF.class, null);
+    for (Class<? extends UDF> clazz : udfClasses)
+      register(clazz, methodsUDF);
+  }
+
+  @Override
+  public void register(DrillOperatorTable operatorTable) {
+    for (String name : Sets.union(methodsGenericUDF.asMap().keySet(), methodsUDF.asMap().keySet())) {
+      operatorTable.add(name, new HiveUDFOperator(name.toUpperCase()));
+    }
+  }
+
+  private <C,I> void register(Class<? extends I> clazz, ArrayListMultimap<String,Class<? extends I>> methods) {
+    Description desc = clazz.getAnnotation(Description.class);
+    String[] names;
+    if(desc != null){
+      names = desc.name().split(",");
+      for(int i=0; i<names.length; i++) names[i] = names[i].trim();
+    }else{
+      names = new String[]{clazz.getName().replace('.', '_')};
+    }
+    
+    UDFType type = clazz.getAnnotation(UDFType.class);
+    // track UDFs that declare themselves non-deterministic
+    if (type != null && !type.deterministic()) nonDeterministicUDFs.add(clazz);
+
+
+    for(int i=0; i<names.length;i++){
+      methods.put(names[i].toLowerCase(), clazz);
+    }
+  }
+
+  /**
+   * Finds the UDF class for the given function name and checks whether it accepts the given input
+   * argument types.
+   * @param call function call to resolve
+   * @return a holder for the matching UDF, or null if no registered UDF matches
+   */
+  @Override
+  public HiveFuncHolder getFunction(FunctionCall call){
+    HiveFuncHolder holder;
+    MajorType[] argTypes = new MajorType[call.args.size()];
+    ObjectInspector[] argOIs = new ObjectInspector[call.args.size()];
+    for(int i=0; i<call.args.size(); i++) {
+      argTypes[i] = call.args.get(i).getMajorType();
+      argOIs[i] = ObjectInspectorHelper.getDrillObjectInspector(argTypes[i].getMinorType());
+    }
+
+    String funcName = call.getName().toLowerCase();
+
+    // search in GenericUDF list
+    for(Class<? extends GenericUDF> clazz: methodsGenericUDF.get(funcName)) {
+      holder = matchAndCreateGenericUDFHolder(clazz, argTypes, argOIs);
+      if(holder != null)
+        return holder;
+    }
+
+    // search in UDF list
+    for (Class<? extends UDF> clazz : methodsUDF.get(funcName)) {
+      holder = matchAndCreateUDFHolder(call.getName(), clazz, argTypes, argOIs);
+      if (holder != null)
+        return holder;
+    }
+
+    return null;
+  }
+
+  private HiveFuncHolder matchAndCreateGenericUDFHolder(Class<? extends GenericUDF> udfClazz,
+                                              MajorType[] argTypes,
+                                              ObjectInspector[] argOIs) {
+    // probe the UDF to find out whether the arg types are acceptable;
+    // if they are, create a holder object
+    try {
+      GenericUDF udfInstance = udfClazz.newInstance();
+      ObjectInspector returnOI = udfInstance.initialize(argOIs);
+      return new HiveFuncHolder(
+        udfClazz,
+        argTypes,
+        returnOI,
+        Types.optional(ObjectInspectorHelper.getDrillType(returnOI)),
+        nonDeterministicUDFs.contains(udfClazz));
+    } catch(IllegalAccessException | InstantiationException e) {
+      logger.debug("Failed to instantiate class", e);
+    } catch(Exception e) { /*ignore this*/ }
+
+    return null;
+  }
+
+  private HiveFuncHolder matchAndCreateUDFHolder(String udfName,
+                                                 Class<? extends UDF> udfClazz,
+                                                 MajorType[] argTypes,
+                                                 ObjectInspector[] argOIs) {
+    try {
+      GenericUDF udfInstance = new GenericUDFBridge(udfName, false/* is operator */, udfClazz);
+      ObjectInspector returnOI = udfInstance.initialize(argOIs);
+
+      return new HiveFuncHolder(
+        udfName,
+        udfClazz,
+        argTypes,
+        returnOI,
+        Types.optional(ObjectInspectorHelper.getDrillType(returnOI)),
+        nonDeterministicUDFs.contains(udfClazz));
+    } catch(Exception e) { /*ignore this*/ }
+
+    return null;
+  }
+}
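
To make the name-derivation rule above concrete, here is a hypothetical Hive UDF as the scan would see it; @Description and @UDFType are real Hive annotations, but the class itself is purely illustrative:

import org.apache.hadoop.hive.ql.exec.Description;
import org.apache.hadoop.hive.ql.exec.UDF;
import org.apache.hadoop.hive.ql.udf.UDFType;
import org.apache.hadoop.io.Text;

// Hypothetical UDF: registered under both "my_trim" and "mtrim" (split from
// @Description.name); @UDFType(deterministic = true) keeps it out of the
// non-deterministic set.
@Description(name = "my_trim, mtrim", value = "_FUNC_(str) - trims whitespace")
@UDFType(deterministic = true)
public class MyTrimUDF extends UDF {
  public Text evaluate(Text input) {
    return input == null ? null : new Text(input.toString().trim());
  }
}
// A UDF class without @Description would instead be registered under its fully
// qualified class name with dots replaced by underscores.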

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/026d51a2/exec/java-exec/src/main/java/org/apache/drill/exec/expr/fn/PluggableFunctionRegistry.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/expr/fn/PluggableFunctionRegistry.java b/exec/java-exec/src/main/java/org/apache/drill/exec/expr/fn/PluggableFunctionRegistry.java
new file mode 100644
index 0000000..547e65f
--- /dev/null
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/expr/fn/PluggableFunctionRegistry.java
@@ -0,0 +1,39 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.drill.exec.expr.fn;
+
+import org.apache.drill.common.expression.FunctionCall;
+import org.apache.drill.exec.planner.sql.DrillOperatorTable;
+
+public interface PluggableFunctionRegistry {
+
+  /**
+   * Registers this registry's functions in the given operator table.
+   * @param operatorTable operator table to populate
+   */
+  public void register(DrillOperatorTable operatorTable);
+
+  /**
+   * Returns the function implementation holder matching the given <code>functionCall</code> expression,
+   * or null if none exists.
+   *
+   * @param functionCall function call to resolve
+   * @return matching holder, or null if this registry has no implementation for the call
+   */
+  public AbstractFuncHolder getFunction(FunctionCall functionCall);
+}
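
A minimal sketch of an implementation (the class name is hypothetical): the single DrillConfig constructor is what FunctionImplementationRegistry's classpath scan requires, and the class must live under a package listed in ExecConstants.FUNCTION_PACKAGES to be discovered. An empty registry is legal and simply contributes nothing.

import org.apache.drill.common.config.DrillConfig;
import org.apache.drill.common.expression.FunctionCall;
import org.apache.drill.exec.expr.fn.AbstractFuncHolder;
import org.apache.drill.exec.expr.fn.PluggableFunctionRegistry;
import org.apache.drill.exec.planner.sql.DrillOperatorTable;

// Hypothetical no-op registry: legal, discoverable, contributes no functions.
public class NoOpFunctionRegistry implements PluggableFunctionRegistry {

  public NoOpFunctionRegistry(DrillConfig config) {
    // read any custom configuration here
  }

  @Override
  public void register(DrillOperatorTable operatorTable) {
    // no SQL operators to contribute
  }

  @Override
  public AbstractFuncHolder getFunction(FunctionCall functionCall) {
    return null; // no match; Drill falls back to the other registries
  }
}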

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/026d51a2/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/DrillOperatorTable.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/DrillOperatorTable.java b/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/DrillOperatorTable.java
index 29161aa..9ffbb06 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/DrillOperatorTable.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/DrillOperatorTable.java
@@ -46,31 +46,12 @@ public class DrillOperatorTable extends SqlStdOperatorTable {
     operators = Lists.newArrayList();
     operators.addAll(inner.getOperatorList());
 
-    for (Map.Entry<String, Collection<DrillFuncHolder>> function : registry.getDrillRegistry().getMethods().asMap().entrySet()) {
-      Set<Integer> argCounts = Sets.newHashSet();
-      String name = function.getKey().toUpperCase();
-      for (DrillFuncHolder f : function.getValue()) {
-        if (argCounts.add(f.getParamCount())) {
-          SqlOperator op = null;
-          if (f.isAggregating()) {
-            op = new DrillSqlAggOperator(name, f.getParamCount());
-          } else {
-            op = new DrillSqlOperator(name, f.getParamCount());
-          }
-          operators.add(op);
-          opMap.put(function.getKey(), op);
-        }
-      }
-    }
-
-    for (String name : Sets.union(
-        registry.getHiveRegistry().getGenericUDFs().asMap().keySet(),
-        registry.getHiveRegistry().getUDFs().asMap().keySet())) {
+    registry.register(this);
+  }
 
-      SqlOperator op = new HiveUDFOperator(name.toUpperCase());
-      operators.add(op);
-      opMap.put(name, op);
-    }
+  public void add(String name, SqlOperator op) {
+    operators.add(op);
+    opMap.put(name, op);
   }
 
   @Override

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/026d51a2/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/DrillSqlAggOperator.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/DrillSqlAggOperator.java b/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/DrillSqlAggOperator.java
index 26f5af2..299712e 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/DrillSqlAggOperator.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/DrillSqlAggOperator.java
@@ -52,7 +52,7 @@ public class DrillSqlAggOperator extends SqlAggFunction {
   static final org.slf4j.Logger logger = org.slf4j.LoggerFactory.getLogger(DrillSqlAggOperator.class);
 
   
-  DrillSqlAggOperator(String name, int argCount) {
+  public DrillSqlAggOperator(String name, int argCount) {
     super(name, new SqlIdentifier(name, SqlParserPos.ZERO), SqlKind.OTHER_FUNCTION, DynamicReturnType.INSTANCE, null, new Checker(argCount), SqlFunctionCategory.USER_DEFINED_FUNCTION);
   }
 

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/026d51a2/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/DrillSqlOperator.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/DrillSqlOperator.java b/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/DrillSqlOperator.java
index 1f39c6e..2eeb26e 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/DrillSqlOperator.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/DrillSqlOperator.java
@@ -32,7 +32,7 @@ import org.eigenbase.sql.validate.SqlValidatorScope;
 public class DrillSqlOperator extends SqlFunction {
   static final org.slf4j.Logger logger = org.slf4j.LoggerFactory.getLogger(DrillSqlOperator.class);
 
-  DrillSqlOperator(String name, int argCount) {
+  public DrillSqlOperator(String name, int argCount) {
     super(new SqlIdentifier(name, SqlParserPos.ZERO), DynamicReturnType.INSTANCE, null, new Checker(argCount), null, SqlFunctionCategory.USER_DEFINED_FUNCTION);
   }
 

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/026d51a2/exec/java-exec/src/test/java/org/apache/drill/exec/physical/impl/TestSimpleFunctions.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/test/java/org/apache/drill/exec/physical/impl/TestSimpleFunctions.java b/exec/java-exec/src/test/java/org/apache/drill/exec/physical/impl/TestSimpleFunctions.java
index 7adef02..b032c51 100644
--- a/exec/java-exec/src/test/java/org/apache/drill/exec/physical/impl/TestSimpleFunctions.java
+++ b/exec/java-exec/src/test/java/org/apache/drill/exec/physical/impl/TestSimpleFunctions.java
@@ -143,7 +143,7 @@ public class TestSimpleFunctions extends ExecTest {
         ExpressionPosition.UNKNOWN
     );
     FunctionResolver resolver = FunctionResolverFactory.getResolver(call);
-    DrillFuncHolder matchedFuncHolder = resolver.getBestMatch(registry.getDrillRegistry().getMethods(call.getName()), call);
+    DrillFuncHolder matchedFuncHolder = registry.findDrillFunction(resolver, call);
     assertEquals( expectedBestInputMode, matchedFuncHolder.getParmMajorType(0).getMode());
   }
 

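Putting the pieces of this commit together, a hedged sketch of the new lookup path; the function name, argument and surrounding class are illustrative, with constructor signatures assumed from the updated test above:

import com.google.common.collect.ImmutableList;
import org.apache.drill.common.config.DrillConfig;
import org.apache.drill.common.expression.ExpressionPosition;
import org.apache.drill.common.expression.FunctionCall;
import org.apache.drill.common.expression.LogicalExpression;
import org.apache.drill.common.expression.ValueExpressions;
import org.apache.drill.exec.expr.fn.AbstractFuncHolder;
import org.apache.drill.exec.expr.fn.FunctionImplementationRegistry;
import org.apache.drill.exec.resolver.FunctionResolver;
import org.apache.drill.exec.resolver.FunctionResolverFactory;

// Illustrative lookup: Drill's own functions first, then pluggable registries.
public class LookupSketch {
  public static void main(String[] args) {
    FunctionImplementationRegistry registry = new FunctionImplementationRegistry(DrillConfig.create());
    FunctionCall call = new FunctionCall("hash",
        ImmutableList.<LogicalExpression>of(new ValueExpressions.LongExpression(1L, ExpressionPosition.UNKNOWN)),
        ExpressionPosition.UNKNOWN);
    FunctionResolver resolver = FunctionResolverFactory.getResolver(call);

    AbstractFuncHolder holder = registry.findDrillFunction(resolver, call);
    if (holder == null) {
      holder = registry.findNonDrillFunction(call); // e.g. a Hive UDF
    }
    System.out.println("resolved to: " + holder);
  }
}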

[06/32] git commit: DRILL-1007: Unify handling of negative values in Decimal28 and Decimal38 data types

Posted by ja...@apache.org.
DRILL-1007: Unify handling of negative values in Decimal28 and Decimal38 data types


Project: http://git-wip-us.apache.org/repos/asf/incubator-drill/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-drill/commit/0dec032f
Tree: http://git-wip-us.apache.org/repos/asf/incubator-drill/tree/0dec032f
Diff: http://git-wip-us.apache.org/repos/asf/incubator-drill/diff/0dec032f

Branch: refs/heads/master
Commit: 0dec032fb922b8eaae975d33adeb06e2407797ed
Parents: 199f467
Author: Mehant Baid <me...@gmail.com>
Authored: Tue Jun 17 19:06:48 2014 -0700
Committer: Jacques Nadeau <ja...@apache.org>
Committed: Wed Jun 18 20:28:59 2014 -0700

----------------------------------------------------------------------
 .../templates/ConvertToNullableHolder.java      |  2 +-
 .../Decimal/CastDecimalDenseDecimalSparse.java  |  2 +-
 .../templates/Decimal/CastDecimalInt.java       |  2 +-
 .../templates/Decimal/CastDecimalSimilar.java   |  3 +-
 .../Decimal/CastDecimalSparseDecimalDense.java  |  3 +-
 .../templates/Decimal/CastDecimalVarchar.java   |  2 +-
 .../templates/Decimal/CastFloatDecimal.java     |  8 +-
 .../templates/Decimal/CastIntDecimal.java       |  2 +-
 .../templates/Decimal/CastSrcDecimalSimple.java |  4 +-
 .../templates/Decimal/CastVarCharDecimal.java   |  4 +-
 .../templates/Decimal/DecimalFunctions.java     | 69 ++++++++-------
 .../templates/DecimalAggrTypeFunctions1.java    | 30 +++----
 .../codegen/templates/FixedValueVectors.java    | 42 ---------
 .../templates/ParquetOutputRecordWriter.java    |  2 +-
 .../main/codegen/templates/ValueHolders.java    | 13 ++-
 .../drill/exec/vector/ValueHolderHelper.java    |  4 +-
 .../drill/exec/physical/impl/TestDecimal.java   | 91 --------------------
 17 files changed, 87 insertions(+), 196 deletions(-)
----------------------------------------------------------------------
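
A pattern recurs throughout the diffs below: direct reads and writes of the holders' public sign field are replaced by getSign()/setSign(), and every call site is reordered so the buffer is initialized or rewritten first and the sign set last. That ordering only matters if the sign now lives inside the digit buffer itself, presumably as a sign bit; that is an inference from these diffs, not a statement about the actual ValueHolders template. A toy illustration of the hazard the reordering avoids:

import java.util.Arrays;

// Toy model (an inference from the diffs, not Drill's holder code): if the sign
// is a bit inside the first word of the digit buffer, any buffer rewrite after
// setSign() silently erases it -- hence "fill buffer first, set sign last".
public class SignBitSketch {
  static final int SIGN_MASK = 0x80000000;
  int[] buffer = new int[4]; // stand-in for the sparse decimal digits

  void setSign(boolean negative) {
    if (negative) {
      buffer[0] |= SIGN_MASK;
    } else {
      buffer[0] &= ~SIGN_MASK;
    }
  }

  boolean getSign() {
    return (buffer[0] & SIGN_MASK) != 0;
  }

  public static void main(String[] args) {
    SignBitSketch holder = new SignBitSketch();
    holder.setSign(true);
    Arrays.fill(holder.buffer, 0);        // "initialize the buffer" afterwards...
    System.out.println(holder.getSign()); // ...and the sign is gone: prints false
  }
}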


http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/0dec032f/exec/java-exec/src/main/codegen/templates/ConvertToNullableHolder.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/codegen/templates/ConvertToNullableHolder.java b/exec/java-exec/src/main/codegen/templates/ConvertToNullableHolder.java
index 548d645..0d2be65 100644
--- a/exec/java-exec/src/main/codegen/templates/ConvertToNullableHolder.java
+++ b/exec/java-exec/src/main/codegen/templates/ConvertToNullableHolder.java
@@ -57,7 +57,7 @@ public class ${className} implements DrillSimpleFunc {
     output.scale = input.scale;
     output.precision = input.precision;
     <#if minor.class.startsWith("Decimal28") || minor.class.startsWith("Decimal38")>
-    output.sign = input.sign;
+    output.setSign(input.getSign());
     output.start = input.start;
     output.buffer = input.buffer;
     <#else>

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/0dec032f/exec/java-exec/src/main/codegen/templates/Decimal/CastDecimalDenseDecimalSparse.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/codegen/templates/Decimal/CastDecimalDenseDecimalSparse.java b/exec/java-exec/src/main/codegen/templates/Decimal/CastDecimalDenseDecimalSparse.java
index a486cf2..e9c03c8 100644
--- a/exec/java-exec/src/main/codegen/templates/Decimal/CastDecimalDenseDecimalSparse.java
+++ b/exec/java-exec/src/main/codegen/templates/Decimal/CastDecimalDenseDecimalSparse.java
@@ -63,7 +63,7 @@ public class Cast${type.from}${type.to} implements DrillSimpleFunc{
         }
         out.scale = (int) scale.value;
         out.precision = (int) precision.value;
-        out.sign = in.sign;
+        out.setSign(in.getSign());
 
         /* We store base 1 Billion integers in our representation, which requires
          * 30 bits, but a typical integer requires 32 bits. In our dense representation

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/0dec032f/exec/java-exec/src/main/codegen/templates/Decimal/CastDecimalInt.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/codegen/templates/Decimal/CastDecimalInt.java b/exec/java-exec/src/main/codegen/templates/Decimal/CastDecimalInt.java
index a89965f..cf7a634 100644
--- a/exec/java-exec/src/main/codegen/templates/Decimal/CastDecimalInt.java
+++ b/exec/java-exec/src/main/codegen/templates/Decimal/CastDecimalInt.java
@@ -96,7 +96,7 @@ public class Cast${type.from}${type.to} implements DrillSimpleFunc {
             out.value = (${type.javatype}) ((out.value * org.apache.drill.common.util.DecimalUtility.DIGITS_BASE) + in.getInteger(i));
         }
 
-        if (in.sign == true) {
+        if (in.getSign() == true) {
             out.value *= -1;
         }
     }

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/0dec032f/exec/java-exec/src/main/codegen/templates/Decimal/CastDecimalSimilar.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/codegen/templates/Decimal/CastDecimalSimilar.java b/exec/java-exec/src/main/codegen/templates/Decimal/CastDecimalSimilar.java
index a59cfdb..9a2d3e5 100644
--- a/exec/java-exec/src/main/codegen/templates/Decimal/CastDecimalSimilar.java
+++ b/exec/java-exec/src/main/codegen/templates/Decimal/CastDecimalSimilar.java
@@ -61,7 +61,7 @@ public class Cast${type.from}${type.to} implements DrillSimpleFunc{
         out.start = 0;
         out.scale = (int) scale.value;
         out.precision = (int) precision.value;
-        out.sign = in.sign;
+        boolean sign = in.getSign();
 
         // Re initialize the buffer everytime
         for (int i = 0; i < ${type.arraySize}; i++) {
@@ -79,6 +79,7 @@ public class Cast${type.from}${type.to} implements DrillSimpleFunc{
         if (in.scale != out.scale) {
           org.apache.drill.common.util.DecimalUtility.roundDecimal(out.buffer, out.start, out.nDecimalDigits, out.scale, in.scale);
         }
+        out.setSign(sign);
     }
 }
 </#if> <#-- type.major -->

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/0dec032f/exec/java-exec/src/main/codegen/templates/Decimal/CastDecimalSparseDecimalDense.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/codegen/templates/Decimal/CastDecimalSparseDecimalDense.java b/exec/java-exec/src/main/codegen/templates/Decimal/CastDecimalSparseDecimalDense.java
index 4798b34..e9e9711 100644
--- a/exec/java-exec/src/main/codegen/templates/Decimal/CastDecimalSparseDecimalDense.java
+++ b/exec/java-exec/src/main/codegen/templates/Decimal/CastDecimalSparseDecimalDense.java
@@ -66,8 +66,6 @@ public class Cast${type.from}${type.to} implements DrillSimpleFunc{
         out.scale = (int) scale.value;
         out.precision = (int) precision.value;
 
-        out.sign = in.sign;
-
         /* Before converting from a sparse representation to a dense representation
          * we need to convert it to an intermediate representation. In the sparse
          * representation we separate out the scale and the integer part of the decimal
@@ -169,6 +167,7 @@ public class Cast${type.from}${type.to} implements DrillSimpleFunc{
 
           // Set the bytes in the buffer
           out.buffer.setBytes(dstIndex, intermediateBytes, 1, (size - 1));
+          out.setSign(in.getSign());
     }
 }
 </#if>

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/0dec032f/exec/java-exec/src/main/codegen/templates/Decimal/CastDecimalVarchar.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/codegen/templates/Decimal/CastDecimalVarchar.java b/exec/java-exec/src/main/codegen/templates/Decimal/CastDecimalVarchar.java
index 783165f..dc8eca5 100644
--- a/exec/java-exec/src/main/codegen/templates/Decimal/CastDecimalVarchar.java
+++ b/exec/java-exec/src/main/codegen/templates/Decimal/CastDecimalVarchar.java
@@ -146,7 +146,7 @@ public class Cast${type.from}${type.to} implements DrillSimpleFunc {
 
 
         // If we have valid digits print '-' sign
-        if ((in.sign == true) && index < ${type.arraySize}) {
+        if ((in.getSign() == true) && index < ${type.arraySize}) {
             str.append("-");
         }
 

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/0dec032f/exec/java-exec/src/main/codegen/templates/Decimal/CastFloatDecimal.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/codegen/templates/Decimal/CastFloatDecimal.java b/exec/java-exec/src/main/codegen/templates/Decimal/CastFloatDecimal.java
index 9ebb86f..903634b 100644
--- a/exec/java-exec/src/main/codegen/templates/Decimal/CastFloatDecimal.java
+++ b/exec/java-exec/src/main/codegen/templates/Decimal/CastFloatDecimal.java
@@ -70,14 +70,14 @@ public class Cast${type.from}${type.to} implements DrillSimpleFunc {
         out.start = 0;
         out.buffer = buffer;
 
-        if (in.value < 0) {
-            out.sign = true;
-        }
-
         // Initialize the buffer
         for (int i = 0; i < ${type.arraySize}; i++) {
             out.setInteger(i, 0);
         }
+
+        if (in.value < 0) {
+            out.setSign(true);
+        }
         // Assign the integer part of the decimal to the output holder
         org.apache.drill.common.util.DecimalUtility.getSparseFromBigDecimal(new java.math.BigDecimal(String.valueOf(in.value)), out.buffer, out.start, out.scale, out.precision, out.nDecimalDigits);
 

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/0dec032f/exec/java-exec/src/main/codegen/templates/Decimal/CastIntDecimal.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/codegen/templates/Decimal/CastIntDecimal.java b/exec/java-exec/src/main/codegen/templates/Decimal/CastIntDecimal.java
index 979e7e2..78bde2e 100644
--- a/exec/java-exec/src/main/codegen/templates/Decimal/CastIntDecimal.java
+++ b/exec/java-exec/src/main/codegen/templates/Decimal/CastIntDecimal.java
@@ -79,7 +79,7 @@ public class Cast${type.from}${type.to} implements DrillSimpleFunc {
 
         // check if input is a negative number and store the sign
         if (in.value < 0) {
-            out.sign = true;
+            out.setSign(true);
         }
 
         // Figure out how many array positions to be left for the scale part

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/0dec032f/exec/java-exec/src/main/codegen/templates/Decimal/CastSrcDecimalSimple.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/codegen/templates/Decimal/CastSrcDecimalSimple.java b/exec/java-exec/src/main/codegen/templates/Decimal/CastSrcDecimalSimple.java
index b298c66..7ef806f 100644
--- a/exec/java-exec/src/main/codegen/templates/Decimal/CastSrcDecimalSimple.java
+++ b/exec/java-exec/src/main/codegen/templates/Decimal/CastSrcDecimalSimple.java
@@ -72,7 +72,7 @@ public class Cast${type.from}${type.to} implements DrillSimpleFunc {
 
         out.buffer = buffer;
         out.start = 0;
-        out.sign = (in.value < 0) ? true : false;
+        out.setSign(in.value < 0);
 
         /* Since we will be dividing the decimal value with base 1 billion
          * we don't want negative results if the decimal is negative.
@@ -169,7 +169,7 @@ public class Cast${type.from}${type.to} implements DrillSimpleFunc{
 
         out.buffer = buffer;
         out.start = 0;
-        out.sign = (in.value < 0) ? true : false;
+        out.setSign(in.value < 0);
 
         /* Since we will be dividing the decimal value with base 1 billion
          * we don't want negative results if the decimal is negative.

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/0dec032f/exec/java-exec/src/main/codegen/templates/Decimal/CastVarCharDecimal.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/codegen/templates/Decimal/CastVarCharDecimal.java b/exec/java-exec/src/main/codegen/templates/Decimal/CastVarCharDecimal.java
index 9ca0533..ceebc0a 100644
--- a/exec/java-exec/src/main/codegen/templates/Decimal/CastVarCharDecimal.java
+++ b/exec/java-exec/src/main/codegen/templates/Decimal/CastVarCharDecimal.java
@@ -186,6 +186,7 @@ public class Cast${type.from}${type.to} implements DrillSimpleFunc {
 
         out.scale = (int) scale.value;
         out.precision = (int) precision.value;
+        boolean sign = false;
 
         // Initialize the output buffer
         for (int i = 0; i < ${type.arraySize}; i++) {
@@ -207,7 +208,7 @@ public class Cast${type.from}${type.to} implements DrillSimpleFunc {
 
         if (next == '-') {
             readIndex++;
-            out.sign = true;
+            sign = true;
         }
 
         if (next == '.') {
@@ -340,6 +341,7 @@ public class Cast${type.from}${type.to} implements DrillSimpleFunc {
             int padding = (int) org.apache.drill.common.util.DecimalUtility.getPowerOfTen((int) (org.apache.drill.common.util.DecimalUtility.MAX_DIGITS - ndigits));
             out.setInteger(decimalBufferIndex, out.getInteger(decimalBufferIndex) * padding);
         }
+        out.setSign(sign);
     }
 }
 </#if> <#-- type.major -->

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/0dec032f/exec/java-exec/src/main/codegen/templates/Decimal/DecimalFunctions.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/codegen/templates/Decimal/DecimalFunctions.java b/exec/java-exec/src/main/codegen/templates/Decimal/DecimalFunctions.java
index a41fb20..3f5b5cd 100644
--- a/exec/java-exec/src/main/codegen/templates/Decimal/DecimalFunctions.java
+++ b/exec/java-exec/src/main/codegen/templates/Decimal/DecimalFunctions.java
@@ -23,9 +23,9 @@ import org.apache.drill.exec.expr.annotations.Workspace;
 <#macro compareBlock holderType left right absCompare output>
 
         outside:{
-            ${output} = org.apache.drill.common.util.DecimalUtility.compareSparseBytes(left.buffer, left.start, left.sign,
+            ${output} = org.apache.drill.common.util.DecimalUtility.compareSparseBytes(left.buffer, left.start, left.getSign(),
                             left.scale, left.precision, right.buffer,
-                            right.start, right.sign, right.precision,
+                            right.start, right.getSign(), right.precision,
                             right.scale, left.WIDTH, left.nDecimalDigits, ${absCompare});
 
     }
@@ -295,9 +295,9 @@ public class ${type.name}Functions {
              * causes the sign of one of the inputs to change and hence it effectively
              * becomes addition
              */
-            if (left.sign != right.sign) {
+            if (left.getSign() != right.getSign()) {
                 <@addBlock holderType=type.name in1="left" in2="right" result="result"/>
-                result.sign = left.sign;
+                result.setSign(left.getSign());
             } else {
                 /* Sign of the inputs are the same, meaning we have to perform subtraction
                  * For subtraction we need left input to be greater than right input
@@ -313,10 +313,10 @@ public class ${type.name}Functions {
                 }
 
                 //Determine the sign of the result
-                if ((left.sign == false && cmp == -1) || (left.sign == true && cmp == 1)) {
-                    result.sign = true;
+                if ((left.getSign() == false && cmp == -1) || (left.getSign() == true && cmp == 1)) {
+                    result.setSign(true);
                 } else {
-                    result.sign = false;
+                    result.setSign(false);
                 }
             }
 
@@ -347,7 +347,7 @@ public class ${type.name}Functions {
             }
 
             /* If sign is different use the subtraction logic */
-            if (left.sign != right.sign) {
+            if (left.getSign() != right.getSign()) {
 
                 /* Subtract logic assumes, left input is greater than right input
                  * swap if necessary
@@ -357,17 +357,17 @@ public class ${type.name}Functions {
 
                 if (cmp == -1) {
                     <@subtractBlock holderType=type.name in1="right" in2="left" result="result"/>
-                    result.sign = right.sign;
+                    result.setSign(right.getSign());
                 } else {
                     <@subtractBlock holderType=type.name in1="left" in2="right" result="result"/>
-                    result.sign = left.sign;
+                    result.setSign(left.getSign());
                 }
 
 
             } else {
                 /* Sign of the two input decimals is the same, use the add logic */
                 <@addBlock holderType=type.name in1="left" in2="right" result="result"/>
-                result.sign = left.sign;
+                result.setSign(left.getSign());
             }
         }
     }
@@ -486,7 +486,7 @@ public class ${type.name}Functions {
               result.setInteger(outputIndex--, 0);
             }
 
-            result.sign = (left.sign == right.sign) ? false : true;
+            result.setSign(left.getSign() != right.getSign());
         }
     }
 
@@ -609,7 +609,7 @@ public class ${type.name}Functions {
 
           boolean zeroValue = true;
 
-          if (in.sign == true) {
+          if (in.getSign() == true) {
             out.value = -1;
           } else {
             for (int i = 0; i < ${type.storage}; i++) {
@@ -637,7 +637,7 @@ public class ${type.name}Functions {
           out.precision = in.precision;
           out.buffer = in.buffer;
           out.start = in.start;
-          out.sign = in.sign;
+          boolean sign = in.getSign();
 
           // Indicates whether we need to add 1 to the integer part, while performing ceil
           int carry = 0;
@@ -645,7 +645,7 @@ public class ${type.name}Functions {
           int scaleStartIndex = ${type.storage} - org.apache.drill.common.util.DecimalUtility.roundUp(in.scale);
           int srcIntIndex = scaleStartIndex - 1;
 
-          if (out.sign == false) {
+          if (sign == false) {
             // Ceil increments the integer part only for positive values; negative values just truncate
             while (scaleStartIndex < ${type.storage}) {
               if (out.getInteger(scaleStartIndex) != 0) {
@@ -684,6 +684,8 @@ public class ${type.name}Functions {
               }
             }
           }
+          // set the sign
+          out.setSign(sign);
         }
     }
 
@@ -701,7 +703,7 @@ public class ${type.name}Functions {
           out.precision = in.precision;
           out.buffer = in.buffer;
           out.start = in.start;
-          out.sign = in.sign;
+          boolean sign = in.getSign();
 
           // Indicates whether we need to decrement 1 from the integer part, while performing floor, done for -ve values
           int carry = 0;
@@ -709,7 +711,7 @@ public class ${type.name}Functions {
           int scaleStartIndex = ${type.storage} - org.apache.drill.common.util.DecimalUtility.roundUp(in.scale);
           int srcIntIndex = scaleStartIndex - 1;
 
-          if (out.sign == true) {
+          if (sign == true) {
             // Floor decrements the integer part only for negative values; positive values just truncate
             while (scaleStartIndex < ${type.storage}) {
               if (out.getInteger(scaleStartIndex) != 0) {
@@ -730,7 +732,6 @@ public class ${type.name}Functions {
           while (destIndex >= 0) {
             out.setInteger(destIndex--, 0);
           }
-
           // Add the carry
           if (carry != 0) {
             destIndex = ${type.storage} - 1;
@@ -748,6 +749,8 @@ public class ${type.name}Functions {
               }
             }
           }
+          // set the sign
+          out.setSign(sign);
         }
     }
 
@@ -765,7 +768,7 @@ public class ${type.name}Functions {
           out.precision = in.precision;
           out.buffer = in.buffer;
           out.start = in.start;
-          out.sign = in.sign;
+          boolean sign = in.getSign();
 
           // Integer part's src index
           int srcIntIndex = ${type.storage} - org.apache.drill.common.util.DecimalUtility.roundUp(in.scale) - 1;
@@ -780,6 +783,8 @@ public class ${type.name}Functions {
           while (destIndex >= 0) {
             out.setInteger(destIndex--, 0);
           }
+            // set the sign
+            out.setSign(sign);
         }
     }
 
@@ -798,7 +803,7 @@ public class ${type.name}Functions {
           result.precision = left.precision;
           result.buffer = left.buffer;
           result.start = left.start;
-          result.sign = left.sign;
+          boolean sign = left.getSign();
 
           int newScaleRoundedUp  = org.apache.drill.common.util.DecimalUtility.roundUp(right.value);
           int origScaleRoundedUp = org.apache.drill.common.util.DecimalUtility.roundUp(left.scale);
@@ -855,6 +860,8 @@ public class ${type.name}Functions {
               }
             }
           }
+            // set the sign
+            result.setSign(sign);
         }
     }
 
@@ -872,7 +879,7 @@ public class ${type.name}Functions {
           out.precision = in.precision;
           out.buffer = in.buffer;
           out.start = in.start;
-          out.sign = in.sign;
+          boolean sign = in.getSign();
 
           boolean roundUp = false;
 
@@ -915,6 +922,8 @@ public class ${type.name}Functions {
               }
             }
           }
+            // set the sign
+            out.setSign(sign);
         }
     }
 
@@ -933,9 +942,11 @@ public class ${type.name}Functions {
           result.precision = left.precision;
           result.buffer = left.buffer;
           result.start = left.start;
-          result.sign = left.sign;
+          boolean sign = left.getSign();
 
           org.apache.drill.common.util.DecimalUtility.roundDecimal(result.buffer, result.start, result.nDecimalDigits, result.scale, left.scale);
+          // set the sign
+          result.setSign(sign);
         }
     }
 
@@ -1074,7 +1085,7 @@ public class ${type.name}Functions {
         public void setup(RecordBatch incoming) {}
 
         public void eval() {
-            out.value = org.apache.drill.common.util.DecimalUtility.compareDenseBytes(left.buffer, left.start, left.sign, right.buffer, right.start, right.sign, left.WIDTH);
+            out.value = org.apache.drill.common.util.DecimalUtility.compareDenseBytes(left.buffer, left.start, left.getSign(), right.buffer, right.start, right.getSign(), left.WIDTH);
         }
     }
 
@@ -1087,7 +1098,7 @@ public class ${type.name}Functions {
         public void setup(RecordBatch incoming) {}
 
         public void eval() {
-            int cmp  = org.apache.drill.common.util.DecimalUtility.compareDenseBytes(left.buffer, left.start, left.sign, right.buffer, right.start, right.sign, left.WIDTH);
+            int cmp  = org.apache.drill.common.util.DecimalUtility.compareDenseBytes(left.buffer, left.start, left.getSign(), right.buffer, right.start, right.getSign(), left.WIDTH);
             out.value = cmp == -1 ? 1 : 0;
         }
     }
@@ -1101,7 +1112,7 @@ public class ${type.name}Functions {
         public void setup(RecordBatch incoming) {}
 
         public void eval() {
-            int cmp = org.apache.drill.common.util.DecimalUtility.compareDenseBytes(left.buffer, left.start, left.sign, right.buffer, right.start, right.sign, left.WIDTH);
+            int cmp = org.apache.drill.common.util.DecimalUtility.compareDenseBytes(left.buffer, left.start, left.getSign(), right.buffer, right.start, right.getSign(), left.WIDTH);
             out.value = cmp < 1 ? 1 : 0;
         }
     }
@@ -1115,7 +1126,7 @@ public class ${type.name}Functions {
         public void setup(RecordBatch incoming) {}
 
         public void eval() {
-            int cmp = org.apache.drill.common.util.DecimalUtility.compareDenseBytes(left.buffer, left.start, left.sign, right.buffer, right.start, right.sign, left.WIDTH);
+            int cmp = org.apache.drill.common.util.DecimalUtility.compareDenseBytes(left.buffer, left.start, left.getSign(), right.buffer, right.start, right.getSign(), left.WIDTH);
             out.value = cmp == 1 ? 1 : 0;
         }
     }
@@ -1129,7 +1140,7 @@ public class ${type.name}Functions {
         public void setup(RecordBatch incoming) {}
 
         public void eval() {
-            int cmp = org.apache.drill.common.util.DecimalUtility.compareDenseBytes(left.buffer, left.start, left.sign, right.buffer, right.start, right.sign, left.WIDTH);
+            int cmp = org.apache.drill.common.util.DecimalUtility.compareDenseBytes(left.buffer, left.start, left.getSign(), right.buffer, right.start, right.getSign(), left.WIDTH);
             out.value = cmp > -1 ? 1 : 0;
         }
     }
@@ -1143,7 +1154,7 @@ public class ${type.name}Functions {
         public void setup(RecordBatch incoming) {}
 
         public void eval() {
-            int cmp = org.apache.drill.common.util.DecimalUtility.compareDenseBytes(left.buffer, left.start, left.sign, right.buffer, right.start, right.sign, left.WIDTH);
+            int cmp = org.apache.drill.common.util.DecimalUtility.compareDenseBytes(left.buffer, left.start, left.getSign(), right.buffer, right.start, right.getSign(), left.WIDTH);
             out.value = cmp == 0 ? 1 : 0;
         }
     }
@@ -1159,7 +1170,7 @@ public class ${type.name}Functions {
 
         public void eval() {
 
-            int cmp = org.apache.drill.common.util.DecimalUtility.compareDenseBytes(left.buffer, left.start, left.sign, right.buffer, right.start, right.sign, left.WIDTH);
+            int cmp = org.apache.drill.common.util.DecimalUtility.compareDenseBytes(left.buffer, left.start, left.getSign(), right.buffer, right.start, right.getSign(), left.WIDTH);
             out.value = cmp != 0 ? 1 : 0;
         }
     }

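A note for readers following the generated operators: every Decimal comparison above funnels through a single three-way byte comparison (DecimalUtility.compareDenseBytes or compareSparseBytes, returning -1, 0 or 1), and the six functions differ only in how they map that result to the 1/0 bit Drill emits. The sketch below is illustrative only -- the class name is hypothetical, and Integer.compare stands in for the Drill utility call:

    // Hypothetical sketch: mapping one three-way comparison result onto the
    // six comparison operators, mirroring the generated eval() bodies above.
    public class ThreeWayCompareSketch {
      public static void main(String[] args) {
        // stand-in for compareSparseBytes(...); signum normalises to -1/0/1
        int cmp = Integer.signum(Integer.compare(3, 7));
        int lt = cmp == -1 ? 1 : 0; // less than
        int le = cmp <   1 ? 1 : 0; // less than or equal: cmp is -1 or 0
        int gt = cmp ==  1 ? 1 : 0; // greater than
        int ge = cmp >  -1 ? 1 : 0; // greater than or equal: cmp is 0 or 1
        int eq = cmp ==  0 ? 1 : 0; // equal
        int ne = cmp !=  0 ? 1 : 0; // not equal
        System.out.printf("lt=%d le=%d gt=%d ge=%d eq=%d ne=%d%n", lt, le, gt, ge, eq, ne);
      }
    }
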
http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/0dec032f/exec/java-exec/src/main/codegen/templates/DecimalAggrTypeFunctions1.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/codegen/templates/DecimalAggrTypeFunctions1.java b/exec/java-exec/src/main/codegen/templates/DecimalAggrTypeFunctions1.java
index 5e02ff0..c5a927c 100644
--- a/exec/java-exec/src/main/codegen/templates/DecimalAggrTypeFunctions1.java
+++ b/exec/java-exec/src/main/codegen/templates/DecimalAggrTypeFunctions1.java
@@ -72,13 +72,13 @@ public static class ${type.inputType}${aggrtype.className} implements DrillAggFu
     for (int i = 0; i < value.nDecimalDigits; i++) {
       value.setInteger(i, 0xFFFFFFFF);
     }
-    value.sign = true;
+    value.setSign(true);
     <#elseif aggrtype.funcName == "min">
     for (int i = 0; i < value.nDecimalDigits; i++) {
       value.setInteger(i, 0x7FFFFFFF);
     }
     // Set sign to be positive so initial value is maximum
-    value.sign = false;
+    value.setSign(false);
     value.precision = ${type.runningType}Holder.maxPrecision;
     </#if>
     <#elseif type.outputType == "Decimal9" || type.outputType == "Decimal18">
@@ -101,21 +101,21 @@ public static class ${type.inputType}${aggrtype.className} implements DrillAggFu
     value.value++;
     <#elseif aggrtype.funcName == "max">
     <#if type.outputType.endsWith("Dense")>
-    int cmp = org.apache.drill.common.util.DecimalUtility.compareDenseBytes(in.buffer, in.start, in.sign, value.buffer, value.start, value.sign, in.WIDTH);
+    int cmp = org.apache.drill.common.util.DecimalUtility.compareDenseBytes(in.buffer, in.start, in.getSign(), value.buffer, value.start, value.getSign(), in.WIDTH);
     if (cmp == 1) {
       in.buffer.getBytes(in.start, value.buffer, 0, value.WIDTH);
-      value.sign = in.sign;
+      value.setSign(in.getSign());
       value.scale = in.scale;
       value.precision = in.precision;
     }
     <#elseif type.outputType.endsWith("Sparse")>
-    int cmp = org.apache.drill.common.util.DecimalUtility.compareSparseBytes(in.buffer, in.start, in.sign,
+    int cmp = org.apache.drill.common.util.DecimalUtility.compareSparseBytes(in.buffer, in.start, in.getSign(),
       in.scale, in.precision, value.buffer,
-      value.start, value.sign, value.precision,
+      value.start, value.getSign(), value.precision,
       value.scale, in.WIDTH, in.nDecimalDigits, false);
     if (cmp == 1) {
       in.buffer.getBytes(in.start, value.buffer, 0, value.WIDTH);
-      value.sign = in.sign;
+      value.setSign(in.getSign());
       value.scale = in.scale;
       value.precision = in.precision;
     }
@@ -124,21 +124,21 @@ public static class ${type.inputType}${aggrtype.className} implements DrillAggFu
     </#if>
     <#elseif aggrtype.funcName == "min">
     <#if type.outputType.endsWith("Dense")>
-    int cmp = org.apache.drill.common.util.DecimalUtility.compareDenseBytes(in.buffer, in.start, in.sign, value.buffer, value.start, value.sign, in.WIDTH);
+    int cmp = org.apache.drill.common.util.DecimalUtility.compareDenseBytes(in.buffer, in.start, in.getSign(), value.buffer, value.start, value.getSign(), in.WIDTH);
     if (cmp == -1) {
       in.buffer.getBytes(in.start, value.buffer, 0, value.WIDTH);
-      value.sign = in.sign;
+      value.setSign(in.getSign());
       value.scale = in.scale;
       value.precision = in.precision;
     }
     <#elseif type.outputType.endsWith("Sparse")>
-    int cmp = org.apache.drill.common.util.DecimalUtility.compareSparseBytes(in.buffer, in.start, in.sign,
+    int cmp = org.apache.drill.common.util.DecimalUtility.compareSparseBytes(in.buffer, in.start, in.getSign(),
       in.scale, in.precision, value.buffer,
-      value.start, value.sign, value.precision,
+      value.start, value.getSign(), value.precision,
       value.scale, in.WIDTH, in.nDecimalDigits, false);
     if (cmp == -1) {
       in.buffer.getBytes(in.start, value.buffer, 0, value.WIDTH);
-      value.sign = in.sign;
+      value.setSign(in.getSign());
       value.scale = in.scale;
       value.precision = in.precision;
     }
@@ -159,7 +159,7 @@ public static class ${type.inputType}${aggrtype.className} implements DrillAggFu
     <#if type.outputType.endsWith("Dense") || type.outputType.endsWith("Sparse")>
     out.buffer = value.buffer;
     out.start = value.start;
-    out.sign = value.sign;
+    out.setSign(value.getSign());
     <#elseif type.outputType == "Decimal9" || type.outputType == "Decimal18">
     out.value = value.value;
     </#if>
@@ -184,9 +184,9 @@ public static class ${type.inputType}${aggrtype.className} implements DrillAggFu
     }
     <#if aggrtype.funcName == "min">
     // Set sign to be positive so initial value is maximum
-    value.sign = false;
+    value.setSign(false);
     <#elseif aggrtype.funcName == "max">
-    value.sign = true;
+    value.setSign(true);
     </#if>
     <#elseif type.outputType == "Decimal9" || type.outputType == "Decimal18">
     value.value = ${type.initValue};

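The seeding logic above is easier to see outside the holder machinery: max is seeded with the smallest representable value (all-ones magnitude words plus setSign(true)) and min with the largest (0x7FFFFFFF words, positive sign, maximum precision), so the first real input always wins the comparison. A minimal analogue using BigDecimal in place of Drill's sparse/dense holders; the class name and the sentinel values are arbitrary stand-ins, not the actual representable extremes:

    import java.math.BigDecimal;

    // Hypothetical analogue of the min/max seeding and update logic above.
    public class MinMaxSeedSketch {
      public static void main(String[] args) {
        BigDecimal runningMax = new BigDecimal("-1E+38"); // stands in for all-0xFF words + setSign(true)
        BigDecimal runningMin = new BigDecimal("1E+38");  // stands in for 0x7FFFFFFF words + setSign(false)
        for (BigDecimal in : new BigDecimal[] {new BigDecimal("-1.5"), new BigDecimal("2.25")}) {
          if (in.compareTo(runningMax) > 0) runningMax = in; // the cmp == 1 branch above
          if (in.compareTo(runningMin) < 0) runningMin = in; // the cmp == -1 branch above
        }
        System.out.println("max=" + runningMax + " min=" + runningMin);
      }
    }
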
http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/0dec032f/exec/java-exec/src/main/codegen/templates/FixedValueVectors.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/codegen/templates/FixedValueVectors.java b/exec/java-exec/src/main/codegen/templates/FixedValueVectors.java
index 7ff7327..d9715a7 100644
--- a/exec/java-exec/src/main/codegen/templates/FixedValueVectors.java
+++ b/exec/java-exec/src/main/codegen/templates/FixedValueVectors.java
@@ -402,20 +402,8 @@ public final class ${minor.class}Vector extends BaseDataValueVector implements F
          * big endian ordering makes sense for this purpose.  So we have to deal with
          * the sign bit for the two representations in a slightly different fashion
          */
-
-        // Get the sign of the decimal
-          <#if minor.class.endsWith("Sparse")>
-          if ((holder.buffer.getInt(holder.start) & 0x80000000) != 0) {
-          <#elseif minor.class.endsWith("Dense")>
-          if ((holder.buffer.getInt(holder.start) & 0x00000080) != 0) {
-          </#if>
-            holder.sign = true;
-        }
-
         holder.scale = getField().getScale();
         holder.precision = getField().getPrecision();
-
-
     }
 
     public void get(int index, Nullable${minor.class}Holder holder) {
@@ -423,15 +411,6 @@ public final class ${minor.class}Vector extends BaseDataValueVector implements F
         holder.start = index * ${type.width};
 
         holder.buffer = data;
-
-          // Get the sign the of the decimal
-          <#if minor.class.endsWith("Sparse")>
-          if ((holder.buffer.getInt(holder.start) & 0x80000000) != 0) {
-          <#elseif minor.class.endsWith("Dense")>
-          if ((holder.buffer.getInt(holder.start) & 0x00000080) != 0) {
-          </#if>
-            holder.sign = true;
-        }
     }
 
       @Override
@@ -690,31 +669,10 @@ public final class ${minor.class}Vector extends BaseDataValueVector implements F
 
    public void set(int index, ${minor.class}Holder holder){
       data.setBytes(index * ${type.width}, holder.buffer, holder.start, ${type.width});
-
-      // Set the sign of the decimal
-      if (holder.sign == true) {
-          int value = data.getInt(index * ${type.width});
-          <#if minor.class.endsWith("Sparse")>
-          data.setInt(index * ${type.width}, (value | 0x80000000));
-          <#elseif minor.class.endsWith("Dense")>
-          data.setInt(index * ${type.width}, (value | 0x00000080));
-          </#if>
-
-      }
    }
 
    void set(int index, Nullable${minor.class}Holder holder){
        data.setBytes(index * ${type.width}, holder.buffer, holder.start, ${type.width});
-
-      // Set the sign of the decimal
-      if (holder.sign == true) {
-          int value = data.getInt(index * ${type.width});
-          <#if minor.class.endsWith("Sparse")>
-          data.setInt(index * ${type.width}, (value | 0x80000000));
-          <#elseif minor.class.endsWith("Dense")>
-          data.setInt(index * ${type.width}, (value | 0x00000080));
-          </#if>
-      }
    }
 
    public boolean setSafe(int index,  Nullable${minor.class}Holder holder){

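The masks deleted above are worth spelling out, since the same bit manipulation reappears in the holder accessors later in this commit: the sparse layout keeps the sign in the most significant bit of the first 32-bit word (0x80000000), while the dense layout keeps it in bit 7 of that word (0x00000080), a consequence of its big-endian byte packing. A standalone sketch, with a plain ByteBuffer standing in for the Drill buffer and a hypothetical class name:

    import java.nio.ByteBuffer;

    // Hypothetical sketch of the two sign-bit masks used by the decimal layouts.
    public class DecimalSignBitSketch {
      static final int SPARSE_SIGN_MASK = 0x80000000; // MSB of the first word
      static final int DENSE_SIGN_MASK  = 0x00000080; // bit 7 of the first word

      public static void main(String[] args) {
        ByteBuffer buf = ByteBuffer.allocate(4);
        buf.putInt(0, buf.getInt(0) | SPARSE_SIGN_MASK);             // mark negative (sparse)
        System.out.println((buf.getInt(0) & SPARSE_SIGN_MASK) != 0); // true
        System.out.println((buf.getInt(0) & DENSE_SIGN_MASK) != 0);  // false: dense bit untouched
      }
    }
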
http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/0dec032f/exec/java-exec/src/main/codegen/templates/ParquetOutputRecordWriter.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/codegen/templates/ParquetOutputRecordWriter.java b/exec/java-exec/src/main/codegen/templates/ParquetOutputRecordWriter.java
index 07bd449..92267e7 100644
--- a/exec/java-exec/src/main/codegen/templates/ParquetOutputRecordWriter.java
+++ b/exec/java-exec/src/main/codegen/templates/ParquetOutputRecordWriter.java
@@ -163,7 +163,7 @@ public abstract class ParquetOutputRecordWriter implements RecordWriter {
       byte[] bytes = DecimalUtility.getBigDecimalFromSparse(
               valueHolder.buffer, valueHolder.start, ${minor.class}Holder.nDecimalDigits, valueHolder.scale).unscaledValue().toByteArray();
       byte[] output = new byte[ParquetTypeHelper.getLengthForMinorType(MinorType.${minor.class?upper_case})];
-      if (valueHolder.sign) {
+      if (valueHolder.getSign()) {
         Arrays.fill(output, 0, output.length - bytes.length, (byte)0xFF);
       } else {
         Arrays.fill(output, 0, output.length - bytes.length, (byte)0x0);

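The Arrays.fill call above relies on two's-complement sign extension: when the unscaled value is widened to Parquet's fixed byte length, the leading pad bytes must be 0xFF for negative numbers and 0x00 otherwise, or the padded value would change. A small self-contained restatement; the helper name is hypothetical and the value is assumed to fit in the target width:

    import java.math.BigInteger;
    import java.util.Arrays;

    // Hypothetical sketch of sign-extending a two's-complement value to a fixed width.
    public class SignExtendSketch {
      static byte[] widen(BigInteger unscaled, int width) {
        byte[] bytes = unscaled.toByteArray();
        byte[] out = new byte[width];
        // pad with 0xFF for negatives (sign extension), 0x00 for non-negatives
        Arrays.fill(out, 0, width - bytes.length, unscaled.signum() < 0 ? (byte) 0xFF : (byte) 0x00);
        System.arraycopy(bytes, 0, out, width - bytes.length, bytes.length);
        return out;
      }

      public static void main(String[] args) {
        System.out.println(Arrays.toString(widen(BigInteger.valueOf(-2), 4))); // [-1, -1, -1, -2]
      }
    }
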
http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/0dec032f/exec/java-exec/src/main/codegen/templates/ValueHolders.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/codegen/templates/ValueHolders.java b/exec/java-exec/src/main/codegen/templates/ValueHolders.java
index cdc4d74..46cf9e0 100644
--- a/exec/java-exec/src/main/codegen/templates/ValueHolders.java
+++ b/exec/java-exec/src/main/codegen/templates/ValueHolders.java
@@ -72,7 +72,6 @@ public final class ${className} implements ValueHolder{
     public int precision;
     public static final int maxPrecision = ${minor.maxPrecisionDigits};
     <#if minor.class.startsWith("Decimal28") || minor.class.startsWith("Decimal38")>
-    public boolean sign;
     public int start;
     public ByteBuf buffer;
     public static final int nDecimalDigits = ${minor.nDecimalDigits};
@@ -96,6 +95,18 @@ public final class ${className} implements ValueHolder{
         buffer.setInt(start + (index * 4), value);
     }
 
+    public void setSign(boolean sign) {
+      // Set MSB to 1 if sign is negative
+      if (sign == true) {
+        int value = getInteger(0);
+        setInteger(0, (value | 0x80000000));
+      }
+    }
+
+    public boolean getSign() {
+      return ((buffer.getInt(start) & 0x80000000) != 0);
+    }
+
     <#else>
     public ${minor.javaType!type.javaType} value;
     </#if>

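Note the asymmetry in the accessors added above: setSign only sets the MSB when the sign is negative, so callers rely on the word starting out with a clear sign bit, while getSign simply tests that bit. Restated on a plain int word instead of the holder's ByteBuf (the class name is hypothetical):

    // Hypothetical restatement of the holder's setSign/getSign semantics.
    public class SignAccessorSketch {
      private int firstWord; // stands in for buffer.getInt(start)

      void setSign(boolean sign) {
        if (sign) {                 // only the negative case touches the word
          firstWord |= 0x80000000;
        }                           // setSign(false) is a no-op, as committed
      }

      boolean getSign() {
        return (firstWord & 0x80000000) != 0;
      }

      public static void main(String[] args) {
        SignAccessorSketch h = new SignAccessorSketch();
        h.setSign(true);
        System.out.println(h.getSign()); // true
      }
    }
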
http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/0dec032f/exec/java-exec/src/main/java/org/apache/drill/exec/vector/ValueHolderHelper.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/vector/ValueHolderHelper.java b/exec/java-exec/src/main/java/org/apache/drill/exec/vector/ValueHolderHelper.java
index fb9dfd0..6968c21 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/vector/ValueHolderHelper.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/vector/ValueHolderHelper.java
@@ -85,7 +85,7 @@ public class ValueHolderHelper {
 
     dch.scale = bigDecimal.scale();
     dch.precision = bigDecimal.precision();
-    dch.sign = (bigDecimal.signum() == -1);
+    dch.setSign(bigDecimal.signum() == -1);
     dch.start = 0;
 
     dch.buffer = Unpooled.wrappedBuffer(new byte[5 * DecimalUtility.integerSize]);
@@ -104,7 +104,7 @@ public class ValueHolderHelper {
 
       dch.scale = bigDecimal.scale();
       dch.precision = bigDecimal.precision();
-      dch.sign = (bigDecimal.signum() == -1);
+      dch.setSign(bigDecimal.signum() == -1);
       dch.start = 0;
 
 

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/0dec032f/exec/java-exec/src/test/java/org/apache/drill/exec/physical/impl/TestDecimal.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/test/java/org/apache/drill/exec/physical/impl/TestDecimal.java b/exec/java-exec/src/test/java/org/apache/drill/exec/physical/impl/TestDecimal.java
index f485378..093366f 100644
--- a/exec/java-exec/src/test/java/org/apache/drill/exec/physical/impl/TestDecimal.java
+++ b/exec/java-exec/src/test/java/org/apache/drill/exec/physical/impl/TestDecimal.java
@@ -278,95 +278,4 @@ public class TestDecimal extends PopUnitTestBase{
             }
         }
     }
-
-    @Test
-    public void testDenseSparseConversion() throws Exception {
-
-        /* Function checks the following workflow
-         * VarChar -> Sparse -> Dense -> Sort(Dense) -> Sparse -> VarChar
-         */
-        try (RemoteServiceSet serviceSet = RemoteServiceSet.getLocalServiceSet();
-             Drillbit bit = new Drillbit(CONFIG, serviceSet);
-             DrillClient client = new DrillClient(CONFIG, serviceSet.getCoordinator())) {
-
-            // run query.
-            bit.run();
-            client.connect();
-            List<QueryResultBatch> results = client.runQuery(org.apache.drill.exec.proto.UserBitShared.QueryType.PHYSICAL,
-                    Files.toString(FileUtils.getResourceAsFile("/decimal/test_decimal_dense_sparse.json"), Charsets.UTF_8)
-                            .replace("#{TEST_FILE}", "/input_complex_decimal.json")
-            );
-
-            RecordBatchLoader batchLoader = new RecordBatchLoader(bit.getContext().getAllocator());
-
-            QueryResultBatch batch = results.get(0);
-            assertTrue(batchLoader.load(batch.getHeader().getDef(), batch.getData()));
-
-            String sortOutput[] = {"-100000000001.000000000000", "-100000000001.000000000000", "-0.120000000000", "0.100000000001",  "11.123456789012", "100000000001.123456789001", "123456789123456789.000000000000"};
-
-            Iterator<VectorWrapper<?>> itr = batchLoader.iterator();
-
-            // Check the output of sort
-            VectorWrapper<?> v = itr.next();
-            ValueVector.Accessor accessor = v.getValueVector().getAccessor();
-
-            for (int i = 0; i < accessor.getValueCount(); i++) {
-                assertEquals(accessor.getObject(i).toString(), sortOutput[i]);
-            }
-            assertEquals(7, accessor.getValueCount());
-
-            batchLoader.clear();
-            for (QueryResultBatch result : results) {
-              result.release();
-            }
-        }
-    }
-
-    @Test
-    public void testDenseSparseConversion1() throws Exception {
-
-        /* Function checks the following cast sequence.
-         * VarChar          -> Decimal28Sparse
-         * Decimal28Sparse  -> Decimal28Dense
-         * Decimal28Dense   -> Decimal38Dense
-         *
-         * Goal is to test the similar casting functionality 28Dense -> 38Dense
-         *
-         */
-        try (RemoteServiceSet serviceSet = RemoteServiceSet.getLocalServiceSet();
-             Drillbit bit = new Drillbit(CONFIG, serviceSet);
-             DrillClient client = new DrillClient(CONFIG, serviceSet.getCoordinator())) {
-
-            // run query.
-            bit.run();
-            client.connect();
-            List<QueryResultBatch> results = client.runQuery(org.apache.drill.exec.proto.UserBitShared.QueryType.PHYSICAL,
-                    Files.toString(FileUtils.getResourceAsFile("/decimal/test_decimal_sparse_dense_dense.json"), Charsets.UTF_8)
-                            .replace("#{TEST_FILE}", "/input_simple_decimal.json")
-            );
-
-            RecordBatchLoader batchLoader = new RecordBatchLoader(bit.getContext().getAllocator());
-
-            QueryResultBatch batch = results.get(0);
-            assertTrue(batchLoader.load(batch.getHeader().getDef(), batch.getData()));
-
-            String output[] = {"99.0000", "11.1234", "0.1000", "-0.1200", "-123.1234", "-1.0001"};
-
-            Iterator<VectorWrapper<?>> itr = batchLoader.iterator();
-
-            // Check the output of sort
-            VectorWrapper<?> v = itr.next();
-            ValueVector.Accessor accessor = v.getValueVector().getAccessor();
-
-            for (int i = 0; i < accessor.getValueCount(); i++) {
-                assertEquals(accessor.getObject(i).toString(), output[i]);
-            }
-            assertEquals(6, accessor.getValueCount());
-
-            batchLoader.clear();
-            for (QueryResultBatch result : results) {
-              result.release();
-            }
-        }
-    }
 }


[04/32] git commit: DRILL-1020: Fix bug in dynamic allocation

Posted by ja...@apache.org.
DRILL-1020: Fix bug in dynamic allocation


Project: http://git-wip-us.apache.org/repos/asf/incubator-drill/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-drill/commit/c7712f80
Tree: http://git-wip-us.apache.org/repos/asf/incubator-drill/tree/c7712f80
Diff: http://git-wip-us.apache.org/repos/asf/incubator-drill/diff/c7712f80

Branch: refs/heads/master
Commit: c7712f8053a7bcf5028028db68a4a5580c442a00
Parents: c373a27
Author: Steven Phillips <sp...@maprtech.com>
Authored: Tue Jun 17 03:14:04 2014 -0700
Committer: Jacques Nadeau <ja...@apache.org>
Committed: Wed Jun 18 20:28:25 2014 -0700

----------------------------------------------------------------------
 .../codegen/templates/FixedValueVectors.java    | 45 ++++++++-----
 .../codegen/templates/NullableValueVectors.java |  4 +-
 .../templates/VariableLengthVectors.java        | 38 +++++++----
 .../org/apache/drill/exec/vector/BitVector.java | 45 +++++++++----
 .../exec/vector/TestAdaptiveAllocation.java     | 67 ++++++++++++++++----
 5 files changed, 144 insertions(+), 55 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/c7712f80/exec/java-exec/src/main/codegen/templates/FixedValueVectors.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/codegen/templates/FixedValueVectors.java b/exec/java-exec/src/main/codegen/templates/FixedValueVectors.java
index a83ec97..7ff7327 100644
--- a/exec/java-exec/src/main/codegen/templates/FixedValueVectors.java
+++ b/exec/java-exec/src/main/codegen/templates/FixedValueVectors.java
@@ -213,13 +213,24 @@ public final class ${minor.class}Vector extends BaseDataValueVector implements F
   
   public boolean copyFromSafe(int fromIndex, int thisIndex, ${minor.class}Vector from){
     if(thisIndex >= getValueCapacity()) {
-      allocationMonitor--;
+      decrementAllocationMonitor();
       return false;
     }
     copyFrom(fromIndex, thisIndex, from);
     return true;
   }
 
+  private void decrementAllocationMonitor() {
+    if (allocationMonitor > 0) {
+      allocationMonitor = 0;
+    }
+    --allocationMonitor;
+  }
+
+  private void incrementAllocationMonitor() {
+    ++allocationMonitor;
+  }
+
   public final class Accessor extends BaseValueVector.BaseAccessor{
 
     final FieldReader reader = new ${minor.class}ReaderImpl(${minor.class}Vector.this);
@@ -576,7 +587,7 @@ public final class ${minor.class}Vector extends BaseDataValueVector implements F
 
    public boolean setSafe(int index, <#if (type.width > 4)>${minor.javaType!type.javaType}<#else>int</#if> value) {
      if(index >= getValueCapacity()) {
-       allocationMonitor--;
+       decrementAllocationMonitor();
        return false;
      }
      data.setBytes(index * ${type.width}, value, 0, ${type.width});
@@ -597,7 +608,7 @@ public final class ${minor.class}Vector extends BaseDataValueVector implements F
 
    public boolean setSafe(int index, ${minor.class}Holder holder){
      if(index >= getValueCapacity()) {
-       allocationMonitor--;
+       decrementAllocationMonitor();
        return false;
      }
      set(index, holder);
@@ -606,7 +617,7 @@ public final class ${minor.class}Vector extends BaseDataValueVector implements F
 
    public boolean setSafe(int index, Nullable${minor.class}Holder holder){
      if(index >= getValueCapacity()) {
-       allocationMonitor--;
+       decrementAllocationMonitor();
        return false;
      }
      set(index, holder);
@@ -629,7 +640,7 @@ public final class ${minor.class}Vector extends BaseDataValueVector implements F
 
    public boolean setSafe(int index, ${minor.class}Holder holder){
      if(index >= getValueCapacity()) {
-       allocationMonitor--;
+       decrementAllocationMonitor();
        return false;
      }
      set(index, holder);
@@ -638,7 +649,7 @@ public final class ${minor.class}Vector extends BaseDataValueVector implements F
 
    public boolean setSafe(int index, Nullable${minor.class}Holder holder){
      if(index >= getValueCapacity()) {
-       allocationMonitor--;
+       decrementAllocationMonitor();
        return false;
      }
      set(index, holder);
@@ -659,7 +670,7 @@ public final class ${minor.class}Vector extends BaseDataValueVector implements F
 
    public boolean setSafe(int index, ${minor.class}Holder holder){
      if(index >= getValueCapacity()) {
-       allocationMonitor--;
+       decrementAllocationMonitor();
        return false;
      }
      set(index, holder);
@@ -668,7 +679,7 @@ public final class ${minor.class}Vector extends BaseDataValueVector implements F
 
    public boolean setSafe(int index, Nullable${minor.class}Holder holder){
      if(index >= getValueCapacity()) {
-       allocationMonitor--;
+       decrementAllocationMonitor();
        return false;
      }
      set(index, holder);
@@ -708,7 +719,7 @@ public final class ${minor.class}Vector extends BaseDataValueVector implements F
 
    public boolean setSafe(int index,  Nullable${minor.class}Holder holder){
        if(index >= getValueCapacity()) {
-         allocationMonitor--;
+         decrementAllocationMonitor();
          return false;
        }
        set(index, holder);
@@ -717,7 +728,7 @@ public final class ${minor.class}Vector extends BaseDataValueVector implements F
 
    public boolean setSafe(int index,  ${minor.class}Holder holder){
        if(index >= getValueCapacity()) {
-         allocationMonitor--;
+         decrementAllocationMonitor();
          return false;
        }
        set(index, holder);
@@ -731,7 +742,7 @@ public final class ${minor.class}Vector extends BaseDataValueVector implements F
    
    public boolean setSafe(int index, ${minor.class}Holder holder){
      if(index >= getValueCapacity()) {
-       allocationMonitor--;
+       decrementAllocationMonitor();
        return false;
      }
      set(index, holder);
@@ -762,7 +773,7 @@ public final class ${minor.class}Vector extends BaseDataValueVector implements F
 
    public boolean setSafe(int index, <#if (type.width >= 4)>${minor.javaType!type.javaType}<#else>int</#if> value) {
      if(index >= getValueCapacity()) {
-       allocationMonitor--;
+       decrementAllocationMonitor();
        return false;
      }
      set(index, value);
@@ -775,7 +786,7 @@ public final class ${minor.class}Vector extends BaseDataValueVector implements F
 
    public boolean setSafe(int index, ${minor.class}Holder holder){
      if(index >= getValueCapacity()) {
-       allocationMonitor--;
+       decrementAllocationMonitor();
        return false;
      }
      set(index, holder);
@@ -788,7 +799,7 @@ public final class ${minor.class}Vector extends BaseDataValueVector implements F
 
    public boolean setSafe(int index, Nullable${minor.class}Holder holder){
      if(index >= getValueCapacity()) {
-       allocationMonitor--;
+       decrementAllocationMonitor();
        return false;
      }
      set(index, holder);
@@ -816,10 +827,10 @@ public final class ${minor.class}Vector extends BaseDataValueVector implements F
      int currentValueCapacity = getValueCapacity();
      ${minor.class}Vector.this.valueCount = valueCount;
      int idx = (${type.width} * valueCount);
-     if (valueCount > 0 && currentValueCapacity > idx * 2) {
-       allocationMonitor++;
+     if (valueCount > 0 && currentValueCapacity > valueCount * 2) {
+       incrementAllocationMonitor();
      } else if (allocationMonitor > 0) {
-       allocationMonitor--;
+       allocationMonitor = 0;
      }
      data.writerIndex(idx);
      if (data instanceof AccountingByteBuf) {

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/c7712f80/exec/java-exec/src/main/codegen/templates/NullableValueVectors.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/codegen/templates/NullableValueVectors.java b/exec/java-exec/src/main/codegen/templates/NullableValueVectors.java
index ce17418..f50aae8 100644
--- a/exec/java-exec/src/main/codegen/templates/NullableValueVectors.java
+++ b/exec/java-exec/src/main/codegen/templates/NullableValueVectors.java
@@ -303,7 +303,9 @@ public final class ${className} extends BaseValueVector implements <#if type.maj
     <#if type.major == "VarLen">
     if(!mutator.fillEmpties(thisIndex)) return false;
     </#if>
-    return bits.copyFromSafe(fromIndex, thisIndex, from.bits) && values.copyFromSafe(fromIndex, thisIndex, from.values);
+    boolean b1 = bits.copyFromSafe(fromIndex, thisIndex, from.bits);
+    boolean b2 = values.copyFromSafe(fromIndex, thisIndex, from.values);
+    return b1 && b2;
   }
 
   

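The b1/b2 split above looks cosmetic but is not: with a plain "bits.copyFromSafe(...) && values.copyFromSafe(...)", short-circuit evaluation skips the values copy whenever the bits copy fails, presumably leaving only one of the two child vectors (and only one allocation monitor) registering the failed write. Evaluating both before combining keeps the children in step. A minimal illustration with hypothetical names:

    // Hypothetical illustration of why both copies must be attempted.
    public class ShortCircuitSketch {
      static boolean attempt(String name) {
        System.out.println("attempted " + name);
        return false; // pretend the copy did not fit
      }

      public static void main(String[] args) {
        boolean b1 = attempt("bits");
        boolean b2 = attempt("values"); // still runs even though b1 is false
        System.out.println(b1 && b2);   // false, but both sides were attempted
      }
    }
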
http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/c7712f80/exec/java-exec/src/main/codegen/templates/VariableLengthVectors.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/codegen/templates/VariableLengthVectors.java b/exec/java-exec/src/main/codegen/templates/VariableLengthVectors.java
index 22a668d..8535f99 100644
--- a/exec/java-exec/src/main/codegen/templates/VariableLengthVectors.java
+++ b/exec/java-exec/src/main/codegen/templates/VariableLengthVectors.java
@@ -168,11 +168,7 @@ public final class ${minor.class}Vector extends BaseDataValueVector implements V
   }
   
   public boolean copyFromSafe(int fromIndex, int thisIndex, ${minor.class}Vector from){
-    if(thisIndex >= getValueCapacity()) {
-        allocationMonitor--;
-        return false;
-    }
-    
+
     int start = from.offsetVector.getAccessor().get(fromIndex);
     int end =   from.offsetVector.getAccessor().get(fromIndex+1);
     int len = end - start;
@@ -180,10 +176,15 @@ public final class ${minor.class}Vector extends BaseDataValueVector implements V
     int outputStart = offsetVector.data.get${(minor.javaType!type.javaType)?cap_first}(thisIndex * ${type.width});
     
     if(data.capacity() < outputStart + len) {
-        allocationMonitor--;
+        decrementAllocationMonitor();
         return false;
     }
-    
+
+    if (!offsetVector.getMutator().setSafe(thisIndex + 1, outputStart + len)) {
+       decrementAllocationMonitor();
+       return false;
+    }
+
     from.data.getBytes(start, data, outputStart, len);
     offsetVector.data.set${(minor.javaType!type.javaType)?cap_first}( (thisIndex+1) * ${type.width}, outputStart + len);
 
@@ -259,6 +260,17 @@ public final class ${minor.class}Vector extends BaseDataValueVector implements V
     offsetVector.zeroVector();
   }
 
+    private void decrementAllocationMonitor() {
+      if (allocationMonitor > 0) {
+        allocationMonitor = 0;
+      }
+      --allocationMonitor;
+    }
+
+    private void incrementAllocationMonitor() {
+      ++allocationMonitor;
+    }
+
   public Accessor getAccessor(){
     return accessor;
   }
@@ -363,7 +375,7 @@ public final class ${minor.class}Vector extends BaseDataValueVector implements V
 
       int currentOffset = offsetVector.getAccessor().get(index);
       if (data.capacity() < currentOffset + bytes.length) {
-        allocationMonitor--;
+        decrementAllocationMonitor();
         return false;
       }
       if (!offsetVector.getMutator().setSafe(index + 1, currentOffset + bytes.length)) {
@@ -395,7 +407,7 @@ public final class ${minor.class}Vector extends BaseDataValueVector implements V
       int currentOffset = offsetVector.getAccessor().get(index);
 
       if (data.capacity() < currentOffset + length) {
-        allocationMonitor--;
+        decrementAllocationMonitor();
         return false;
       }
       if (!offsetVector.getMutator().setSafe(index + 1, currentOffset + length)) {
@@ -416,7 +428,7 @@ public final class ${minor.class}Vector extends BaseDataValueVector implements V
       int outputStart = offsetVector.data.get${(minor.javaType!type.javaType)?cap_first}(index * ${type.width});
       
       if(data.capacity() < outputStart + len) {
-        allocationMonitor--;
+        decrementAllocationMonitor();
         return false;
       }
       
@@ -439,7 +451,7 @@ public final class ${minor.class}Vector extends BaseDataValueVector implements V
       int outputStart = offsetVector.data.get${(minor.javaType!type.javaType)?cap_first}(index * ${type.width});
       
       if(data.capacity() < outputStart + len) {
-        allocationMonitor--;
+        decrementAllocationMonitor();
         return false;
       }
       
@@ -481,9 +493,9 @@ public final class ${minor.class}Vector extends BaseDataValueVector implements V
       int idx = offsetVector.getAccessor().get(valueCount);
       data.writerIndex(idx);
       if (valueCount > 0 && currentByteCapacity > idx * 2) {
-        allocationMonitor++;
+        incrementAllocationMonitor();
       } else if (allocationMonitor > 0) {
-        allocationMonitor--;
+        allocationMonitor = 0;
       }
       if (data instanceof AccountingByteBuf) {
         data.capacity(idx);

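For context on the bookkeeping this change guards: a variable-width vector stores its bytes in one buffer and per-value offsets in another, with value i occupying [offsets[i], offsets[i+1]), and copyFromSafe now checks both the byte capacity and the offset write before touching the data. A simplified, hypothetical model with plain arrays:

    import java.util.Arrays;

    // Hypothetical model of a variable-width vector's offset bookkeeping.
    public class VarLenAppendSketch {
      byte[] data = new byte[16]; // value bytes
      int[] offsets = new int[5]; // offsets[0] == 0; value i is [offsets[i], offsets[i+1])

      boolean setSafe(int index, byte[] value) {
        int start = offsets[index];
        if (data.length < start + value.length) return false; // data buffer too small
        if (index + 1 >= offsets.length) return false;        // offset vector too small
        System.arraycopy(value, 0, data, start, value.length);
        offsets[index + 1] = start + value.length;
        return true;
      }

      public static void main(String[] args) {
        VarLenAppendSketch v = new VarLenAppendSketch();
        System.out.println(v.setSafe(0, new byte[] {1, 2, 3}));           // true
        System.out.println(Arrays.toString(Arrays.copyOf(v.offsets, 2))); // [0, 3]
      }
    }
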
http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/c7712f80/exec/java-exec/src/main/java/org/apache/drill/exec/vector/BitVector.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/vector/BitVector.java b/exec/java-exec/src/main/java/org/apache/drill/exec/vector/BitVector.java
index e217ddb..73f97fe 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/vector/BitVector.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/vector/BitVector.java
@@ -42,7 +42,7 @@ public final class BitVector extends BaseDataValueVector implements FixedWidthVe
   private final Accessor accessor = new Accessor();
   private final Mutator mutator = new Mutator();
 
-  private int allocationValueCount = 4000;
+  private int allocationValueCount = 4096;
   private int allocationMonitor = 0;
 
   private int valueCapacity;
@@ -73,11 +73,11 @@ public final class BitVector extends BaseDataValueVector implements FixedWidthVe
 
   public boolean allocateNewSafe() {
     clear();
-    if (allocationMonitor > 5) {
-      allocationValueCount = Math.max(1, (int)(allocationValueCount * 0.9));
+    if (allocationMonitor > 10) {
+      allocationValueCount = Math.max(8, (int) (allocationValueCount / 2));
       allocationMonitor = 0;
-    } else if (allocationMonitor < -5) {
-      allocationValueCount = (int) (allocationValueCount * 1.1);
+    } else if (allocationMonitor < -2) {
+      allocationValueCount = (int) (allocationValueCount * 2);
       allocationMonitor = 0;
     }
 
@@ -127,7 +127,10 @@ public final class BitVector extends BaseDataValueVector implements FixedWidthVe
   }
 
   public boolean copyFromSafe(int inIndex, int outIndex, BitVector from){
-    if(outIndex >= this.getValueCapacity()) return false;
+    if(outIndex >= this.getValueCapacity()) {
+      decrementAllocationMonitor();
+      return false;
+    }
     copyFrom(inIndex, outIndex, from);
     return true;
   }
@@ -231,6 +234,17 @@ public final class BitVector extends BaseDataValueVector implements FixedWidthVe
     }
   }
 
+  private void decrementAllocationMonitor() {
+    if (allocationMonitor > 0) {
+      allocationMonitor = 0;
+    }
+    --allocationMonitor;
+  }
+
+  private void incrementAllocationMonitor() {
+    ++allocationMonitor;
+  }
+
   public class Accessor extends BaseAccessor {
 
     /**
@@ -318,7 +332,7 @@ public final class BitVector extends BaseDataValueVector implements FixedWidthVe
 
     public boolean setSafe(int index, int value) {
       if(index >= getValueCapacity()) {
-        allocationMonitor--;
+        decrementAllocationMonitor();
         return false;
       }
       set(index, value);
@@ -326,22 +340,31 @@ public final class BitVector extends BaseDataValueVector implements FixedWidthVe
     }
 
     public boolean setSafe(int index, BitHolder holder) {
-      if(index >= getValueCapacity()) return false;
+      if(index >= getValueCapacity()) {
+        decrementAllocationMonitor();
+        return false;
+      }
       set(index, holder.value);
       return true;
     }
 
     public boolean setSafe(int index, NullableBitHolder holder) {
-      if(index >= getValueCapacity()) return false;
+      if(index >= getValueCapacity()) {
+        decrementAllocationMonitor();
+        return false;
+      }
       set(index, holder.value);
       return true;
     }
 
     public final void setValueCount(int valueCount) {
+      int currentValueCapacity = getValueCapacity();
       BitVector.this.valueCount = valueCount;
       int idx = getSizeFromCount(valueCount);
-      if (((float) data.capacity()) / idx > 1.1) {
-        allocationMonitor++;
+      if (valueCount > 0 && currentValueCapacity > valueCount * 2) {
+        incrementAllocationMonitor();
+      } else if (allocationMonitor > 0) {
+        allocationMonitor = 0;
       }
       data.writerIndex(idx);
       if (data instanceof AccountingByteBuf) {

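Putting the BitVector pieces together, the adaptive policy works like this: setValueCount increments the monitor whenever a batch used less than half the allocated capacity, any overflow (a setSafe/copyFromSafe returning false) wipes a positive streak and pushes the monitor negative, and allocateNewSafe then halves the next allocation after more than ten consecutive oversized batches or doubles it after a few overflows. A condensed, hypothetical restatement with the constants taken from the diff (allocationValueCount stands in for the capacity check):

    // Hypothetical condensation of the adaptive allocation policy above.
    public class AdaptiveAllocationSketch {
      int allocationValueCount = 4096; // next allocation size, in values
      int allocationMonitor = 0;       // >0: oversized streak, <0: recent overflows

      void allocateNew() {
        if (allocationMonitor > 10) {        // consistently oversized: shrink
          allocationValueCount = Math.max(8, allocationValueCount / 2);
          allocationMonitor = 0;
        } else if (allocationMonitor < -2) { // overflowed recently: grow
          allocationValueCount = allocationValueCount * 2;
          allocationMonitor = 0;
        }
        // ... allocate allocationValueCount values ...
      }

      void setValueCount(int valueCount) {
        if (valueCount > 0 && allocationValueCount > valueCount * 2) {
          allocationMonitor++;               // used less than half the capacity
        } else if (allocationMonitor > 0) {
          allocationMonitor = 0;             // a tight batch resets the streak
        }
      }

      void onOverflow() { // mirrors decrementAllocationMonitor()
        if (allocationMonitor > 0) {
          allocationMonitor = 0;
        }
        --allocationMonitor;
      }
    }
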
http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/c7712f80/exec/java-exec/src/test/java/org/apache/drill/exec/vector/TestAdaptiveAllocation.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/test/java/org/apache/drill/exec/vector/TestAdaptiveAllocation.java b/exec/java-exec/src/test/java/org/apache/drill/exec/vector/TestAdaptiveAllocation.java
index d86b5db..f554e3f 100644
--- a/exec/java-exec/src/test/java/org/apache/drill/exec/vector/TestAdaptiveAllocation.java
+++ b/exec/java-exec/src/test/java/org/apache/drill/exec/vector/TestAdaptiveAllocation.java
@@ -29,43 +29,84 @@ import org.junit.Test;
 
 import java.util.Random;
 
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
 public class TestAdaptiveAllocation {
 
   @Test
   public void test() throws Exception {
     BufferAllocator allocator = new TopLevelAllocator();
     MaterializedField field = MaterializedField.create("field", Types.required(MinorType.VARCHAR));
-    VarBinaryVector varBinaryVector = new VarBinaryVector(field, allocator);
+    NullableVarBinaryVector vector1 = new NullableVarBinaryVector(field, allocator);
+    NullableVarCharVector vector2 = new NullableVarCharVector(field, allocator);
+    NullableBigIntVector vector3 = new NullableBigIntVector(field, allocator);
 
     Random rand = new Random();
 //    int valuesToWrite = rand.nextInt(4000) + 1000;
 //    int bytesToWrite = rand.nextInt(100);
-    int valuesToWrite = 100;
-    int bytesToWrite = 1;
+    int valuesToWrite = 8000;
+    int bytesToWrite1 = 2;
+    int bytesToWrite2 = 200;
 //    System.out.println("value: " + valuesToWrite);
 //    System.out.println("bytes: " + bytesToWrite);
 
-    byte[] value = new byte[bytesToWrite];
+    byte[] value1 = new byte[bytesToWrite1];
+    byte[] value2 = new byte[bytesToWrite2];
+
+    NullableVarBinaryVector copyVector1 = new NullableVarBinaryVector(field, allocator);
+    NullableVarCharVector copyVector2 = new NullableVarCharVector(field, allocator);
+    NullableBigIntVector copyVector3 = new NullableBigIntVector(field, allocator);
+
+    copyVector1.allocateNew();
+    copyVector2.allocateNew();
+    copyVector3.allocateNew();
+
+    copyVector1.getMutator().set(0, value1);
+    copyVector2.getMutator().set(0, value2);
+    copyVector3.getMutator().set(0, 100);
 
     for (int i = 0; i < 10000; i++) {
-      varBinaryVector.allocateNew();
+      vector1.allocateNew();
+      vector2.allocateNew();
+      vector3.allocateNew();
 //      System.out.println("Value Capacity: " + varBinaryVector.getValueCapacity());
 //      System.out.println("Byte Capacity: " + varBinaryVector.getByteCapacity());
       int offset = 0;
       int j = 0;
-      for (j = 0; j < valuesToWrite; j++) {
-        if (!varBinaryVector.getMutator().setSafe(j - offset, value)) {
-          varBinaryVector.getMutator().setValueCount(j - offset);
+      int toWrite = (int) valuesToWrite * (int) (2 + rand.nextGaussian()) / 2;
+      for (j = 0; j < toWrite; j += 1) {
+//        if (!(vector1.getMutator().setSafe(j - offset, value1, 0, value1.length) &&
+//        vector2.getMutator().setSafe(j - offset, value2, 0 , value2.length) &&
+//        vector3.getMutator().setSafe(j - offset, 100))) {
+        if (!(vector1.copyFromSafe(0, j - offset, copyVector1) &&
+          vector2.copyFromSafe(0, j - offset, copyVector2) &&
+          vector3.copyFromSafe(0, j - offset, copyVector3))) {
+          vector1.getMutator().setValueCount(j - offset);
+          vector2.getMutator().setValueCount(j - offset);
+          vector3.getMutator().setValueCount(j - offset);
           offset = j;
-          varBinaryVector.allocateNew();
+          vector1.clear();
+          vector2.clear();
+          vector3.clear();
+          vector1.allocateNew();
+          vector2.allocateNew();
+          vector3.allocateNew();
 //          System.out.println("Value Capacity: " + varBinaryVector.getValueCapacity());
 //          System.out.println("Byte Capacity: " + varBinaryVector.getByteCapacity());
         }
       }
-      varBinaryVector.getMutator().setValueCount(j - offset);
+      vector1.getMutator().setValueCount(j - offset);
+      vector2.getMutator().setValueCount(j - offset);
+      vector3.getMutator().setValueCount(j - offset);
     }
-    varBinaryVector.allocateNew();
-    System.out.println(varBinaryVector.getValueCapacity());
-    System.out.println(varBinaryVector.getByteCapacity());
+    vector1.allocateNew();
+    vector2.allocateNew();
+    vector3.allocateNew();
+    assertTrue(vector1.getValueCapacity() > 8000);
+    assertTrue(vector2.getValueCapacity() > 8000);
+    assertTrue(vector3.getValueCapacity() > 8000);
+    assertTrue(vector1.getByteCapacity() > 8000 * 2);
+    assertTrue(vector2.getByteCapacity() > 8000 * 200);
   }
 }


[30/32] git commit: DRILL-1024: Move hive storage code out of 'exec/java-exec' into 'contrib/storage-hive' module.

Posted by ja...@apache.org.
DRILL-1024: Move hive storage code out of 'exec/java-exec' into 'contrib/storage-hive' module.

+ Create two modules in contrib/storage-hive
  ++ contrib/storage-hive/hive-exec-shade: creates shaded hive-exec.jar
  ++ contrib/storage-hive/core: contains Hive storage code (schema, record reader and functions)
+ Update TestHiveUDFs.java to use BaseTestQuery instead of SimpleRootExec


Project: http://git-wip-us.apache.org/repos/asf/incubator-drill/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-drill/commit/980dc87b
Tree: http://git-wip-us.apache.org/repos/asf/incubator-drill/tree/980dc87b
Diff: http://git-wip-us.apache.org/repos/asf/incubator-drill/diff/980dc87b

Branch: refs/heads/master
Commit: 980dc87b5c2293e3ed60f588dcef67a6382e3715
Parents: 026d51a
Author: vkorukanti <ve...@gmail.com>
Authored: Wed Jun 18 11:35:23 2014 -0700
Committer: Jacques Nadeau <ja...@apache.org>
Committed: Fri Jun 20 10:56:16 2014 -0700

----------------------------------------------------------------------
 contrib/storage-hive/core/pom.xml               | 128 +++++
 .../core/src/main/codegen/config.fmpp           |  22 +
 .../core/src/main/codegen/data/HiveTypes.tdd    | 100 ++++
 .../core/src/main/codegen/includes/license.ftl  |  18 +
 .../templates/ObjectInspectorHelper.java        | 192 +++++++
 .../codegen/templates/ObjectInspectors.java     | 114 ++++
 .../drill/exec/expr/HiveFuncHolderExpr.java     |  76 +++
 .../drill/exec/expr/fn/HiveFuncHolder.java      | 272 ++++++++++
 .../exec/expr/fn/HiveFunctionRegistry.java      | 165 ++++++
 .../hive/AbstractPrimitiveObjectInspector.java  |  70 +++
 .../expr/fn/impl/hive/DrillDeferredObject.java  |  34 ++
 .../drill/exec/planner/sql/HiveUDFOperator.java |  70 +++
 .../exec/store/hive/HiveFieldConverter.java     | 200 +++++++
 .../drill/exec/store/hive/HiveReadEntry.java    |  72 +++
 .../drill/exec/store/hive/HiveRecordReader.java | 542 +++++++++++++++++++
 .../apache/drill/exec/store/hive/HiveScan.java  | 296 ++++++++++
 .../exec/store/hive/HiveScanBatchCreator.java   |  72 +++
 .../exec/store/hive/HiveStoragePlugin.java      |  87 +++
 .../store/hive/HiveStoragePluginConfig.java     |  74 +++
 .../drill/exec/store/hive/HiveSubScan.java      | 145 +++++
 .../apache/drill/exec/store/hive/HiveTable.java | 331 +++++++++++
 .../exec/store/hive/HiveTextRecordReader.java   | 176 ++++++
 .../exec/store/hive/schema/DrillHiveTable.java  | 181 +++++++
 .../store/hive/schema/DrillHiveViewTable.java   |  40 ++
 .../store/hive/schema/HiveDatabaseSchema.java   |  63 +++
 .../store/hive/schema/HiveSchemaFactory.java    | 292 ++++++++++
 .../resources/bootstrap-storage-plugins.json    |   4 +
 .../core/src/main/resources/drill-module.conf   |   5 +
 .../apache/drill/exec/fn/hive/TestHiveUDFs.java | 166 ++++++
 .../exec/store/hive/HiveTestDataGenerator.java  | 261 +++++++++
 .../resources/functions/hive/GenericUDF.json    |  45 ++
 .../src/test/resources/functions/hive/UDF.json  |  39 ++
 .../core/src/test/resources/logback.xml         |  64 +++
 contrib/storage-hive/hive-exec-shade/pom.xml    | 169 ++++++
 .../src/main/resources/drill-module.conf        |   5 +
 contrib/storage-hive/pom.xml                    | 147 +----
 .../src/main/resources/drill-module.conf        |   5 -
 distribution/pom.xml                            |  16 +-
 distribution/src/assemble/bin.xml               |   7 +-
 exec/java-exec/pom.xml                          |  16 -
 exec/java-exec/src/main/codegen/config.fmpp     |   3 +-
 .../src/main/codegen/data/HiveTypes.tdd         | 100 ----
 .../templates/ObjectInspectorHelper.java        | 192 -------
 .../codegen/templates/ObjectInspectors.java     | 114 ----
 .../drill/exec/expr/HiveFuncHolderExpr.java     |  76 ---
 .../drill/exec/expr/fn/HiveFuncHolder.java      | 272 ----------
 .../exec/expr/fn/HiveFunctionRegistry.java      | 166 ------
 .../hive/AbstractPrimitiveObjectInspector.java  |  70 ---
 .../expr/fn/impl/hive/DrillDeferredObject.java  |  34 --
 .../drill/exec/planner/sql/HiveUDFOperator.java |  70 ---
 .../drill/exec/record/RecordBatchLoader.java    |   2 -
 .../apache/drill/exec/record/WritableBatch.java |   2 -
 .../exec/store/hive/HiveFieldConverter.java     | 200 -------
 .../drill/exec/store/hive/HiveReadEntry.java    |  72 ---
 .../drill/exec/store/hive/HiveRecordReader.java | 542 -------------------
 .../apache/drill/exec/store/hive/HiveScan.java  | 296 ----------
 .../exec/store/hive/HiveScanBatchCreator.java   |  72 ---
 .../exec/store/hive/HiveStoragePlugin.java      |  87 ---
 .../store/hive/HiveStoragePluginConfig.java     |  74 ---
 .../drill/exec/store/hive/HiveSubScan.java      | 145 -----
 .../apache/drill/exec/store/hive/HiveTable.java | 331 -----------
 .../exec/store/hive/HiveTextRecordReader.java   | 176 ------
 .../exec/store/hive/schema/DrillHiveTable.java  | 181 -------
 .../store/hive/schema/DrillHiveViewTable.java   |  40 --
 .../store/hive/schema/HiveDatabaseSchema.java   |  63 ---
 .../store/hive/schema/HiveSchemaFactory.java    | 292 ----------
 .../drill/exec/physical/impl/TestHiveUDFs.java  | 210 -------
 .../exec/store/hive/HiveTestDataGenerator.java  | 261 ---------
 .../resources/functions/hive/GenericUDF.json    |  45 --
 .../src/test/resources/functions/hive/UDF.json  |  39 --
 exec/jdbc/pom.xml                               |  11 +
 71 files changed, 4623 insertions(+), 4396 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/980dc87b/contrib/storage-hive/core/pom.xml
----------------------------------------------------------------------
diff --git a/contrib/storage-hive/core/pom.xml b/contrib/storage-hive/core/pom.xml
new file mode 100644
index 0000000..38e4a41
--- /dev/null
+++ b/contrib/storage-hive/core/pom.xml
@@ -0,0 +1,128 @@
+<?xml version="1.0"?>
+<!--
+ Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements.  See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License.  You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+<project
+  xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd"
+  xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
+  <modelVersion>4.0.0</modelVersion>
+
+  <parent>
+    <groupId>org.apache.drill.contrib.storage-hive</groupId>
+    <artifactId>drill-contrib-storage-hive-parent</artifactId>
+    <version>1.0.0-m2-incubating-SNAPSHOT</version>
+  </parent>
+
+  <artifactId>drill-storage-hive-core</artifactId>
+  <packaging>jar</packaging>
+  <name>contrib/hive-storage-plugin/core</name>
+
+  <dependencies>
+    <dependency>
+      <groupId>org.apache.drill</groupId>
+      <artifactId>drill-common</artifactId>
+      <version>${project.version}</version>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.drill.exec</groupId>
+      <artifactId>drill-java-exec</artifactId>
+      <version>${project.version}</version>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.drill.contrib.storage-hive</groupId>
+      <artifactId>drill-hive-exec-shaded</artifactId>
+      <version>${project.version}</version>
+      <exclusions>
+        <exclusion>
+          <groupId>org.apache.hive</groupId>
+          <artifactId>hive-exec</artifactId>
+        </exclusion>
+      </exclusions>
+    </dependency>
+    <dependency>
+      <groupId>net.hydromatic</groupId>
+      <artifactId>optiq-core</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.drill</groupId>
+      <artifactId>drill-common</artifactId>
+      <classifier>tests</classifier>
+      <version>${project.version}</version>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.drill.exec</groupId>
+      <artifactId>drill-java-exec</artifactId>
+      <version>${project.version}</version>
+      <classifier>tests</classifier>
+      <scope>test</scope>
+    </dependency>
+  </dependencies>
+
+  <build>
+    <plugins>
+      <plugin>
+        <groupId>com.googlecode.fmpp-maven-plugin</groupId>
+        <artifactId>fmpp-maven-plugin</artifactId>
+        <version>1.0</version>
+        <configuration>
+          <cfgFile>src/main/codegen/config.fmpp</cfgFile>
+          <outputDirectory>target/generated-sources</outputDirectory>
+          <templateDirectory>src/main/codegen/templates</templateDirectory>
+        </configuration>
+        <executions>
+          <execution>
+            <id>generate-fmpp-sources</id>
+            <phase>initialize</phase>
+            <goals>
+              <goal>generate</goal>
+            </goals>
+          </execution>
+        </executions>
+      </plugin>
+      <plugin>
+        <groupId>org.codehaus.mojo</groupId>
+        <artifactId>build-helper-maven-plugin</artifactId>
+        <version>1.5</version>
+        <executions>
+          <execution>
+            <id>add-fmpp-sources</id>
+            <phase>validate</phase>
+            <goals>
+              <goal>add-source</goal>
+            </goals>
+            <configuration>
+              <sources>
+                <source>${project.build.directory}/generated-sources</source>
+              </sources>
+            </configuration>
+          </execution>
+        </executions>
+      </plugin>
+      <plugin>
+        <artifactId>maven-jar-plugin</artifactId>
+        <executions>
+          <execution>
+            <id>test-jar</id>
+            <goals>
+              <goal>test-jar</goal>
+            </goals>
+          </execution>
+        </executions>
+      </plugin>
+    </plugins>
+  </build>
+</project>

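A note on the build wiring above: fmpp runs in the initialize phase and expands everything under src/main/codegen/templates into target/generated-sources, which build-helper has already registered as a source root, so the generated classes compile together with the regular sources. As a minimal smoke test of that pipeline (illustrative only; the test class is hypothetical, but the generated names match the templates later in this commit):

    import org.apache.drill.common.types.TypeProtos.MinorType;
    import org.apache.drill.exec.expr.fn.impl.hive.ObjectInspectorHelper;
    import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;

    public class GeneratedSourcesSmokeTest {
      public static void main(String[] args) {
        // resolves to the generated DrillIntObjectInspector (see templates below)
        ObjectInspector oi = ObjectInspectorHelper.getDrillObjectInspector(MinorType.INT);
        System.out.println(oi.getTypeName());   // prints "int"
      }
    }
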
http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/980dc87b/contrib/storage-hive/core/src/main/codegen/config.fmpp
----------------------------------------------------------------------
diff --git a/contrib/storage-hive/core/src/main/codegen/config.fmpp b/contrib/storage-hive/core/src/main/codegen/config.fmpp
new file mode 100644
index 0000000..cd36891
--- /dev/null
+++ b/contrib/storage-hive/core/src/main/codegen/config.fmpp
@@ -0,0 +1,22 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+# http:# www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+data: {
+    drillOI:tdd(../data/HiveTypes.tdd)
+}
+freemarkerLinks: {
+    includes: includes/
+}

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/980dc87b/contrib/storage-hive/core/src/main/codegen/data/HiveTypes.tdd
----------------------------------------------------------------------
diff --git a/contrib/storage-hive/core/src/main/codegen/data/HiveTypes.tdd b/contrib/storage-hive/core/src/main/codegen/data/HiveTypes.tdd
new file mode 100644
index 0000000..c23f981
--- /dev/null
+++ b/contrib/storage-hive/core/src/main/codegen/data/HiveTypes.tdd
@@ -0,0 +1,100 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+# http:# www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+{
+  map: [
+    {
+      hiveType: "BOOLEAN",
+      hiveOI: "BooleanObjectInspector",
+      serdeConstant: "BOOLEAN_TYPE_NAME",
+      javaType: "boolean",
+      minorType: "BIT",
+      holder: "Bit"
+    },
+    {
+      hiveType: "BYTE",
+      hiveOI: "ByteObjectInspector",
+      serdeConstant: "TINYINT_TYPE_NAME",
+      javaType: "byte",
+      minorType: "TINYINT",
+      holder: "TinyInt"
+    },
+    {
+      hiveType: "SHORT",
+      hiveOI: "ShortObjectInspector",
+      serdeConstant: "SMALLINT_TYPE_NAME",
+      javaType: "short",
+      minorType: "SMALLINT",
+      holder: "SmallInt"
+    },
+    {
+      hiveType: "INT",
+      hiveOI: "IntObjectInspector",
+      serdeConstant: "INT_TYPE_NAME",
+      javaType: "int",
+      minorType: "INT",
+      holder: "Int"
+    },
+    {
+      hiveType: "LONG",
+      hiveOI: "LongObjectInspector",
+      serdeConstant: "BIGINT_TYPE_NAME",
+      javaType: "long",
+      minorType: "BIGINT",
+      holder: "BigInt"
+    },
+    {
+      hiveType: "FLOAT",
+      hiveOI: "FloatObjectInspector",
+      serdeConstant: "FLOAT_TYPE_NAME",
+      javaType: "float",
+      minorType: "FLOAT4",
+      holder: "Float4"
+    },
+    {
+      hiveType: "DOUBLE",
+      hiveOI: "DoubleObjectInspector",
+      serdeConstant: "DOUBLE_TYPE_NAME",
+      javaType: "double",
+      minorType: "FLOAT8",
+      holder: "Float8"
+    },
+    {
+      hiveType: "VARCHAR",
+      hiveOI: "HiveVarcharObjectInspector",
+      serdeConstant: "VARCHAR_TYPE_NAME",
+      javaType: "",
+      minorType: "VARCHAR",
+      holder: "VarChar"
+    },
+    {
+      hiveType: "STRING",
+      hiveOI: "StringObjectInspector",
+      serdeConstant: "STRING_TYPE_NAME",
+      javaType: "",
+      minorType: "VAR16CHAR",
+      holder: "Var16Char"
+    },
+    {
+      hiveType: "BINARY",
+      hiveOI: "BinaryObjectInspector",
+      serdeConstant: "BINARY_TYPE_NAME",
+      javaType: "",
+      minorType: "VARBINARY",
+      holder: "VarBinary"
+    }
+  ]
+}

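Each entry above feeds both templates that follow: hiveType/hiveOI/serdeConstant pick the Hive side of the bridge, while minorType/holder pick the Drill side. A small sketch of the round trip for the INT row, using the helper generated from the next template (imports as in the previous sketch; illustrative, not part of the commit):

    // Drill type -> Hive ObjectInspector -> back to Drill type
    ObjectInspector oi = ObjectInspectorHelper.getDrillObjectInspector(MinorType.INT);
    MinorType minor = ObjectInspectorHelper.getDrillType(oi);   // MinorType.INT again
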
http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/980dc87b/contrib/storage-hive/core/src/main/codegen/includes/license.ftl
----------------------------------------------------------------------
diff --git a/contrib/storage-hive/core/src/main/codegen/includes/license.ftl b/contrib/storage-hive/core/src/main/codegen/includes/license.ftl
new file mode 100644
index 0000000..0455fd8
--- /dev/null
+++ b/contrib/storage-hive/core/src/main/codegen/includes/license.ftl
@@ -0,0 +1,18 @@
+/*******************************************************************************
+
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ ******************************************************************************/
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/980dc87b/contrib/storage-hive/core/src/main/codegen/templates/ObjectInspectorHelper.java
----------------------------------------------------------------------
diff --git a/contrib/storage-hive/core/src/main/codegen/templates/ObjectInspectorHelper.java b/contrib/storage-hive/core/src/main/codegen/templates/ObjectInspectorHelper.java
new file mode 100644
index 0000000..22a9eb2
--- /dev/null
+++ b/contrib/storage-hive/core/src/main/codegen/templates/ObjectInspectorHelper.java
@@ -0,0 +1,192 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+<@pp.dropOutputFile />
+<@pp.changeOutputFile name="/org/apache/drill/exec/expr/fn/impl/hive/ObjectInspectorHelper.java" />
+
+<#include "/@includes/license.ftl" />
+
+package org.apache.drill.exec.expr.fn.impl.hive;
+
+import com.sun.codemodel.*;
+
+import org.apache.drill.common.types.TypeProtos;
+import org.apache.drill.common.types.TypeProtos.MinorType;
+import org.apache.drill.exec.expr.DirectExpression;
+import org.apache.drill.exec.expr.TypeHelper;
+import org.apache.drill.exec.expr.holders.*;
+import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
+import org.apache.hadoop.hive.serde2.objectinspector.PrimitiveObjectInspector;
+import org.apache.hadoop.hive.serde2.objectinspector.PrimitiveObjectInspector.PrimitiveCategory;
+import org.apache.hadoop.hive.serde2.objectinspector.primitive.*;
+
+import java.util.HashMap;
+import java.util.Map;
+
+public class ObjectInspectorHelper {
+  static final org.slf4j.Logger logger = org.slf4j.LoggerFactory.getLogger(ObjectInspectorHelper.class);
+
+  private static Map<MinorType, Class> OIMAP = new HashMap<>();
+  static {
+<#list drillOI.map as entry>
+    OIMAP.put(MinorType.${entry.minorType}, Drill${entry.holder}ObjectInspector.class);
+</#list>
+  }
+
+  public static ObjectInspector getDrillObjectInspector(MinorType drillType) {
+    if (OIMAP.containsKey(drillType)) {
+      try {
+        return (ObjectInspector)OIMAP.get(drillType).newInstance();
+      } catch(InstantiationException | IllegalAccessException e) {
+        throw new RuntimeException("Failed to instantiate ObjectInspector", e);
+      }
+    }
+
+    throw new UnsupportedOperationException(drillType.toString());
+  }
+
+  public static JBlock initReturnValueHolder(JCodeModel m, JVar returnValueHolder, ObjectInspector oi, MinorType returnType) {
+    JBlock block = new JBlock(false, false);
+    switch(oi.getCategory()) {
+      case PRIMITIVE: {
+        PrimitiveObjectInspector poi = (PrimitiveObjectInspector)oi;
+        switch(poi.getPrimitiveCategory()) {
+<#list drillOI.map as entry>
+          case ${entry.hiveType}:{
+            JType holderClass = TypeHelper.getHolderType(m, returnType, TypeProtos.DataMode.OPTIONAL);
+            block.assign(returnValueHolder, JExpr._new(holderClass));
+
+          <#if entry.hiveType == "VARCHAR" || entry.hiveType == "STRING" || entry.hiveType == "BINARY">
+            block.assign(returnValueHolder.ref("buffer"),
+              m.directClass(io.netty.buffer.Unpooled.class.getCanonicalName())
+                .staticInvoke("wrappedBuffer")
+                .arg(JExpr.newArray(m._ref(byte.class), JExpr.lit(1000))));
+          </#if>
+            return block;
+          }
+</#list>
+          default:
+            throw new UnsupportedOperationException(String.format("Received unknown/unsupported type '%s'", poi.getPrimitiveCategory().toString()));
+        }
+      }
+
+      case MAP:
+      case LIST:
+      case STRUCT:
+      default:
+        throw new UnsupportedOperationException(String.format("Received unknown/unsupported type '%s'", oi.getCategory().toString()));
+    }
+  }
+
+  private static Map<PrimitiveCategory, MinorType> TYPE_HIVE2DRILL = new HashMap<>();
+  static {
+<#list drillOI.map as entry>
+    TYPE_HIVE2DRILL.put(PrimitiveCategory.${entry.hiveType}, MinorType.${entry.minorType});
+</#list>
+  }
+
+  public static MinorType getDrillType(ObjectInspector oi) {
+    switch(oi.getCategory()) {
+      case PRIMITIVE: {
+        PrimitiveObjectInspector poi = (PrimitiveObjectInspector)oi;
+        if (TYPE_HIVE2DRILL.containsKey(poi.getPrimitiveCategory())) {
+          return TYPE_HIVE2DRILL.get(poi.getPrimitiveCategory());
+        }
+        throw new UnsupportedOperationException();
+      }
+
+      case MAP:
+      case LIST:
+      case STRUCT:
+      default:
+        throw new UnsupportedOperationException();
+    }
+  }
+
+  public static JBlock getDrillObject(JCodeModel m, ObjectInspector oi,
+    JVar returnOI, JVar returnValueHolder, JVar returnValue) {
+    JBlock block = new JBlock(false, false);
+    switch(oi.getCategory()) {
+      case PRIMITIVE: {
+        PrimitiveObjectInspector poi = (PrimitiveObjectInspector)oi;
+        switch(poi.getPrimitiveCategory()) {
+<#list drillOI.map as entry>
+          case ${entry.hiveType}:{
+            JConditional jc = block._if(returnValue.eq(JExpr._null()));
+            jc._then().assign(returnValueHolder.ref("isSet"), JExpr.lit(0));
+            jc._else().assign(returnValueHolder.ref("isSet"), JExpr.lit(1));
+            JVar castedOI = jc._else().decl(
+              m.directClass(${entry.hiveOI}.class.getCanonicalName()), "castOI", JExpr._null());
+            jc._else().assign(castedOI,
+              JExpr.cast(m.directClass(${entry.hiveOI}.class.getCanonicalName()), returnOI));
+
+          <#if entry.hiveType == "BOOLEAN">
+            JConditional booleanJC = jc._else()._if(castedOI.invoke("get").arg(returnValue));
+            booleanJC._then().assign(returnValueHolder.ref("value"), JExpr.lit(1));
+            booleanJC._else().assign(returnValueHolder.ref("value"), JExpr.lit(0));
+
+          <#elseif entry.hiveType == "VARCHAR">
+            JVar data = jc._else().decl(m.directClass(byte[].class.getCanonicalName()), "data",
+              castedOI.invoke("getPrimitiveJavaObject").arg(returnValue)
+                      .invoke("getValue")
+                      .invoke("getBytes"));
+
+            jc._else().add(returnValueHolder.ref("buffer")
+              .invoke("setBytes").arg(JExpr.lit(0)).arg(data));
+
+
+            jc._else().assign(returnValueHolder.ref("start"), JExpr.lit(0));
+            jc._else().assign(returnValueHolder.ref("end"), data.ref("length"));
+
+          <#elseif entry.hiveType == "STRING">
+            JVar data = jc._else().decl(m.directClass(byte[].class.getCanonicalName()), "data",
+              castedOI.invoke("getPrimitiveJavaObject").arg(returnValue)
+                      .invoke("getBytes").arg(DirectExpression.direct("com.google.common.base.Charsets.UTF_16")));
+            jc._else().add(returnValueHolder.ref("buffer")
+              .invoke("setBytes").arg(JExpr.lit(0)).arg(data));
+            jc._else().assign(returnValueHolder.ref("start"), JExpr.lit(0));
+            jc._else().assign(returnValueHolder.ref("end"), data.ref("length"));
+          <#elseif entry.hiveType == "BINARY">
+
+            JVar data = jc._else().decl(m.directClass(byte[].class.getCanonicalName()), "data",
+              castedOI.invoke("getPrimitiveJavaObject").arg(returnValue));
+            jc._else().add(returnValueHolder.ref("buffer")
+                .invoke("setBytes").arg(JExpr.lit(0)).arg(data));
+            jc._else().assign(returnValueHolder.ref("start"), JExpr.lit(0));
+            jc._else().assign(returnValueHolder.ref("end"), data.ref("length"));
+
+          <#else>
+            jc._else().assign(returnValueHolder.ref("value"),
+              castedOI.invoke("get").arg(returnValue));
+          </#if>
+            return block;
+          }
+
+</#list>
+          default:
+            throw new UnsupportedOperationException(String.format("Received unknown/unsupported type '%s'", poi.getPrimitiveCategory().toString()));
+        }
+      }
+
+      case MAP:
+      case LIST:
+      case STRUCT:
+      default:
+        throw new UnsupportedOperationException(String.format("Received unknown/unsupported type '%s'", oi.getCategory().toString()));
+    }
+  }
+}

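Since the CodeModel calls above are hard to read at a glance, here is roughly the Java that getDrillObject(...) emits for an INT-returning UDF (a sketch of the generated block; ret, returnOI and returnValueHolder stand in for the generated variable names):

    if (ret == null) {
      returnValueHolder.isSet = 0;
    } else {
      returnValueHolder.isSet = 1;
      IntObjectInspector castOI = null;
      castOI = (IntObjectInspector) returnOI;
      returnValueHolder.value = castOI.get(ret);
    }

For the variable-width types (VARCHAR/STRING/BINARY) the else-branch instead copies the bytes into returnValueHolder.buffer and sets start/end, exactly as the template branches above spell out.
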
http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/980dc87b/contrib/storage-hive/core/src/main/codegen/templates/ObjectInspectors.java
----------------------------------------------------------------------
diff --git a/contrib/storage-hive/core/src/main/codegen/templates/ObjectInspectors.java b/contrib/storage-hive/core/src/main/codegen/templates/ObjectInspectors.java
new file mode 100644
index 0000000..9a8c837
--- /dev/null
+++ b/contrib/storage-hive/core/src/main/codegen/templates/ObjectInspectors.java
@@ -0,0 +1,114 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+<@pp.dropOutputFile />
+
+<#list drillOI.map as entry>
+<@pp.changeOutputFile name="/org/apache/drill/exec/expr/fn/impl/hive/Drill${entry.holder}ObjectInspector.java" />
+
+<#include "/@includes/license.ftl" />
+
+package org.apache.drill.exec.expr.fn.impl.hive;
+
+import org.apache.drill.exec.expr.holders.*;
+import org.apache.hadoop.hive.common.type.HiveVarchar;
+import org.apache.hadoop.hive.serde.serdeConstants;
+import org.apache.hadoop.hive.serde2.io.HiveVarcharWritable;
+import org.apache.hadoop.hive.serde2.objectinspector.primitive.*;
+import org.apache.hadoop.io.Text;
+
+public class Drill${entry.holder}ObjectInspector extends AbstractPrimitiveObjectInspector
+  implements ${entry.hiveOI} {
+
+  @Override
+  public String getTypeName() {
+    return serdeConstants.${entry.serdeConstant};
+  }
+
+<#if entry.minorType == "VARCHAR">
+  @Override
+  public HiveVarcharWritable getPrimitiveWritableObject(Object o) {
+    HiveVarcharWritable valW = new HiveVarcharWritable();
+    valW.set(getPrimitiveJavaObject(o));
+    return valW;
+  }
+
+  @Override
+  public HiveVarchar getPrimitiveJavaObject(Object o) {
+    String val = ((VarCharHolder)o).toString();
+    return new HiveVarchar(val, HiveVarchar.MAX_VARCHAR_LENGTH);
+  }
+<#elseif entry.minorType == "VAR16CHAR">
+@Override
+  public Text getPrimitiveWritableObject(Object o) {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public String getPrimitiveJavaObject(Object o) {
+    if (o instanceof Var16CharHolder)
+    return ((Var16CharHolder)o).toString();
+    else
+    return ((NullableVar16CharHolder)o).toString();
+  }
+<#elseif entry.minorType == "VARBINARY">  
+@Override
+public org.apache.hadoop.io.BytesWritable getPrimitiveWritableObject(Object o) {
+  throw new UnsupportedOperationException();
+}
+
+@Override
+public byte[] getPrimitiveJavaObject(Object o) {
+  if (o instanceof VarBinaryHolder){
+    VarBinaryHolder h = (VarBinaryHolder)o;
+    byte[] buf = new byte[h.end-h.start];
+    h.buffer.getBytes(h.start, buf, 0, h.end-h.start);
+    return buf;
+  }else{
+    NullableVarBinaryHolder h = (NullableVarBinaryHolder)o;
+    byte[] buf = new byte[h.end-h.start];
+    h.buffer.getBytes(h.start, buf, 0, h.end-h.start);
+    return buf;
+    
+  }
+}
+<#elseif entry.minorType == "BIT">
+  @Override
+  public boolean get(Object o) {
+    if (o instanceof BitHolder)
+    return ((BitHolder)o).value == 0 ? false : true;
+    else
+    return ((NullableBitHolder)o).value == 0 ? false : true;
+  }
+<#else>
+  @Override
+  public ${entry.javaType} get(Object o) {
+    if (o instanceof ${entry.holder}Holder)
+    return ((${entry.holder}Holder)o).value;
+    else
+    return ((Nullable${entry.holder}Holder)o).value;
+  }
+</#if>
+
+  @Override
+  public PrimitiveCategory getPrimitiveCategory() {
+    return PrimitiveCategory.${entry.hiveType};
+  }
+}
+
+</#list>
+

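Expanded for the INT entry of HiveTypes.tdd, the template above produces approximately the following class (modulo whitespace and the license header):

    package org.apache.drill.exec.expr.fn.impl.hive;

    import org.apache.drill.exec.expr.holders.*;
    import org.apache.hadoop.hive.serde.serdeConstants;
    import org.apache.hadoop.hive.serde2.objectinspector.primitive.*;

    public class DrillIntObjectInspector extends AbstractPrimitiveObjectInspector
      implements IntObjectInspector {

      @Override
      public String getTypeName() {
        return serdeConstants.INT_TYPE_NAME;
      }

      @Override
      public int get(Object o) {
        if (o instanceof IntHolder) {
          return ((IntHolder)o).value;
        } else {
          return ((NullableIntHolder)o).value;
        }
      }

      @Override
      public PrimitiveCategory getPrimitiveCategory() {
        return PrimitiveCategory.INT;
      }
    }
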
http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/980dc87b/contrib/storage-hive/core/src/main/java/org/apache/drill/exec/expr/HiveFuncHolderExpr.java
----------------------------------------------------------------------
diff --git a/contrib/storage-hive/core/src/main/java/org/apache/drill/exec/expr/HiveFuncHolderExpr.java b/contrib/storage-hive/core/src/main/java/org/apache/drill/exec/expr/HiveFuncHolderExpr.java
new file mode 100644
index 0000000..fd19e3d
--- /dev/null
+++ b/contrib/storage-hive/core/src/main/java/org/apache/drill/exec/expr/HiveFuncHolderExpr.java
@@ -0,0 +1,76 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.drill.exec.expr;
+
+import java.util.Iterator;
+import java.util.List;
+
+import org.apache.drill.common.expression.ExpressionPosition;
+import org.apache.drill.common.expression.FunctionHolderExpression;
+import org.apache.drill.common.expression.LogicalExpression;
+import org.apache.drill.common.expression.fn.FuncHolder;
+import org.apache.drill.common.expression.visitors.ExprVisitor;
+import org.apache.drill.common.types.TypeProtos.MajorType;
+
+import org.apache.drill.exec.expr.fn.HiveFuncHolder;
+
+public class HiveFuncHolderExpr extends FunctionHolderExpression implements Iterable<LogicalExpression>{
+  static final org.slf4j.Logger logger = org.slf4j.LoggerFactory.getLogger(DrillFuncHolderExpr.class);
+  private HiveFuncHolder holder;
+
+  public HiveFuncHolderExpr(String nameUsed, HiveFuncHolder holder, List<LogicalExpression> args, ExpressionPosition pos) {
+    super(nameUsed, pos, args);
+    this.holder = holder;
+  }
+
+  @Override
+  public MajorType getMajorType() {
+    return holder.getReturnType();
+  }
+
+  @Override
+  public Iterator<LogicalExpression> iterator() {
+    return args.iterator();
+  }
+
+  public FuncHolder getHolder() {
+    return holder;
+  }
+
+  @Override
+  public boolean isAggregating() {
+    return holder.isAggregating();
+  }
+
+  @Override
+  public boolean argConstantOnly(int i) {
+    // Hive UDFs appear to have no notion of constant argument inputs
+    return false;
+  }
+
+  @Override
+  public boolean isRandom() {
+    return holder.isRandom();
+  }
+  
+  @Override
+  public HiveFuncHolderExpr copy(List<LogicalExpression> args) {
+    return new HiveFuncHolderExpr(this.nameUsed, this.holder, args, this.getPosition());
+  }
+  
+}

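HiveFuncHolderExpr is the expression-tree node that carries a resolved holder. A sketch of how it enters the tree (a hypothetical helper; it simply mirrors HiveFuncHolder.getExpr(...) from the next file):

    // returns null when no Hive UDF matches the call's argument types
    static FunctionHolderExpression materialize(HiveFunctionRegistry registry, FunctionCall call) {
      HiveFuncHolder holder = registry.getFunction(call);
      return holder == null
          ? null
          : holder.getExpr(call.getName(), call.args, ExpressionPosition.UNKNOWN);
    }
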
http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/980dc87b/contrib/storage-hive/core/src/main/java/org/apache/drill/exec/expr/fn/HiveFuncHolder.java
----------------------------------------------------------------------
diff --git a/contrib/storage-hive/core/src/main/java/org/apache/drill/exec/expr/fn/HiveFuncHolder.java b/contrib/storage-hive/core/src/main/java/org/apache/drill/exec/expr/fn/HiveFuncHolder.java
new file mode 100644
index 0000000..813d4c5
--- /dev/null
+++ b/contrib/storage-hive/core/src/main/java/org/apache/drill/exec/expr/fn/HiveFuncHolder.java
@@ -0,0 +1,272 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.drill.exec.expr.fn;
+
+import com.sun.codemodel.JBlock;
+import com.sun.codemodel.JCatchBlock;
+import com.sun.codemodel.JClass;
+import com.sun.codemodel.JCodeModel;
+import com.sun.codemodel.JConditional;
+import com.sun.codemodel.JExpr;
+import com.sun.codemodel.JInvocation;
+import com.sun.codemodel.JTryBlock;
+import com.sun.codemodel.JVar;
+import org.apache.drill.common.expression.ExpressionPosition;
+import org.apache.drill.common.expression.FunctionHolderExpression;
+import org.apache.drill.common.expression.LogicalExpression;
+import org.apache.drill.common.types.TypeProtos;
+import org.apache.drill.common.types.TypeProtos.MajorType;
+import org.apache.drill.exec.expr.ClassGenerator;
+import org.apache.drill.exec.expr.ClassGenerator.HoldingContainer;
+import org.apache.drill.exec.expr.HiveFuncHolderExpr;
+import org.apache.drill.exec.expr.TypeHelper;
+import org.apache.drill.exec.expr.fn.impl.hive.DrillDeferredObject;
+import org.apache.drill.exec.expr.fn.impl.hive.ObjectInspectorHelper;
+import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
+import org.apache.hadoop.hive.ql.exec.UDF;
+import org.apache.hadoop.hive.ql.udf.generic.GenericUDFBridge;
+import org.apache.hadoop.hive.ql.udf.generic.GenericUDF;
+
+import java.util.List;
+
+public class HiveFuncHolder extends AbstractFuncHolder {
+  static final org.slf4j.Logger logger = org.slf4j.LoggerFactory.getLogger(FunctionImplementationRegistry.class);
+
+  private MajorType[] argTypes;
+  private ObjectInspector returnOI;
+  private MajorType returnType;
+  private Class<? extends GenericUDF> genericUdfClazz;
+  private boolean isGenericUDF = true;
+  private Class<? extends UDF> udfClazz = null;
+  private String udfName = "";
+  private boolean isRandom;
+
+
+  /**
+   * Create holder for a GenericUDF implementation.
+   * @param genericUdfClazz implementation class
+   * @param argTypes major types of the arguments
+   * @param returnOI ObjectInspector of the return value
+   * @param returnType major type of the return value
+   * @param isRandom whether the UDF is non-deterministic
+   */
+  public HiveFuncHolder(Class<? extends GenericUDF> genericUdfClazz, MajorType[] argTypes,
+                        ObjectInspector returnOI, MajorType returnType, boolean isRandom) {
+    this.genericUdfClazz = genericUdfClazz;
+    this.argTypes = argTypes;
+    this.returnOI = returnOI;
+    this.returnType = returnType;
+    this.isRandom = isRandom;
+  }
+
+  /**
+   * Create holder for a plain UDF (wrapped through GenericUDFBridge).
+   * @param udfName name of the function
+   * @param udfClazz UDF implementation class
+   * @param argTypes major types of the arguments
+   * @param returnOI ObjectInspector of the return value
+   * @param returnType major type of the return value
+   * @param isRandom whether the UDF is non-deterministic
+   */
+  public HiveFuncHolder(String udfName, Class< ? extends UDF> udfClazz, MajorType[] argTypes,
+                        ObjectInspector returnOI, MajorType returnType, boolean isRandom) {
+    this(GenericUDFBridge.class, argTypes, returnOI, returnType, isRandom);
+    this.isGenericUDF = false;
+    this.udfClazz = udfClazz;
+    this.udfName = udfName;
+  }
+
+  /**
+   * UDF return type
+   */
+  public MajorType getReturnType() {
+    return returnType;
+  }
+
+  /**
+   * Is this an aggregating function?
+   */
+  public boolean isAggregating() {
+    // currently only simple UDFs are supported
+    return false;
+  }
+
+  /**
+   * Is the function non-deterministic?
+   */
+  public boolean isRandom() {
+    return isRandom;
+  }
+
+  /**
+   * Start generating code
+   * @return workspace variables
+   */
+  @Override
+  public JVar[] renderStart(ClassGenerator<?> g, HoldingContainer[] inputVariables){
+    JVar[] workspaceJVars = new JVar[5];
+
+    workspaceJVars[0] = g.declareClassField("returnOI", g.getModel()._ref(ObjectInspector.class));
+    workspaceJVars[1] = g.declareClassField("udfInstance", g.getModel()._ref(GenericUDF.class));
+    workspaceJVars[2] = g.declareClassField("deferredObjects", g.getModel()._ref(DrillDeferredObject[].class));
+    workspaceJVars[3] = g.declareClassField("arguments", g.getModel()._ref(DrillDeferredObject[].class));
+    workspaceJVars[4] = g.declareClassField("returnValueHolder",
+      TypeHelper.getHolderType(g.getModel(), returnType.getMinorType(), TypeProtos.DataMode.OPTIONAL));
+
+    return workspaceJVars;
+  }
+
+  /**
+   * Complete code generation
+   * @param g class generator
+   * @param inputVariables holders carrying the input values
+   * @param workspaceJVars workspace variables created in renderStart()
+   * @return HoldingContainer for the return value
+   */
+  @Override
+  public HoldingContainer renderEnd(ClassGenerator<?> g, HoldingContainer[] inputVariables, JVar[]  workspaceJVars) {
+    generateSetup(g, workspaceJVars);
+    return generateEval(g, inputVariables, workspaceJVars);
+  }
+
+  private JInvocation getUDFInstance(JCodeModel m) {
+    if (isGenericUDF) {
+      return JExpr._new(m.directClass(genericUdfClazz.getCanonicalName()));
+    } else {
+      return JExpr._new(m.directClass(GenericUDFBridge.class.getCanonicalName()))
+        .arg(JExpr.lit(udfName))
+        .arg(JExpr.lit(false))
+        .arg(JExpr.dotclass(m.directClass(udfClazz.getCanonicalName())));
+    }
+  }
+
+  @Override
+  public FunctionHolderExpression getExpr(String name, List<LogicalExpression> args, ExpressionPosition pos) {
+    return new HiveFuncHolderExpr(name, this, args, pos);
+  }
+
+  private void generateSetup(ClassGenerator<?> g, JVar[] workspaceJVars) {
+    JCodeModel m = g.getModel();
+    JBlock sub = new JBlock(true, true);
+
+    // declare and instantiate argument ObjectInspector's
+    JVar oiArray = sub.decl(
+      m._ref(ObjectInspector[].class),
+      "argOIs",
+      JExpr.newArray(m._ref(ObjectInspector.class), argTypes.length));
+
+    JClass oih = m.directClass(ObjectInspectorHelper.class.getCanonicalName());
+    JClass mt = m.directClass(TypeProtos.MinorType.class.getCanonicalName());
+    for(int i=0; i<argTypes.length; i++) {
+      sub.assign(
+        oiArray.component(JExpr.lit(i)),
+        oih.staticInvoke("getDrillObjectInspector")
+          .arg(mt.staticInvoke("valueOf")
+            .arg(JExpr.lit(argTypes[i].getMinorType().getNumber()))));
+    }
+
+    // declare and instantiate DeferredObject array
+    sub.assign(workspaceJVars[2], JExpr.newArray(m._ref(DrillDeferredObject.class), argTypes.length));
+
+    for(int i=0; i<argTypes.length; i++) {
+      sub.assign(
+        workspaceJVars[2].component(JExpr.lit(i)),
+        JExpr._new(m.directClass(DrillDeferredObject.class.getCanonicalName())));
+    }
+
+    // declare empty array for argument deferred objects
+    sub.assign(workspaceJVars[3], JExpr.newArray(m._ref(DrillDeferredObject.class), argTypes.length));
+
+    // create new instance of the UDF class
+    sub.assign(workspaceJVars[1], getUDFInstance(m));
+
+    // create try..catch block to initialize the UDF instance with argument OIs
+    JTryBlock udfInitTry = sub._try();
+    udfInitTry.body().assign(
+      workspaceJVars[0],
+      workspaceJVars[1].invoke("initialize")
+      .arg(oiArray));
+
+    JCatchBlock udfInitCatch = udfInitTry._catch(m.directClass(Exception.class.getCanonicalName()));
+    JVar exVar = udfInitCatch.param("ex");
+    udfInitCatch.body()
+      ._throw(JExpr._new(m.directClass(RuntimeException.class.getCanonicalName()))
+        .arg(JExpr.lit(String.format("Failed to initialize GenericUDF"))).arg(exVar));
+
+    sub.add(ObjectInspectorHelper.initReturnValueHolder(m, workspaceJVars[4], returnOI, returnType.getMinorType()));
+
+    // now add it to the doSetup block in Generated class
+    JBlock setup = g.getBlock(ClassGenerator.BlockType.SETUP);
+    setup.directStatement(String.format("/** start %s for function %s **/ ",
+      ClassGenerator.BlockType.SETUP.name(), genericUdfClazz.getName() + (!isGenericUDF ? "("+udfName+")" : "")));
+
+    setup.add(sub);
+
+    setup.directStatement(String.format("/** end %s for function %s **/ ",
+      ClassGenerator.BlockType.SETUP.name(), genericUdfClazz.getName() + (!isGenericUDF ? "("+udfName+")" : "")));
+  }
+
+  private HoldingContainer generateEval(ClassGenerator<?> g, HoldingContainer[] inputVariables, JVar[] workspaceJVars) {
+
+    HoldingContainer out = g.declare(returnType);
+
+    JCodeModel m = g.getModel();
+    JBlock sub = new JBlock(true, true);
+
+    // initialize DeferredObject's. For an optional type, assign the value holder only if it is not null
+    for(int i=0; i<argTypes.length; i++) {
+      if (inputVariables[i].isOptional()) {
+        JBlock conditionalBlock = new JBlock(false, false);
+        JConditional jc = conditionalBlock._if(inputVariables[i].getIsSet().ne(JExpr.lit(0)));
+        jc._then().assign(workspaceJVars[3].component(JExpr.lit(i)), workspaceJVars[2].component(JExpr.lit(i)));
+        jc._then().assign(JExpr.ref(workspaceJVars[3].component(JExpr.lit(i)), "valueHolder"), inputVariables[i].getHolder());
+        jc._else().assign(workspaceJVars[3].component(JExpr.lit(i)), JExpr._null());
+        sub.add(conditionalBlock);
+      } else {
+        sub.assign(workspaceJVars[3].component(JExpr.lit(i)), workspaceJVars[2].component(JExpr.lit(i)));
+        sub.assign(JExpr.ref(workspaceJVars[3].component(JExpr.lit(i)), "valueHolder"), inputVariables[i].getHolder());
+      }
+    }
+
+    // declare generic object for storing return value from GenericUDF.evaluate
+    JVar retVal = sub.decl(m._ref(Object.class), "ret");
+
+    // create try..catch block to call the GenericUDF instance with given input
+    JTryBlock udfEvalTry = sub._try();
+    udfEvalTry.body().assign(retVal,
+      workspaceJVars[1].invoke("evaluate").arg(workspaceJVars[3]));
+
+    JCatchBlock udfEvalCatch = udfEvalTry._catch(m.directClass(Exception.class.getCanonicalName()));
+    JVar exVar = udfEvalCatch.param("ex");
+    udfEvalCatch.body()
+      ._throw(JExpr._new(m.directClass(RuntimeException.class.getCanonicalName()))
+        .arg(JExpr.lit(String.format("GenericUDF.evaluate method failed"))).arg(exVar));
+
+    // get the ValueHolder from retVal and return ObjectInspector
+    sub.add(ObjectInspectorHelper.getDrillObject(m, returnOI, workspaceJVars[0], workspaceJVars[4], retVal));
+    sub.assign(out.getHolder(), workspaceJVars[4]);
+
+    // now add it to the doEval block in Generated class
+    JBlock setup = g.getBlock(ClassGenerator.BlockType.EVAL);
+    setup.directStatement(String.format("/** start %s for function %s **/ ",
+      ClassGenerator.BlockType.EVAL.name(), genericUdfClazz.getName() + (!isGenericUDF ? "("+udfName+")" : "")));
+    setup.add(sub);
+    setup.directStatement(String.format("/** end %s for function %s **/ ",
+      ClassGenerator.BlockType.EVAL.name(), genericUdfClazz.getName() + (!isGenericUDF ? "("+udfName+")" : "")));
+
+    return out;
+  }
+}

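For orientation, the doEval block generated above reduces, for a single required (non-OPTIONAL) argument, to roughly the following (a sketch; input0 stands for the argument holder, the workspace variables keep their declared names):

    arguments[0] = deferredObjects[0];        // reuse the pre-allocated deferred object
    arguments[0].valueHolder = input0;        // bind the Drill holder for this row
    // (for an OPTIONAL input, arguments[i] is set to null when isSet == 0)

    Object ret;
    try {
      ret = udfInstance.evaluate(arguments);  // call into the Hive UDF
    } catch (Exception ex) {
      throw new RuntimeException("GenericUDF.evaluate method failed", ex);
    }
    // ret is then unwrapped into returnValueHolder via ObjectInspectorHelper
    // .getDrillObject(...) and handed out as the function result
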
http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/980dc87b/contrib/storage-hive/core/src/main/java/org/apache/drill/exec/expr/fn/HiveFunctionRegistry.java
----------------------------------------------------------------------
diff --git a/contrib/storage-hive/core/src/main/java/org/apache/drill/exec/expr/fn/HiveFunctionRegistry.java b/contrib/storage-hive/core/src/main/java/org/apache/drill/exec/expr/fn/HiveFunctionRegistry.java
new file mode 100644
index 0000000..b9369ed
--- /dev/null
+++ b/contrib/storage-hive/core/src/main/java/org/apache/drill/exec/expr/fn/HiveFunctionRegistry.java
@@ -0,0 +1,165 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.drill.exec.expr.fn;
+
+import java.util.HashSet;
+import java.util.Set;
+
+import com.google.common.collect.Sets;
+import org.apache.drill.common.config.DrillConfig;
+import org.apache.drill.common.expression.FunctionCall;
+import org.apache.drill.common.types.TypeProtos.MajorType;
+import org.apache.drill.common.types.Types;
+import org.apache.drill.common.util.PathScanner;
+import org.apache.drill.exec.expr.fn.impl.hive.ObjectInspectorHelper;
+import org.apache.drill.exec.planner.sql.DrillOperatorTable;
+import org.apache.drill.exec.planner.sql.HiveUDFOperator;
+import org.apache.hadoop.hive.ql.exec.Description;
+import org.apache.hadoop.hive.ql.exec.UDF;
+import org.apache.hadoop.hive.ql.udf.UDFType;
+import org.apache.hadoop.hive.ql.udf.generic.GenericUDF;
+import org.apache.hadoop.hive.ql.udf.generic.GenericUDFBridge;
+import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
+
+import com.google.common.collect.ArrayListMultimap;
+
+public class HiveFunctionRegistry implements PluggableFunctionRegistry{
+  static final org.slf4j.Logger logger = org.slf4j.LoggerFactory.getLogger(HiveFunctionRegistry.class);
+
+  private ArrayListMultimap<String, Class<? extends GenericUDF>> methodsGenericUDF = ArrayListMultimap.create();
+  private ArrayListMultimap<String, Class<? extends UDF>> methodsUDF = ArrayListMultimap.create();
+  private HashSet<Class<?>> nonDeterministicUDFs = new HashSet<>();
+
+  /**
+   * Scan the classpath for implementations of the GenericUDF/UDF interfaces,
+   * extract the function annotations and store the
+   * (function name) --> (implementation class) mappings.
+   * @param config drill configuration
+   */
+  public HiveFunctionRegistry(DrillConfig config){
+    Set<Class<? extends GenericUDF>> genericUDFClasses = PathScanner.scanForImplementations(GenericUDF.class, null);
+    for (Class<? extends GenericUDF> clazz : genericUDFClasses)
+      register(clazz, methodsGenericUDF);
+
+    Set<Class<? extends UDF>> udfClasses = PathScanner.scanForImplementations(UDF.class, null);
+    for (Class<? extends UDF> clazz : udfClasses)
+      register(clazz, methodsUDF);
+  }
+
+  @Override
+  public void register(DrillOperatorTable operatorTable) {
+    for (String name : Sets.union(methodsGenericUDF.asMap().keySet(), methodsUDF.asMap().keySet())) {
+      operatorTable.add(name, new HiveUDFOperator(name.toUpperCase()));
+    }
+  }
+
+  private <C,I> void register(Class<? extends I> clazz, ArrayListMultimap<String,Class<? extends I>> methods) {
+    Description desc = clazz.getAnnotation(Description.class);
+    String[] names;
+    if(desc != null){
+      names = desc.name().split(",");
+      for(int i=0; i<names.length; i++) names[i] = names[i].trim();
+    }else{
+      names = new String[]{clazz.getName().replace('.', '_')};
+    }
+    
+    UDFType type = clazz.getAnnotation(UDFType.class);
+    // remember UDFs that are explicitly marked as non-deterministic
+    if (type != null && !type.deterministic()) nonDeterministicUDFs.add(clazz);
+
+
+    for(int i=0; i<names.length;i++){
+      methods.put(names[i].toLowerCase(), clazz);
+    }
+  }
+
+  /**
+   * Find the UDF class for the given function name and check whether it accepts the
+   * given input argument types. If a match is found, create and return a holder;
+   * otherwise return null.
+   * @param call function call to resolve
+   * @return matching holder, or null if no candidate accepts the argument types
+   */
+  @Override
+  public HiveFuncHolder getFunction(FunctionCall call){
+    HiveFuncHolder holder;
+    MajorType[] argTypes = new MajorType[call.args.size()];
+    ObjectInspector[] argOIs = new ObjectInspector[call.args.size()];
+    for(int i=0; i<call.args.size(); i++) {
+      argTypes[i] = call.args.get(i).getMajorType();
+      argOIs[i] = ObjectInspectorHelper.getDrillObjectInspector(argTypes[i].getMinorType());
+    }
+
+    String funcName = call.getName().toLowerCase();
+
+    // search in GenericUDF list
+    for(Class<? extends GenericUDF> clazz: methodsGenericUDF.get(funcName)) {
+      holder = matchAndCreateGenericUDFHolder(clazz, argTypes, argOIs);
+      if(holder != null)
+        return holder;
+    }
+
+    // search in UDF list
+    for (Class<? extends UDF> clazz : methodsUDF.get(funcName)) {
+      holder = matchAndCreateUDFHolder(call.getName(), clazz, argTypes, argOIs);
+      if (holder != null)
+        return holder;
+    }
+
+    return null;
+  }
+
+  private HiveFuncHolder matchAndCreateGenericUDFHolder(Class<? extends GenericUDF> udfClazz,
+                                              MajorType[] argTypes,
+                                              ObjectInspector[] argOIs) {
+    // probe the UDF to find out whether the arg types are acceptable;
+    // if they are, create a holder object
+    try {
+      GenericUDF udfInstance = udfClazz.newInstance();
+      ObjectInspector returnOI = udfInstance.initialize(argOIs);
+      return new HiveFuncHolder(
+        udfClazz,
+        argTypes,
+        returnOI,
+        Types.optional(ObjectInspectorHelper.getDrillType(returnOI)),
+        nonDeterministicUDFs.contains(udfClazz));
+    } catch(IllegalAccessException | InstantiationException e) {
+      logger.debug("Failed to instantiate class", e);
+    } catch(Exception e) { /*ignore this*/ }
+
+    return null;
+  }
+
+  private HiveFuncHolder matchAndCreateUDFHolder(String udfName,
+                                                 Class<? extends UDF> udfClazz,
+                                                 MajorType[] argTypes,
+                                                 ObjectInspector[] argOIs) {
+    try {
+      GenericUDF udfInstance = new GenericUDFBridge(udfName, false/* is operator */, udfClazz);
+      ObjectInspector returnOI = udfInstance.initialize(argOIs);
+
+      return new HiveFuncHolder(
+        udfName,
+        udfClazz,
+        argTypes,
+        returnOI,
+        Types.optional(ObjectInspectorHelper.getDrillType(returnOI)),
+        nonDeterministicUDFs.contains(udfClazz));
+    } catch(Exception e) { /*ignore this*/ }
+
+    return null;
+  }
+}

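The matching strategy above delegates to Hive itself: instantiate the UDF (bridged if it is a plain UDF) and let initialize() accept or reject the argument ObjectInspectors. A standalone sketch of that probe, using Hive's stock UDFLower purely as an example:

    GenericUDF udf = new GenericUDFBridge("lower", false, UDFLower.class);
    ObjectInspector[] argOIs = {
        ObjectInspectorHelper.getDrillObjectInspector(TypeProtos.MinorType.VARCHAR)
    };
    try {
      ObjectInspector returnOI = udf.initialize(argOIs);   // accepted: build a holder
    } catch (Exception e) {
      // rejected: fall through to the next candidate, as getFunction(...) does
    }
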
http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/980dc87b/contrib/storage-hive/core/src/main/java/org/apache/drill/exec/expr/fn/impl/hive/AbstractPrimitiveObjectInspector.java
----------------------------------------------------------------------
diff --git a/contrib/storage-hive/core/src/main/java/org/apache/drill/exec/expr/fn/impl/hive/AbstractPrimitiveObjectInspector.java b/contrib/storage-hive/core/src/main/java/org/apache/drill/exec/expr/fn/impl/hive/AbstractPrimitiveObjectInspector.java
new file mode 100644
index 0000000..04b552e
--- /dev/null
+++ b/contrib/storage-hive/core/src/main/java/org/apache/drill/exec/expr/fn/impl/hive/AbstractPrimitiveObjectInspector.java
@@ -0,0 +1,70 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.drill.exec.expr.fn.impl.hive;
+
+import org.apache.hadoop.hive.serde2.objectinspector.PrimitiveObjectInspector;
+import org.apache.hadoop.hive.serde2.typeinfo.BaseTypeParams;
+
+
+public abstract class AbstractPrimitiveObjectInspector implements PrimitiveObjectInspector {
+
+  @Override
+  public Class<?> getPrimitiveWritableClass() {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public Object getPrimitiveWritableObject(Object o) {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public Class<?> getJavaPrimitiveClass() {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public Object getPrimitiveJavaObject(Object o) {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public Object copyObject(Object o) {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public boolean preferWritable() {
+    return false;
+  }
+
+  @Override
+  public BaseTypeParams getTypeParams() {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public void setTypeParams(BaseTypeParams baseTypeParams) {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public Category getCategory() {
+    return Category.PRIMITIVE;
+  }
+}

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/980dc87b/contrib/storage-hive/core/src/main/java/org/apache/drill/exec/expr/fn/impl/hive/DrillDeferredObject.java
----------------------------------------------------------------------
diff --git a/contrib/storage-hive/core/src/main/java/org/apache/drill/exec/expr/fn/impl/hive/DrillDeferredObject.java b/contrib/storage-hive/core/src/main/java/org/apache/drill/exec/expr/fn/impl/hive/DrillDeferredObject.java
new file mode 100644
index 0000000..fbc5f05
--- /dev/null
+++ b/contrib/storage-hive/core/src/main/java/org/apache/drill/exec/expr/fn/impl/hive/DrillDeferredObject.java
@@ -0,0 +1,34 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.drill.exec.expr.fn.impl.hive;
+
+import org.apache.drill.exec.expr.holders.ValueHolder;
+import org.apache.hadoop.hive.ql.metadata.HiveException;
+import org.apache.hadoop.hive.ql.udf.generic.GenericUDF;
+
+public class DrillDeferredObject implements GenericUDF.DeferredObject {
+  public ValueHolder valueHolder;
+
+  @Override
+  public void prepare(int version) throws HiveException {}
+
+  @Override
+  public Object get() throws HiveException {
+    return valueHolder;
+  }
+}

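Seen from Hive's side of the boundary, every argument arrives as one of these deferred objects, and get() hands back the raw Drill holder, which the generated Drill*ObjectInspector classes know how to unwrap. A tiny sketch (IntHolder chosen arbitrarily; get() declares HiveException but never throws here):

    DrillDeferredObject arg = new DrillDeferredObject();
    IntHolder holder = new IntHolder();
    holder.value = 42;
    arg.valueHolder = holder;
    Object o = arg.get();   // returns the IntHolder bound above
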
http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/980dc87b/contrib/storage-hive/core/src/main/java/org/apache/drill/exec/planner/sql/HiveUDFOperator.java
----------------------------------------------------------------------
diff --git a/contrib/storage-hive/core/src/main/java/org/apache/drill/exec/planner/sql/HiveUDFOperator.java b/contrib/storage-hive/core/src/main/java/org/apache/drill/exec/planner/sql/HiveUDFOperator.java
new file mode 100644
index 0000000..71860c3
--- /dev/null
+++ b/contrib/storage-hive/core/src/main/java/org/apache/drill/exec/planner/sql/HiveUDFOperator.java
@@ -0,0 +1,70 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.drill.exec.planner.sql;
+
+import org.eigenbase.reltype.RelDataType;
+import org.eigenbase.sql.SqlCall;
+import org.eigenbase.sql.SqlCallBinding;
+import org.eigenbase.sql.SqlFunction;
+import org.eigenbase.sql.SqlFunctionCategory;
+import org.eigenbase.sql.SqlIdentifier;
+import org.eigenbase.sql.SqlOperandCountRange;
+import org.eigenbase.sql.SqlOperator;
+import org.eigenbase.sql.parser.SqlParserPos;
+import org.eigenbase.sql.type.SqlOperandCountRanges;
+import org.eigenbase.sql.type.SqlOperandTypeChecker;
+import org.eigenbase.sql.type.SqlTypeName;
+import org.eigenbase.sql.validate.SqlValidator;
+import org.eigenbase.sql.validate.SqlValidatorScope;
+
+public class HiveUDFOperator extends SqlFunction {
+
+  public HiveUDFOperator(String name) {
+    super(new SqlIdentifier(name, SqlParserPos.ZERO), DynamicReturnType.INSTANCE, null, new ArgChecker(), null,
+        SqlFunctionCategory.USER_DEFINED_FUNCTION);
+  }
+
+  @Override
+  public RelDataType deriveType(SqlValidator validator, SqlValidatorScope scope, SqlCall call) {
+    return validator.getTypeFactory().createSqlType(SqlTypeName.ANY);
+  }
+
+  /** Argument Checker for variable number of arguments */
+  public static class ArgChecker implements SqlOperandTypeChecker {
+
+    public static ArgChecker INSTANCE = new ArgChecker();
+
+    private SqlOperandCountRange range = SqlOperandCountRanges.any();
+
+    @Override
+    public boolean checkOperandTypes(SqlCallBinding callBinding, boolean throwOnFailure) {
+      return true;
+    }
+
+    @Override
+    public SqlOperandCountRange getOperandCountRange() {
+      return range;
+    }
+
+    @Override
+    public String getAllowedSignatures(SqlOperator op, String opName) {
+      return opName + "(HiveUDF - Opaque)";
+    }
+  }
+}

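On the planner side, each discovered function name becomes one of these opaque operators: ArgChecker passes any operand count and types, and deriveType(...) answers ANY, deferring real typing to execution-time code generation. Registration is a one-liner, mirroring HiveFunctionRegistry.register(...) above:

    // operatorTable is a DrillOperatorTable supplied by the planner
    operatorTable.add("lower", new HiveUDFOperator("LOWER"));
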
http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/980dc87b/contrib/storage-hive/core/src/main/java/org/apache/drill/exec/store/hive/HiveFieldConverter.java
----------------------------------------------------------------------
diff --git a/contrib/storage-hive/core/src/main/java/org/apache/drill/exec/store/hive/HiveFieldConverter.java b/contrib/storage-hive/core/src/main/java/org/apache/drill/exec/store/hive/HiveFieldConverter.java
new file mode 100644
index 0000000..5095d90
--- /dev/null
+++ b/contrib/storage-hive/core/src/main/java/org/apache/drill/exec/store/hive/HiveFieldConverter.java
@@ -0,0 +1,200 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.drill.exec.store.hive;
+
+import java.util.Map;
+
+import org.apache.drill.exec.vector.NullableBigIntVector;
+import org.apache.drill.exec.vector.NullableBitVector;
+import org.apache.drill.exec.vector.NullableDateVector;
+import org.apache.drill.exec.vector.NullableFloat4Vector;
+import org.apache.drill.exec.vector.NullableFloat8Vector;
+import org.apache.drill.exec.vector.NullableIntVector;
+import org.apache.drill.exec.vector.NullableSmallIntVector;
+import org.apache.drill.exec.vector.NullableTimeStampVector;
+import org.apache.drill.exec.vector.NullableTinyIntVector;
+import org.apache.drill.exec.vector.NullableVarBinaryVector;
+import org.apache.drill.exec.vector.NullableVarCharVector;
+import org.apache.drill.exec.vector.ValueVector;
+import org.apache.hadoop.hive.common.type.HiveDecimal;
+import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
+import org.apache.hadoop.hive.serde2.objectinspector.PrimitiveObjectInspector.PrimitiveCategory;
+import org.apache.hadoop.hive.serde2.objectinspector.primitive.BinaryObjectInspector;
+import org.apache.hadoop.hive.serde2.objectinspector.primitive.BooleanObjectInspector;
+import org.apache.hadoop.hive.serde2.objectinspector.primitive.ByteObjectInspector;
+import org.apache.hadoop.hive.serde2.objectinspector.primitive.DateObjectInspector;
+import org.apache.hadoop.hive.serde2.objectinspector.primitive.DoubleObjectInspector;
+import org.apache.hadoop.hive.serde2.objectinspector.primitive.FloatObjectInspector;
+import org.apache.hadoop.hive.serde2.objectinspector.primitive.HiveDecimalObjectInspector;
+import org.apache.hadoop.hive.serde2.objectinspector.primitive.HiveVarcharObjectInspector;
+import org.apache.hadoop.hive.serde2.objectinspector.primitive.IntObjectInspector;
+import org.apache.hadoop.hive.serde2.objectinspector.primitive.LongObjectInspector;
+import org.apache.hadoop.hive.serde2.objectinspector.primitive.ShortObjectInspector;
+import org.apache.hadoop.hive.serde2.objectinspector.primitive.StringObjectInspector;
+import org.apache.hadoop.hive.serde2.objectinspector.primitive.TimestampObjectInspector;
+import org.apache.hadoop.hive.serde2.typeinfo.PrimitiveTypeInfo;
+import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo;
+import org.apache.hadoop.io.Text;
+import org.joda.time.DateTime;
+import org.joda.time.DateTimeZone;
+
+import com.google.common.collect.Maps;
+
+public abstract class HiveFieldConverter {
+
+  public abstract boolean setSafeValue(ObjectInspector oi, Object hiveFieldValue, ValueVector outputVV, int outputIndex);
+
+  private static Map<PrimitiveCategory, Class< ? extends HiveFieldConverter>> primMap = Maps.newHashMap();
+
+  static {
+    primMap.put(PrimitiveCategory.BINARY, Binary.class);
+    primMap.put(PrimitiveCategory.BOOLEAN, Boolean.class);
+    primMap.put(PrimitiveCategory.BYTE, Byte.class);
+    primMap.put(PrimitiveCategory.DECIMAL, Decimal.class);
+    primMap.put(PrimitiveCategory.DOUBLE, Double.class);
+    primMap.put(PrimitiveCategory.FLOAT, Float.class);
+    primMap.put(PrimitiveCategory.INT, Int.class);
+    primMap.put(PrimitiveCategory.LONG, Long.class);
+    primMap.put(PrimitiveCategory.SHORT, Short.class);
+    primMap.put(PrimitiveCategory.STRING, String.class);
+    primMap.put(PrimitiveCategory.VARCHAR, VarChar.class);
+    primMap.put(PrimitiveCategory.TIMESTAMP, Timestamp.class);
+    primMap.put(PrimitiveCategory.DATE, Date.class);
+  }
+
+
+  public static HiveFieldConverter create(TypeInfo typeInfo) throws IllegalAccessException, InstantiationException {
+    switch (typeInfo.getCategory()) {
+      case PRIMITIVE:
+        final PrimitiveCategory pCat = ((PrimitiveTypeInfo) typeInfo).getPrimitiveCategory();
+        Class< ? extends HiveFieldConverter> clazz = primMap.get(pCat);
+        if (clazz != null)
+          return clazz.newInstance();
+
+        HiveRecordReader.throwUnsupportedHiveDataTypeError(pCat.toString());
+        break;
+
+      case LIST:
+      case MAP:
+      case STRUCT:
+      case UNION:
+      default:
+        HiveRecordReader.throwUnsupportedHiveDataTypeError(typeInfo.getCategory().toString());
+    }
+
+    return null;
+  }
+
+  public static class Binary extends HiveFieldConverter {
+    public boolean setSafeValue(ObjectInspector oi, Object hiveFieldValue, ValueVector outputVV, int outputIndex) {
+      final byte[] value = ((BinaryObjectInspector)oi).getPrimitiveJavaObject(hiveFieldValue);
+      return ((NullableVarBinaryVector) outputVV).getMutator().setSafe(outputIndex, value, 0, value.length);
+    }
+  }
+
+  public static class Boolean extends HiveFieldConverter {
+    public boolean setSafeValue(ObjectInspector oi, Object hiveFieldValue, ValueVector outputVV, int outputIndex) {
+      final boolean value = (boolean) ((BooleanObjectInspector)oi).getPrimitiveJavaObject(hiveFieldValue);
+      return ((NullableBitVector) outputVV).getMutator().setSafe(outputIndex, value ? 1 : 0);
+    }
+  }
+
+  public static class Byte extends HiveFieldConverter {
+    public boolean setSafeValue(ObjectInspector oi, Object hiveFieldValue, ValueVector outputVV, int outputIndex) {
+      final byte value = (byte) ((ByteObjectInspector)oi).getPrimitiveJavaObject(hiveFieldValue);
+      return ((NullableTinyIntVector) outputVV).getMutator().setSafe(outputIndex, value);
+    }
+  }
+
+  public static class Decimal extends HiveFieldConverter {
+    public boolean setSafeValue(ObjectInspector oi, Object hiveFieldValue, ValueVector outputVV, int outputIndex) {
+      final HiveDecimal value = ((HiveDecimalObjectInspector)oi).getPrimitiveJavaObject(hiveFieldValue);
+      final byte[] strBytes = value.toString().getBytes();
+      return ((NullableVarCharVector) outputVV).getMutator().setSafe(outputIndex, strBytes, 0, strBytes.length);
+    }
+  }
+
+  public static class Double extends HiveFieldConverter {
+    public boolean setSafeValue(ObjectInspector oi, Object hiveFieldValue, ValueVector outputVV, int outputIndex) {
+      final double value = (double) ((DoubleObjectInspector)oi).getPrimitiveJavaObject(hiveFieldValue);
+      return ((NullableFloat8Vector) outputVV).getMutator().setSafe(outputIndex, value);
+    }
+  }
+
+  public static class Float extends HiveFieldConverter {
+    public boolean setSafeValue(ObjectInspector oi, Object hiveFieldValue, ValueVector outputVV, int outputIndex) {
+      final float value = (float) ((FloatObjectInspector)oi).getPrimitiveJavaObject(hiveFieldValue);
+      return ((NullableFloat4Vector) outputVV).getMutator().setSafe(outputIndex, value);
+    }
+  }
+
+  public static class Int extends HiveFieldConverter {
+    public boolean setSafeValue(ObjectInspector oi, Object hiveFieldValue, ValueVector outputVV, int outputIndex) {
+      final int value = (int) ((IntObjectInspector)oi).getPrimitiveJavaObject(hiveFieldValue);
+      return ((NullableIntVector) outputVV).getMutator().setSafe(outputIndex, value);
+    }
+  }
+
+  public static class Long extends HiveFieldConverter {
+    public boolean setSafeValue(ObjectInspector oi, Object hiveFieldValue, ValueVector outputVV, int outputIndex) {
+      final long value = (long) ((LongObjectInspector)oi).getPrimitiveJavaObject(hiveFieldValue);
+      return ((NullableBigIntVector) outputVV).getMutator().setSafe(outputIndex, value);
+    }
+  }
+
+  public static class Short extends HiveFieldConverter {
+    public boolean setSafeValue(ObjectInspector oi, Object hiveFieldValue, ValueVector outputVV, int outputIndex) {
+      final short value = (short) ((ShortObjectInspector)oi).getPrimitiveJavaObject(hiveFieldValue);
+      return ((NullableSmallIntVector) outputVV).getMutator().setSafe(outputIndex, value);
+    }
+  }
+
+  public static class String extends HiveFieldConverter {
+    public boolean setSafeValue(ObjectInspector oi, Object hiveFieldValue, ValueVector outputVV, int outputIndex) {
+      final Text value = ((StringObjectInspector)oi).getPrimitiveWritableObject(hiveFieldValue);
+      final byte[] valueBytes = value.getBytes();
+      final int len = value.getLength();
+      return ((NullableVarCharVector) outputVV).getMutator().setSafe(outputIndex, valueBytes, 0, len);
+    }
+  }
+
+  public static class VarChar extends HiveFieldConverter {
+    public boolean setSafeValue(ObjectInspector oi, Object hiveFieldValue, ValueVector outputVV, int outputIndex) {
+      final Text value = ((HiveVarcharObjectInspector)oi).getPrimitiveWritableObject(hiveFieldValue).getTextValue();
+      final byte[] valueBytes = value.getBytes();
+      final int valueLen = value.getLength();
+      return ((NullableVarCharVector) outputVV).getMutator().setSafe(outputIndex, valueBytes, 0, valueLen);
+    }
+  }
+
+  public static class Timestamp extends HiveFieldConverter {
+    public boolean setSafeValue(ObjectInspector oi, Object hiveFieldValue, ValueVector outputVV, int outputIndex) {
+      final java.sql.Timestamp value = ((TimestampObjectInspector)oi).getPrimitiveJavaObject(hiveFieldValue);
+      final DateTime ts = new DateTime(value.getTime()).withZoneRetainFields(DateTimeZone.UTC);
+      return ((NullableTimeStampVector) outputVV).getMutator().setSafe(outputIndex, ts.getMillis());
+    }
+  }
+
+  public static class Date extends HiveFieldConverter {
+    public boolean setSafeValue(ObjectInspector oi, Object hiveFieldValue, ValueVector outputVV, int outputIndex) {
+      final java.sql.Date value = ((DateObjectInspector)oi).getPrimitiveJavaObject(hiveFieldValue);
+      final DateTime date = new DateTime(value.getTime()).withZoneRetainFields(DateTimeZone.UTC);
+      return ((NullableDateVector) outputVV).getMutator().setSafe(outputIndex, date.getMillis());
+    }
+  }
+}
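
A usage sketch of the converter above may help; this is illustrative only: the inspector (intInspector), Hive value (hiveValue), target vector (nullableIntVector), and row index (rowIndex) are assumed to come from HiveRecordReader's setup, and TypeInfoFactory is Hive's stock serde type factory.

    import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory;

    // Hypothetical wiring for a Hive INT column: create() consults primMap by
    // primitive category and instantiates the matching converter (Int here).
    HiveFieldConverter converter = HiveFieldConverter.create(TypeInfoFactory.intTypeInfo);

    // setSafeValue() copies one Hive value into the Drill vector at rowIndex;
    // it returns false once the target vector is out of space, which is the
    // signal to stop filling the current batch.
    boolean written = converter.setSafeValue(intInspector, hiveValue, nullableIntVector, rowIndex);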

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/980dc87b/contrib/storage-hive/core/src/main/java/org/apache/drill/exec/store/hive/HiveReadEntry.java
----------------------------------------------------------------------
diff --git a/contrib/storage-hive/core/src/main/java/org/apache/drill/exec/store/hive/HiveReadEntry.java b/contrib/storage-hive/core/src/main/java/org/apache/drill/exec/store/hive/HiveReadEntry.java
new file mode 100644
index 0000000..32f793e
--- /dev/null
+++ b/contrib/storage-hive/core/src/main/java/org/apache/drill/exec/store/hive/HiveReadEntry.java
@@ -0,0 +1,72 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.drill.exec.store.hive;
+
+import java.util.List;
+
+import net.hydromatic.optiq.Schema.TableType;
+
+import org.apache.hadoop.hive.metastore.api.Partition;
+import org.apache.hadoop.hive.metastore.api.Table;
+
+import com.fasterxml.jackson.annotation.JsonCreator;
+import com.fasterxml.jackson.annotation.JsonIgnore;
+import com.fasterxml.jackson.annotation.JsonProperty;
+import com.google.common.collect.Lists;
+
+public class HiveReadEntry {
+
+  @JsonProperty("table")
+  public HiveTable table;
+  @JsonProperty("partitions")
+  public List<HiveTable.HivePartition> partitions;
+
+  @JsonIgnore
+  private List<Partition> partitionsUnwrapped = Lists.newArrayList();
+
+  @JsonCreator
+  public HiveReadEntry(@JsonProperty("table") HiveTable table, @JsonProperty("partitions") List<HiveTable.HivePartition> partitions) {
+    this.table = table;
+    this.partitions = partitions;
+    if (partitions != null) {
+      for(HiveTable.HivePartition part : partitions) {
+        partitionsUnwrapped.add(part.getPartition());
+      }
+    }
+  }
+
+  @JsonIgnore
+  public Table getTable() {
+    return table.getTable();
+  }
+
+  @JsonIgnore
+  public List<Partition> getPartitions() {
+    return partitionsUnwrapped;
+  }
+
+  @JsonIgnore
+  public TableType getJdbcTableType() {
+    if (table.getTable().getTableType().equals(org.apache.hadoop.hive.metastore.TableType.VIRTUAL_VIEW.toString())) {
+      return TableType.VIEW;
+    }
+
+    return TableType.TABLE;
+  }
+}
+
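
Because HiveReadEntry is annotated for Jackson, it round-trips through JSON when plans are serialized. A minimal sketch, assuming a plain ObjectMapper and an existing readEntry instance (Drill's planner drives the real serialization):

    import com.fasterxml.jackson.databind.ObjectMapper;

    // Only "table" and "partitions" are written out; partitionsUnwrapped is
    // @JsonIgnore and is rebuilt inside the @JsonCreator constructor on read.
    ObjectMapper mapper = new ObjectMapper();
    String json = mapper.writeValueAsString(readEntry);
    HiveReadEntry roundTripped = mapper.readValue(json, HiveReadEntry.class);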


[19/32] git commit: DRILL-884: Always return a schema, even when there are no records

Posted by ja...@apache.org.
DRILL-884: Always return a schema, even when there are no records


Project: http://git-wip-us.apache.org/repos/asf/incubator-drill/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-drill/commit/a314c824
Tree: http://git-wip-us.apache.org/repos/asf/incubator-drill/tree/a314c824
Diff: http://git-wip-us.apache.org/repos/asf/incubator-drill/diff/a314c824

Branch: refs/heads/master
Commit: a314c824ba99edf0c29b004c121904847bab2c15
Parents: a3bf05d
Author: Steven Phillips <sp...@maprtech.com>
Authored: Wed Jun 4 22:24:12 2014 -0700
Committer: Jacques Nadeau <ja...@apache.org>
Committed: Thu Jun 19 20:30:55 2014 -0700

----------------------------------------------------------------------
 .../drill/exec/physical/impl/ScanBatch.java     | 18 ++++++++-
 .../drill/exec/physical/impl/ScreenCreator.java | 22 ++++++----
 .../exec/physical/impl/SingleSenderCreator.java |  6 ++-
 .../exec/physical/impl/TopN/TopNBatch.java      | 11 ++++-
 .../exec/physical/impl/WireRecordBatch.java     | 12 +++++-
 .../physical/impl/aggregate/HashAggBatch.java   | 25 ++++++++++--
 .../impl/aggregate/HashAggTemplate.java         |  2 +-
 .../impl/aggregate/StreamingAggBatch.java       | 27 ++++++++++---
 .../impl/aggregate/StreamingAggTemplate.java    |  9 ++++-
 .../exec/physical/impl/join/HashJoinBatch.java  |  5 ++-
 .../exec/physical/impl/join/MergeJoinBatch.java | 29 ++++++++------
 .../impl/materialize/QueryWritableBatch.java    | 24 ++++++++++-
 .../impl/mergereceiver/MergingRecordBatch.java  | 24 +++++++++++
 .../PartitionSenderRootExec.java                |  5 +--
 .../impl/project/ProjectRecordBatch.java        |  1 +
 .../impl/sort/SortRecordBatchBuilder.java       | 10 ++---
 .../IteratorValidatorBatchIterator.java         |  6 ++-
 .../physical/impl/xsort/ExternalSortBatch.java  | 40 ++++++++++++++-----
 .../impl/xsort/SingleBatchSorterTemplate.java   |  4 +-
 .../exec/record/AbstractSingleRecordBatch.java  | 11 ++++-
 .../apache/drill/exec/record/BatchSchema.java   |  9 +++++
 .../exec/record/FragmentWritableBatch.java      | 16 ++++++++
 .../drill/exec/record/RecordBatchLoader.java    |  4 +-
 .../apache/drill/exec/record/SchemaBuilder.java |  8 ++++
 .../org/apache/drill/TestExampleQueries.java    |  8 +++-
 .../test/resources/mergerecv/empty_batch.json   |  2 +-
 .../java/org/apache/drill/jdbc/DrillCursor.java | 17 +++++---
 .../drill/jdbc/test/TestJdbcDistQuery.java      | 42 +++++++++++++++-----
 .../resources/bootstrap-storage-plugins.json    |  3 ++
 29 files changed, 322 insertions(+), 78 deletions(-)
----------------------------------------------------------------------
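
The recurring pattern in the hunks below: each operator tracks whether it has produced any output yet, and when its input is exhausted before the first batch, it returns one empty batch carrying the schema (OK_NEW_SCHEMA) before ever returning NONE. A minimal sketch of that handshake, with hypothetical helpers noMoreData() and buildEmptySchemaBatch() standing in for each operator's specifics:

    private boolean first = true;
    private boolean done = false;

    public IterOutcome next() {
      if (done) {
        return IterOutcome.NONE;             // nothing may follow NONE
      }
      if (noMoreData()) {
        done = true;
        if (first) {
          first = false;
          buildEmptySchemaBatch();           // zero records, schema populated
          return IterOutcome.OK_NEW_SCHEMA;  // downstream still learns the schema
        }
        return IterOutcome.NONE;
      }
      first = false;
      return IterOutcome.OK;                 // ordinary data batch
    }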


http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/a314c824/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/ScanBatch.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/ScanBatch.java b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/ScanBatch.java
index 55d3f62..5f8bfb9 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/ScanBatch.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/ScanBatch.java
@@ -80,6 +80,8 @@ public class ScanBatch implements RecordBatch {
   private List<ValueVector> partitionVectors;
   private List<Integer> selectedPartitionColumns;
   private String partitionColumnDesignator;
+  private boolean first = true;
+  private boolean done = false;
 
   public ScanBatch(PhysicalOperator subScanConfig, FragmentContext context, Iterator<RecordReader> readers, List<String[]> partitionColumns, List<Integer> selectedPartitionColumns) throws ExecutionSetupException {
     this.context = context;
@@ -92,7 +94,7 @@ public class ScanBatch implements RecordBatch {
     this.partitionColumns = partitionColumns.iterator();
     this.partitionValues = this.partitionColumns.hasNext() ? this.partitionColumns.next() : null;
     this.selectedPartitionColumns = selectedPartitionColumns;
-    DrillConfig config = context.getConfig(); //This nonsense it is to not break all the stupid unit tests using SimpleRootExec
+    DrillConfig config = context.getConfig();
     this.partitionColumnDesignator = config == null ? "dir" : config.getString(ExecConstants.FILESYSTEM_PARTITION_COLUMN_LABEL);
     addPartitionVectors();
   }
@@ -122,11 +124,14 @@ public class ScanBatch implements RecordBatch {
   }
 
   private void releaseAssets() {
-    container.clear();
+    container.zeroVectors();
   }
 
   @Override
   public IterOutcome next() {
+    if (done) {
+      return IterOutcome.NONE;
+    }
     oContext.getStats().startProcessing();
     try {
       mutator.allocate(MAX_RECORD_CNT);
@@ -135,6 +140,14 @@ public class ScanBatch implements RecordBatch {
           if (!readers.hasNext()) {
             currentReader.cleanup();
             releaseAssets();
+            if (first) {
+              first = false;
+              done = true;
+              populatePartitionVectors();
+              container.buildSchema(SelectionVectorMode.NONE);
+              schema = container.getSchema();
+              return IterOutcome.OK_NEW_SCHEMA;
+            }
             return IterOutcome.NONE;
           }
           oContext.getStats().startSetup();
@@ -154,6 +167,7 @@ public class ScanBatch implements RecordBatch {
           return IterOutcome.STOP;
         }
       }
+      first = false;
 
       populatePartitionVectors();
       if (mutator.isNewSchema()) {

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/a314c824/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/ScreenCreator.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/ScreenCreator.java b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/ScreenCreator.java
index 86e77d8..9ad85af 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/ScreenCreator.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/ScreenCreator.java
@@ -64,6 +64,7 @@ public class ScreenCreator implements RootCreator<Screen>{
     final FragmentContext context;
     final UserClientConnection connection;
     private RecordMaterializer materializer;
+    private boolean first = true;
 
     public ScreenRoot(FragmentContext context, RecordBatch incoming, Screen config) throws OutOfMemoryException {
       super(context, config);
@@ -108,13 +109,18 @@ public class ScreenCreator implements RootCreator<Screen>{
       case NONE: {
         sendCount.waitForSendComplete();
 //        context.getStats().batchesCompleted.inc(1);
-        QueryResult header = QueryResult.newBuilder() //
-            .setQueryId(context.getHandle().getQueryId()) //
-            .setRowCount(0) //
-            .setDef(RecordBatchDef.getDefaultInstance()) //
-            .setIsLastChunk(true) //
-            .build();
-        QueryWritableBatch batch = new QueryWritableBatch(header);
+        QueryWritableBatch batch;
+        if (!first) {
+          QueryResult header = QueryResult.newBuilder() //
+              .setQueryId(context.getHandle().getQueryId()) //
+              .setRowCount(0) //
+              .setDef(RecordBatchDef.getDefaultInstance()) //
+              .setIsLastChunk(true) //
+              .build();
+          batch = new QueryWritableBatch(header);
+        } else {
+          batch = QueryWritableBatch.getEmptyBatchWithSchema(context.getHandle().getQueryId(), 0, true, incoming.getSchema());
+        }
         stats.startWait();
         try {
           connection.sendResult(listener, batch);
@@ -140,6 +146,7 @@ public class ScreenCreator implements RootCreator<Screen>{
         }
         sendCount.increment();
 
+        first = false;
         return true;
       default:
         throw new UnsupportedOperationException();
@@ -181,6 +188,7 @@ public class ScreenCreator implements RootCreator<Screen>{
     }
 
 
+
   }
 
 

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/a314c824/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/SingleSenderCreator.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/SingleSenderCreator.java b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/SingleSenderCreator.java
index 9e91468..1b63112 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/SingleSenderCreator.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/SingleSenderCreator.java
@@ -80,7 +80,8 @@ public class SingleSenderCreator implements RootCreator<SingleSender>{
       switch(out){
       case STOP:
       case NONE:
-        FragmentWritableBatch b2 = FragmentWritableBatch.getEmptyLast(handle.getQueryId(), handle.getMajorFragmentId(), handle.getMinorFragmentId(), recMajor, 0);
+        FragmentWritableBatch b2 = FragmentWritableBatch.getEmptyLastWithSchema(handle.getQueryId(), handle.getMajorFragmentId(),
+                handle.getMinorFragmentId(), recMajor, 0, incoming.getSchema());
         sendCount.increment();
         stats.startWait();
         try {
@@ -92,7 +93,8 @@ public class SingleSenderCreator implements RootCreator<SingleSender>{
 
       case OK_NEW_SCHEMA:
       case OK:
-        FragmentWritableBatch batch = new FragmentWritableBatch(false, handle.getQueryId(), handle.getMajorFragmentId(), handle.getMinorFragmentId(), recMajor, 0, incoming.getWritableBatch());
+        FragmentWritableBatch batch = new FragmentWritableBatch(false, handle.getQueryId(), handle.getMajorFragmentId(),
+                handle.getMinorFragmentId(), recMajor, 0, incoming.getWritableBatch());
         sendCount.increment();
         stats.startWait();
         try {

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/a314c824/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/TopN/TopNBatch.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/TopN/TopNBatch.java b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/TopN/TopNBatch.java
index c9cd2dd..4a5d368 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/TopN/TopNBatch.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/TopN/TopNBatch.java
@@ -46,7 +46,9 @@ import org.apache.drill.exec.physical.impl.svremover.Copier;
 import org.apache.drill.exec.physical.impl.svremover.RemovingRecordBatch;
 import org.apache.drill.exec.record.AbstractRecordBatch;
 import org.apache.drill.exec.record.BatchSchema;
+import org.apache.drill.exec.record.BatchSchema.SelectionVectorMode;
 import org.apache.drill.exec.record.ExpandableHyperContainer;
+import org.apache.drill.exec.record.MaterializedField;
 import org.apache.drill.exec.record.RecordBatch;
 import org.apache.drill.exec.record.TypedFieldId;
 import org.apache.drill.exec.record.VectorAccessible;
@@ -113,7 +115,14 @@ public class TopNBatch extends AbstractRecordBatch<TopN> {
     return sv4;
   }
 
-
+  @Override
+  public BatchSchema getSchema() {
+    List<MaterializedField> fields = Lists.newArrayList();
+    for (MaterializedField field : incoming.getSchema()) {
+      fields.add(field);
+    }
+    return BatchSchema.newBuilder().addFields(fields).setSelectionVectorMode(SelectionVectorMode.FOUR_BYTE).build();
+  }
 
   @Override
   public void cleanup() {

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/a314c824/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/WireRecordBatch.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/WireRecordBatch.java b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/WireRecordBatch.java
index 2a19ba7..1eae0c9 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/WireRecordBatch.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/WireRecordBatch.java
@@ -48,6 +48,7 @@ public class WireRecordBatch implements RecordBatch {
   private FragmentContext context;
   private BatchSchema schema;
   private OperatorStats stats;
+  private boolean first = true;
 
 
   public WireRecordBatch(FragmentContext context, RawFragmentBatchProvider fragProvider, RandomReceiver config) throws OutOfMemoryException {
@@ -114,13 +115,22 @@ public class WireRecordBatch implements RecordBatch {
         batch = fragProvider.getNext();
 
         // skip over empty batches. we do this since these are basically control messages.
-        while(batch != null && !batch.getHeader().getIsOutOfMemory() && batch.getHeader().getDef().getRecordCount() == 0){
+        while(batch != null && !batch.getHeader().getIsOutOfMemory() && batch.getHeader().getDef().getRecordCount() == 0 && !first){
+          if (first) {
+            first = false;
+            RecordBatchDef rbd = batch.getHeader().getDef();
+            batchLoader.load(rbd, batch.getBody());
+            batch.release();
+            schema = batchLoader.getSchema().clone();
+            batchLoader.clear();
+          }
           batch = fragProvider.getNext();
         }
       } finally {
         stats.stopWait();
       }
 
+      first = false;
 
       if (batch == null){
         batchLoader.clear();

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/a314c824/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/aggregate/HashAggBatch.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/aggregate/HashAggBatch.java b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/aggregate/HashAggBatch.java
index 6adc304..3609c02 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/aggregate/HashAggBatch.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/aggregate/HashAggBatch.java
@@ -40,9 +40,11 @@ import org.apache.drill.exec.expr.ValueVectorWriteExpression;
 import org.apache.drill.exec.ops.FragmentContext;
 import org.apache.drill.exec.physical.config.HashAggregate;
 import org.apache.drill.exec.record.AbstractRecordBatch;
+import org.apache.drill.exec.record.BatchSchema;
 import org.apache.drill.exec.record.BatchSchema.SelectionVectorMode;
 import org.apache.drill.exec.record.MaterializedField;
 import org.apache.drill.exec.record.RecordBatch;
+import org.apache.drill.exec.record.RecordBatch.IterOutcome;
 import org.apache.drill.exec.record.TypedFieldId;
 import org.apache.drill.exec.record.selection.SelectionVector2;
 import org.apache.drill.exec.record.selection.SelectionVector4;
@@ -65,6 +67,7 @@ public class HashAggBatch extends AbstractRecordBatch<HashAggregate> {
   private LogicalExpression[] aggrExprs;
   private TypedFieldId[] groupByOutFieldIds ;
   private TypedFieldId[] aggrOutFieldIds ;      // field ids for the outgoing batch
+  private boolean first = true;
 
   private final GeneratorMapping UPDATE_AGGR_INSIDE =
     GeneratorMapping.create("setupInterior" /* setup method */, "updateAggrValuesInternal" /* eval method */,
@@ -90,12 +93,16 @@ public class HashAggBatch extends AbstractRecordBatch<HashAggregate> {
 
   @Override
   public IterOutcome innerNext() {
+    if (done) {
+      return IterOutcome.NONE;
+    }
     // this is only called on the first batch. Beyond this, the aggregator manages batches.
     if (aggregator == null) {
       IterOutcome outcome = next(incoming);
       logger.debug("Next outcome of {}", outcome);
       switch (outcome) {
       case NONE:
+        throw new UnsupportedOperationException("Received NONE on first batch");
       case NOT_YET:
       case STOP:
         return outcome;
@@ -118,7 +125,13 @@ public class HashAggBatch extends AbstractRecordBatch<HashAggregate> {
 
   if (aggregator.buildComplete() && ! aggregator.allFlushed()) {
     // aggregation is complete and not all records have been output yet
-    return aggregator.outputCurrentBatch();
+    IterOutcome outcome = aggregator.outputCurrentBatch();
+    if (outcome == IterOutcome.NONE && first) {
+      first = false;
+      done = true;
+      return IterOutcome.OK_NEW_SCHEMA;
+    }
+    return outcome;
   }
 
   logger.debug("Starting aggregator doWork; incoming record count = {} ", incoming.getRecordCount());
@@ -128,11 +141,17 @@ public class HashAggBatch extends AbstractRecordBatch<HashAggregate> {
       logger.debug("Aggregator response {}, records {}", out, aggregator.getOutputCount());
       switch(out){
       case CLEANUP_AND_RETURN:
-        container.clear();
+        container.zeroVectors();
         aggregator.cleanup();
         done = true;
-        return aggregator.getOutcome();
+        // fall through
       case RETURN_OUTCOME:
+        IterOutcome outcome = aggregator.getOutcome();
+        if (outcome == IterOutcome.NONE && first) {
+          first = false;
+          done = true;
+          return IterOutcome.OK_NEW_SCHEMA;
+        }
         return aggregator.getOutcome();
       case UPDATE_AGGREGATOR:
         aggregator = null;

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/a314c824/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/aggregate/HashAggTemplate.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/aggregate/HashAggTemplate.java b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/aggregate/HashAggTemplate.java
index 5069a2d..935bbb3 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/aggregate/HashAggTemplate.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/aggregate/HashAggTemplate.java
@@ -429,7 +429,7 @@ public abstract class HashAggTemplate implements HashAggregator {
     // get the number of groups in the batch holder corresponding to this batch index
     int batchOutputRecords = batchHolders.get(outBatchIndex).getNumGroups();
     
-    if (batchOutputRecords == 0) {
+    if (!first && batchOutputRecords == 0) {
       this.outcome = IterOutcome.NONE;
       return outcome;
     }

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/a314c824/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/aggregate/StreamingAggBatch.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/aggregate/StreamingAggBatch.java b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/aggregate/StreamingAggBatch.java
index ec12de9..367d2c7 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/aggregate/StreamingAggBatch.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/aggregate/StreamingAggBatch.java
@@ -45,6 +45,7 @@ import org.apache.drill.exec.ops.FragmentContext;
 import org.apache.drill.exec.physical.config.StreamingAggregate;
 import org.apache.drill.exec.physical.impl.aggregate.StreamingAggregator.AggOutcome;
 import org.apache.drill.exec.record.AbstractRecordBatch;
+import org.apache.drill.exec.record.BatchSchema;
 import org.apache.drill.exec.record.BatchSchema.SelectionVectorMode;
 import org.apache.drill.exec.record.MaterializedField;
 import org.apache.drill.exec.record.RecordBatch;
@@ -65,6 +66,7 @@ public class StreamingAggBatch extends AbstractRecordBatch<StreamingAggregate> {
   private StreamingAggregator aggregator;
   private final RecordBatch incoming;
   private boolean done = false;
+  private boolean first = true;
 
   public StreamingAggBatch(StreamingAggregate popConfig, RecordBatch incoming, FragmentContext context) throws OutOfMemoryException {
     super(popConfig, context);
@@ -74,12 +76,17 @@ public class StreamingAggBatch extends AbstractRecordBatch<StreamingAggregate> {
   @Override
   public int getRecordCount() {
     if(done) return 0;
+    if (aggregator == null) return 0;
     return aggregator.getOutputCount();
   }
 
   @Override
   public IterOutcome innerNext() {
-    // this is only called on the first batch. Beyond this, the aggregator manages batches.
+    if (done) {
+      container.zeroVectors();
+      return IterOutcome.NONE;
+    }
+    // this is only called on the first batch. Beyond this, the aggregator manages batches.
     if (aggregator == null) {
       IterOutcome outcome = next(incoming);
       logger.debug("Next outcome of {}", outcome);
@@ -106,17 +113,25 @@ public class StreamingAggBatch extends AbstractRecordBatch<StreamingAggregate> {
       logger.debug("Aggregator response {}, records {}", out, aggregator.getOutputCount());
       switch(out){
       case CLEANUP_AND_RETURN:
-        container.clear();
+        if (!first) container.zeroVectors();
         done = true;
-        return aggregator.getOutcome();
+        // fall through
       case RETURN_OUTCOME:
-        return aggregator.getOutcome();
+        IterOutcome outcome = aggregator.getOutcome();
+        if (outcome == IterOutcome.NONE && first) {
+          first = false;
+          done = true;
+          return IterOutcome.OK_NEW_SCHEMA;
+        }
+        first = false;
+        return outcome;
       case UPDATE_AGGREGATOR:
+        first = false;
         aggregator = null;
         if(!createAggregator()){
           return IterOutcome.STOP;
-        }
-        continue;
+      }
+      continue;
       default:
         throw new IllegalStateException(String.format("Unknown state %s.", out));
       }

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/a314c824/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/aggregate/StreamingAggTemplate.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/aggregate/StreamingAggTemplate.java b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/aggregate/StreamingAggTemplate.java
index e73f21b..3bd861d 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/aggregate/StreamingAggTemplate.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/aggregate/StreamingAggTemplate.java
@@ -58,7 +58,7 @@ public abstract class StreamingAggTemplate implements StreamingAggregator {
     this.allocators = allocators;
     this.outgoing = outgoing;
     setupInterior(incoming, outgoing);
-    this.currentIndex = this.getVectorIndex(underlyingIndex);
+    this.currentIndex = incoming.getRecordCount() == 0 ? 0 : this.getVectorIndex(underlyingIndex);
   }
 
 
@@ -158,13 +158,18 @@ public abstract class StreamingAggTemplate implements StreamingAggregator {
 
         try{
           while(true){
+            if (previous != null) {
+              previous.clear();
+            }
             previous = new InternalBatch(incoming);
             IterOutcome out = outgoing.next(0, incoming);
             if(EXTRA_DEBUG) logger.debug("Received IterOutcome of {}", out);
             switch(out){
             case NONE:
               lastOutcome = out;
-              if(addedRecordCount > 0){
+              if (first && addedRecordCount == 0) {
+                return setOkAndReturn();
+              } else if(addedRecordCount > 0){
                 if( !outputToBatchPrev( previous, previousIndex, outputCount) ) remainderBatch = previous;
                 if(EXTRA_DEBUG) logger.debug("Received no more batches, returning.");
                 return setOkAndReturn();

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/a314c824/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/join/HashJoinBatch.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/join/HashJoinBatch.java b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/join/HashJoinBatch.java
index 11368e3..1c028d0 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/join/HashJoinBatch.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/join/HashJoinBatch.java
@@ -109,6 +109,8 @@ public class HashJoinBatch extends AbstractRecordBatch<HashJoinPOP> {
     // Schema of the build side
     private BatchSchema rightSchema = null;
 
+    private boolean first = true;
+
     // Generator mapping for the build side
     private static final GeneratorMapping PROJECT_BUILD = GeneratorMapping.create("doSetup"/* setup method */,
                                                                                   "projectBuildRecord" /* eval method */,
@@ -187,7 +189,8 @@ public class HashJoinBatch extends AbstractRecordBatch<HashJoinPOP> {
                  * 2. We've filled up the outgoing batch to the maximum and we need to return upstream
                  * Either case build the output container's schema and return
                  */
-                if (outputRecords > 0) {
+                if (outputRecords > 0 || first) {
+                  first = false;
 
                   // Build the container schema and set the counts
                   container.buildSchema(BatchSchema.SelectionVectorMode.NONE);

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/a314c824/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/join/MergeJoinBatch.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/join/MergeJoinBatch.java b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/join/MergeJoinBatch.java
index e32b653..6943d1a 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/join/MergeJoinBatch.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/join/MergeJoinBatch.java
@@ -110,6 +110,7 @@ public class MergeJoinBatch extends AbstractRecordBatch<MergeJoinPOP> {
   private final JoinRelType joinType;
   private JoinWorker worker;
   public MergeJoinBatchBuilder batchBuilder;
+  private boolean done = false;
 
   protected MergeJoinBatch(MergeJoinPOP popConfig, FragmentContext context, RecordBatch left, RecordBatch right) throws OutOfMemoryException {
     super(popConfig, context);
@@ -136,6 +137,9 @@ public class MergeJoinBatch extends AbstractRecordBatch<MergeJoinPOP> {
 
   @Override
   public IterOutcome innerNext() {
+    if (done) {
+      return IterOutcome.NONE;
+    }
     // we do this in the here instead of the constructor because don't necessary want to start consuming on construction.
     status.ensureInitial();
 
@@ -190,9 +194,10 @@ public class MergeJoinBatch extends AbstractRecordBatch<MergeJoinPOP> {
         kill();
         return IterOutcome.STOP;
       case NO_MORE_DATA:
-        logger.debug("NO MORE DATA; returning {}", (status.getOutPosition() > 0 ? (first ? "OK_NEW_SCHEMA" : "OK") : "NONE"));
+        logger.debug("NO MORE DATA; returning {}", (status.getOutPosition() > 0 ? (first ? "OK_NEW_SCHEMA" : "OK") : (first ? "OK_NEW_SCHEMA" :"NONE")));
         setRecordCountInContainer();
-        return status.getOutPosition() > 0 ? (first ? IterOutcome.OK_NEW_SCHEMA : IterOutcome.OK): IterOutcome.NONE;
+        done = true;
+        return status.getOutPosition() > 0 ? (first ? IterOutcome.OK_NEW_SCHEMA : IterOutcome.OK): (first ? IterOutcome.OK_NEW_SCHEMA : IterOutcome.NONE);
       case SCHEMA_CHANGED:
         worker = null;
         if(status.getOutPosition() > 0){
@@ -349,7 +354,7 @@ public class MergeJoinBatch extends AbstractRecordBatch<MergeJoinPOP> {
     //////////////////////
     cg.setMappingSet(copyLeftMapping);
     int vectorId = 0;
-    if (status.isLeftPositionAllowed()) {
+    if (worker == null || status.isLeftPositionAllowed()) {
       for (VectorWrapper<?> vw : left) {
         MajorType inputType = vw.getField().getType();
         MajorType outputType;
@@ -379,7 +384,7 @@ public class MergeJoinBatch extends AbstractRecordBatch<MergeJoinPOP> {
     cg.setMappingSet(copyRightMappping);
 
     int rightVectorBase = vectorId;
-    if (status.isRightPositionAllowed()) {
+    if (worker == null || status.isRightPositionAllowed()) {
       for (VectorWrapper<?> vw : right) {
         MajorType inputType = vw.getField().getType();
         MajorType outputType;
@@ -414,12 +419,12 @@ public class MergeJoinBatch extends AbstractRecordBatch<MergeJoinPOP> {
     container.clear();
 
     //estimation of joinBatchSize : max of left/right size, expanded by a factor of 16, which is then bounded by MAX_BATCH_SIZE.
-    int leftCount = status.isLeftPositionAllowed() ? left.getRecordCount() : 0;
-    int rightCount = status.isRightPositionAllowed() ? right.getRecordCount() : 0;
+    int leftCount = worker == null ? left.getRecordCount() : (status.isLeftPositionAllowed() ? left.getRecordCount() : 0);
+    int rightCount = worker == null ? right.getRecordCount() : (status.isRightPositionAllowed() ? right.getRecordCount() : 0);
     int joinBatchSize = Math.min(Math.max(leftCount, rightCount) * 16, MAX_BATCH_SIZE);
 
     // add fields from both batches
-    if (leftCount > 0) {
+    if (worker == null || leftCount > 0) {
 
       for (VectorWrapper<?> w : left) {
         MajorType inputType = w.getField().getType();
@@ -430,12 +435,12 @@ public class MergeJoinBatch extends AbstractRecordBatch<MergeJoinPOP> {
           outputType = inputType;
         }
         ValueVector outgoingVector = TypeHelper.getNewVector(MaterializedField.create(w.getField().getPath(), outputType), oContext.getAllocator());
-        VectorAllocator.getAllocator(outgoingVector, (int) Math.ceil(w.getValueVector().getBufferSize() / left.getRecordCount())).alloc(joinBatchSize);
+        VectorAllocator.getAllocator(outgoingVector, (int) Math.ceil(w.getValueVector().getBufferSize() / Math.max(1, left.getRecordCount()))).alloc(joinBatchSize);
         container.add(outgoingVector);
       }
     }
 
-    if (rightCount > 0) {
+    if (worker == null || rightCount > 0) {
       for (VectorWrapper<?> w : right) {
         MajorType inputType = w.getField().getType();
         MajorType outputType;
@@ -445,7 +450,7 @@ public class MergeJoinBatch extends AbstractRecordBatch<MergeJoinPOP> {
           outputType = inputType;
         }
         ValueVector outgoingVector = TypeHelper.getNewVector(MaterializedField.create(w.getField().getPath(), outputType), oContext.getAllocator());
-        VectorAllocator.getAllocator(outgoingVector, (int) Math.ceil(w.getValueVector().getBufferSize() / right.getRecordCount())).alloc(joinBatchSize);
+        VectorAllocator.getAllocator(outgoingVector, (int) Math.ceil(w.getValueVector().getBufferSize() / Math.max(1, right.getRecordCount()))).alloc(joinBatchSize);
         container.add(outgoingVector);
       }
     }
@@ -465,7 +470,7 @@ public class MergeJoinBatch extends AbstractRecordBatch<MergeJoinPOP> {
 
       // materialize value vector readers from join expression
       LogicalExpression materializedLeftExpr;
-      if (status.isLeftPositionAllowed()) {
+      if (worker == null || status.isLeftPositionAllowed()) {
         materializedLeftExpr = ExpressionTreeMaterializer.materialize(leftFieldExpr, left, collector, context.getFunctionRegistry());
       } else {
         materializedLeftExpr = new TypedNullConstant(Types.optional(MinorType.INT));
@@ -475,7 +480,7 @@ public class MergeJoinBatch extends AbstractRecordBatch<MergeJoinPOP> {
             "Failure while trying to materialize incoming left field.  Errors:\n %s.", collector.toErrorString()));
 
       LogicalExpression materializedRightExpr;
-      if (status.isRightPositionAllowed()) {
+      if (worker == null || status.isRightPositionAllowed()) {
         materializedRightExpr = ExpressionTreeMaterializer.materialize(rightFieldExpr, right, collector, context.getFunctionRegistry());
       } else {
         materializedRightExpr = new TypedNullConstant(Types.optional(MinorType.INT));

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/a314c824/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/materialize/QueryWritableBatch.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/materialize/QueryWritableBatch.java b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/materialize/QueryWritableBatch.java
index aba7370..c219cce 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/materialize/QueryWritableBatch.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/materialize/QueryWritableBatch.java
@@ -18,10 +18,18 @@
 package org.apache.drill.exec.physical.impl.materialize;
 
 import java.util.Arrays;
+import java.util.List;
 
+import com.google.common.collect.Lists;
 import io.netty.buffer.ByteBuf;
 
+import org.apache.drill.exec.proto.UserBitShared.QueryId;
 import org.apache.drill.exec.proto.UserBitShared.QueryResult;
+import org.apache.drill.exec.proto.UserBitShared.RecordBatchDef;
+import org.apache.drill.exec.proto.UserBitShared.SerializedField;
+import org.apache.drill.exec.record.BatchSchema;
+import org.apache.drill.exec.record.MaterializedField;
+import org.apache.drill.exec.record.WritableBatch;
 
 public class QueryWritableBatch {
   static final org.slf4j.Logger logger = org.slf4j.LoggerFactory.getLogger(QueryWritableBatch.class);
@@ -48,6 +56,20 @@ public class QueryWritableBatch {
   public String toString() {
     return "QueryWritableBatch [header=" + header + ", buffers=" + Arrays.toString(buffers) + "]";
   }
-  
+
+  public static QueryWritableBatch getEmptyBatchWithSchema(QueryId queryId, int rowCount, boolean isLastChunk, BatchSchema schema) {
+    List<SerializedField> fields = Lists.newArrayList();
+    for (MaterializedField field : schema) {
+      fields.add(field.getAsBuilder().build());
+    }
+    RecordBatchDef def = RecordBatchDef.newBuilder().addAllField(fields).build();
+    QueryResult header = QueryResult.newBuilder() //
+            .setQueryId(queryId) //
+            .setRowCount(rowCount) //
+            .setDef(def) //
+            .setIsLastChunk(isLastChunk) //
+            .build();
+    return new QueryWritableBatch(header);
+  }
   
 }

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/a314c824/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/mergereceiver/MergingRecordBatch.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/mergereceiver/MergingRecordBatch.java b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/mergereceiver/MergingRecordBatch.java
index a5d80b0..9351844 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/mergereceiver/MergingRecordBatch.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/mergereceiver/MergingRecordBatch.java
@@ -46,6 +46,7 @@ import org.apache.drill.exec.physical.config.MergingReceiverPOP;
 import org.apache.drill.exec.proto.UserBitShared;
 import org.apache.drill.exec.record.AbstractRecordBatch;
 import org.apache.drill.exec.record.BatchSchema;
+import org.apache.drill.exec.record.BatchSchema.SelectionVectorMode;
 import org.apache.drill.exec.record.ExpandableHyperContainer;
 import org.apache.drill.exec.record.RawFragmentBatch;
 import org.apache.drill.exec.record.RawFragmentBatchProvider;
@@ -97,6 +98,8 @@ public class MergingRecordBatch extends AbstractRecordBatch<MergingReceiverPOP>
   private int[] batchOffsets;
   private PriorityQueue <Node> pqueue;
   private List<VectorAllocator> allocators;
+  private RawFragmentBatch emptyBatch = null;
+  private boolean done = false;
 
   public static enum Metric implements MetricDef{
     NEXT_WAIT_NANOS;
@@ -134,6 +137,7 @@ public class MergingRecordBatch extends AbstractRecordBatch<MergingReceiverPOP>
   @Override
   public IterOutcome innerNext() {
     if (fragProviders.length == 0) return IterOutcome.NONE;
+    if (done) return IterOutcome.NONE;
     boolean schemaChanged = false;
 
     if (prevBatchWasFull) {
@@ -155,6 +159,7 @@ public class MergingRecordBatch extends AbstractRecordBatch<MergingReceiverPOP>
 
       // set up each (non-empty) incoming record batch
       List<RawFragmentBatch> rawBatches = Lists.newArrayList();
+      boolean firstBatch = true;
       for (RawFragmentBatchProvider provider : fragProviders) {
         RawFragmentBatch rawBatch = null;
         try {
@@ -165,12 +170,31 @@ public class MergingRecordBatch extends AbstractRecordBatch<MergingReceiverPOP>
         }
         if (rawBatch.getHeader().getDef().getRecordCount() != 0) {
           rawBatches.add(rawBatch);
+        } else if (emptyBatch == null) {
+          emptyBatch = rawBatch;
+        }
+        if (firstBatch) {
+          schema = BatchSchema.newBuilder().addSerializedFields(rawBatch.getHeader().getDef().getFieldList()).build();
         }
       }
 
       // allocate the incoming record batch loaders
       senderCount = rawBatches.size();
       if (senderCount == 0) {
+        if (firstBatch) {
+          RecordBatchLoader loader = new RecordBatchLoader(oContext.getAllocator());
+          try {
+            loader.load(emptyBatch.getHeader().getDef(), emptyBatch.getBody());
+          } catch (SchemaChangeException e) {
+            throw new RuntimeException(e);
+          }
+          for (VectorWrapper w : loader) {
+            outgoingContainer.add(w.getValueVector());
+          }
+          outgoingContainer.buildSchema(SelectionVectorMode.NONE);
+          done = true;
+          return IterOutcome.OK_NEW_SCHEMA;
+        }
         return IterOutcome.NONE;
       }
       incomingBatches = new RawFragmentBatch[senderCount];

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/a314c824/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/partitionsender/PartitionSenderRootExec.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/partitionsender/PartitionSenderRootExec.java b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/partitionsender/PartitionSenderRootExec.java
index 23296fb..c4844d5 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/partitionsender/PartitionSenderRootExec.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/partitionsender/PartitionSenderRootExec.java
@@ -208,18 +208,17 @@ public class PartitionSenderRootExec extends BaseRootExec {
   public void sendEmptyBatch() {
     FragmentHandle handle = context.getHandle();
     int fieldId = 0;
-    VectorContainer container = new VectorContainer();
     StatusHandler statusHandler = new StatusHandler(sendCount, context);
     for (DrillbitEndpoint endpoint : popConfig.getDestinations()) {
       FragmentHandle opposite = context.getHandle().toBuilder().setMajorFragmentId(popConfig.getOppositeMajorFragmentId()).setMinorFragmentId(fieldId).build();
       DataTunnel tunnel = context.getDataTunnel(endpoint, opposite);
-      FragmentWritableBatch writableBatch = new FragmentWritableBatch(true,
+      FragmentWritableBatch writableBatch = FragmentWritableBatch.getEmptyLastWithSchema(
               handle.getQueryId(),
               handle.getMajorFragmentId(),
               handle.getMinorFragmentId(),
               operator.getOppositeMajorFragmentId(),
               fieldId,
-              WritableBatch.getBatchNoHVWrap(0, container, false));
+              incoming.getSchema());
       stats.startWait();
       try {
         tunnel.sendRecordBatch(statusHandler, writableBatch);

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/a314c824/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/project/ProjectRecordBatch.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/project/ProjectRecordBatch.java b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/project/ProjectRecordBatch.java
index 5ee01f1..e6ddf90 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/project/ProjectRecordBatch.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/project/ProjectRecordBatch.java
@@ -51,6 +51,7 @@ import org.apache.drill.exec.memory.OutOfMemoryException;
 import org.apache.drill.exec.ops.FragmentContext;
 import org.apache.drill.exec.physical.config.Project;
 import org.apache.drill.exec.record.AbstractSingleRecordBatch;
+import org.apache.drill.exec.record.BatchSchema;
 import org.apache.drill.exec.record.BatchSchema.SelectionVectorMode;
 import org.apache.drill.exec.record.MaterializedField;
 import org.apache.drill.exec.record.RecordBatch;

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/a314c824/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/sort/SortRecordBatchBuilder.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/sort/SortRecordBatchBuilder.java b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/sort/SortRecordBatchBuilder.java
index bf9db9a..ba200f6 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/sort/SortRecordBatchBuilder.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/sort/SortRecordBatchBuilder.java
@@ -69,11 +69,12 @@ public class SortRecordBatchBuilder {
    */
   public boolean add(VectorAccessible batch){
     if(batch.getSchema().getSelectionVectorMode() == SelectionVectorMode.FOUR_BYTE) throw new UnsupportedOperationException("A sort cannot currently work against a sv4 batch.");
-    if (batch.getRecordCount() == 0)
+    if (batch.getRecordCount() == 0 && batches.size() > 0) {
       return true; // skip over empty record batches.
+    }
 
     long batchBytes = getSize(batch);
-    if (batchBytes == 0) {
+    if (batchBytes == 0 && batches.size() > 0) {
       return true;
     }
     if(batchBytes + runningBytes > maxBytes) return false; // enough data memory.
@@ -81,7 +82,6 @@ public class SortRecordBatchBuilder {
     if(!svAllocator.preAllocate(batch.getRecordCount()*4)) return false;  // sv allocation available.
 
 
-    if (batch.getRecordCount() == 0) return true;
     RecordBatchData bd = new RecordBatchData(batch);
     runningBytes += batchBytes;
     batches.put(batch.getSchema(), bd);
@@ -91,7 +91,7 @@ public class SortRecordBatchBuilder {
 
   public boolean add(RecordBatchData rbd) {
     long batchBytes = getSize(rbd.getContainer());
-    if (batchBytes == 0) {
+    if (batchBytes == 0 && batches.size() > 0) {
       return true;
     }
     if(batchBytes + runningBytes > maxBytes) {
@@ -105,7 +105,7 @@ public class SortRecordBatchBuilder {
     }
 
 
-    if (rbd.getRecordCount() == 0) return true;
+    if (rbd.getRecordCount() == 0 && batches.size() > 0) return true;
     runningBytes += batchBytes;
     batches.put(rbd.getContainer().getSchema(), rbd);
     recordCount += rbd.getRecordCount();

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/a314c824/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/validate/IteratorValidatorBatchIterator.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/validate/IteratorValidatorBatchIterator.java b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/validate/IteratorValidatorBatchIterator.java
index 19f6497..ee8f37a 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/validate/IteratorValidatorBatchIterator.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/validate/IteratorValidatorBatchIterator.java
@@ -35,6 +35,7 @@ public class IteratorValidatorBatchIterator implements RecordBatch {
 
   private IterOutcome state = IterOutcome.NOT_YET;
   private final RecordBatch incoming;
+  private boolean first = true;
 
   public IteratorValidatorBatchIterator(RecordBatch incoming) {
     this.incoming = incoming;
@@ -67,7 +68,6 @@ public class IteratorValidatorBatchIterator implements RecordBatch {
 
   @Override
   public BatchSchema getSchema() {
-    validateReadState();
     return incoming.getSchema();
   }
 
@@ -110,6 +110,10 @@ public class IteratorValidatorBatchIterator implements RecordBatch {
   public IterOutcome next() {
     if(state == IterOutcome.NONE ) throw new IllegalStateException("The incoming iterator has previously moved to a state of NONE. You should not be attempting to call next() again.");
     state = incoming.next();
+    if (first && state == IterOutcome.NONE) {
+      throw new IllegalStateException("The incoming iterator returned a state of NONE on the first batch. There should always be at least one batch output before returning NONE");
+    }
+    if (first) first = !first;
 
     if(state == IterOutcome.OK || state == IterOutcome.OK_NEW_SCHEMA) {
       BatchSchema schema = incoming.getSchema();

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/a314c824/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/xsort/ExternalSortBatch.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/xsort/ExternalSortBatch.java b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/xsort/ExternalSortBatch.java
index 02b9ba0..237a631 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/xsort/ExternalSortBatch.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/xsort/ExternalSortBatch.java
@@ -102,6 +102,8 @@ public class ExternalSortBatch extends AbstractRecordBatch<ExternalSort> {
   private int spillCount = 0;
   private int batchesSinceLastSpill = 0;
   private long uid;//used for spill files to ensure multiple sorts within same fragment don't clobber each others' files
+  private boolean useIncomingSchema = false;
+  private boolean first = true;
 
   public ExternalSortBatch(ExternalSort popConfig, FragmentContext context, RecordBatch incoming) throws OutOfMemoryException {
     super(popConfig, context);
@@ -144,6 +146,17 @@ public class ExternalSortBatch extends AbstractRecordBatch<ExternalSort> {
     return this.sv4;
   }
 
+  @Override
+  public BatchSchema getSchema() {
+    if (useIncomingSchema) {
+      List<MaterializedField> fields = Lists.newArrayList();
+      for (MaterializedField field : incoming.getSchema()) {
+        fields.add(field);
+      }
+      return BatchSchema.newBuilder().addFields(fields).setSelectionVectorMode(SelectionVectorMode.FOUR_BYTE).build();
+    }
+    return super.getSchema();
+  }
 
 
   @Override
@@ -205,6 +218,7 @@ public class ExternalSortBatch extends AbstractRecordBatch<ExternalSort> {
 //        logger.debug("Took {} us to get next", watch.elapsed(TimeUnit.MICROSECONDS));
         switch (upstream) {
         case NONE:
+          assert !first;
           break outer;
         case NOT_YET:
           throw new UnsupportedOperationException();
@@ -212,6 +226,7 @@ public class ExternalSortBatch extends AbstractRecordBatch<ExternalSort> {
           return upstream;
         case OK_NEW_SCHEMA:
           // only change in the case that the schema truly changes.  Artificial schema changes are ignored.
+          first = false;
           if(!incoming.getSchema().equals(schema)){
             if (schema != null) throw new UnsupportedOperationException("Sort doesn't currently support sorts with changing schemas.");
             this.schema = incoming.getSchema();
@@ -220,6 +235,9 @@ public class ExternalSortBatch extends AbstractRecordBatch<ExternalSort> {
           // fall through.
         case OK:
           SelectionVector2 sv2;
+//          if (incoming.getRecordCount() == 0) {
+//            break outer;
+//          }
           if (incoming.getSchema().getSelectionVectorMode() == BatchSchema.SelectionVectorMode.TWO_BYTE) {
             sv2 = incoming.getSelectionVector2();
           } else {
@@ -231,9 +249,9 @@ public class ExternalSortBatch extends AbstractRecordBatch<ExternalSort> {
           }
           int count = sv2.getCount();
           totalcount += count;
-          if (count == 0) {
-            break outer;
-          }
+//          if (count == 0) {
+//            break outer;
+//          }
           sorter.setup(context, sv2, incoming);
           Stopwatch w = new Stopwatch();
           w.start();
@@ -261,18 +279,20 @@ public class ExternalSortBatch extends AbstractRecordBatch<ExternalSort> {
         }
       }
 
-      if (schema == null || totalcount == 0){
-        // builder may be null at this point if the first incoming batch is empty
-        return IterOutcome.NONE;
-      }
+//      if (schema == null || totalcount == 0){
+//        builder may be null at this point if the first incoming batch is empty
+//        useIncomingSchema = true;
+//        return IterOutcome.NONE;
+//      }
 
       if (spillCount == 0) {
         Stopwatch watch = new Stopwatch();
         watch.start();
-        if (schema == null){
+//        if (schema == null){
           // builder may be null at this point if the first incoming batch is empty
-          return IterOutcome.NONE;
-        }
+//          useIncomingSchema = true;
+//          return IterOutcome.NONE;
+//        }
 
         builder = new SortRecordBatchBuilder(oContext.getAllocator(), MAX_SORT_BYTES);
 

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/a314c824/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/xsort/SingleBatchSorterTemplate.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/xsort/SingleBatchSorterTemplate.java b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/xsort/SingleBatchSorterTemplate.java
index 0ba84f9..3cb7641 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/xsort/SingleBatchSorterTemplate.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/xsort/SingleBatchSorterTemplate.java
@@ -48,7 +48,9 @@ public abstract class SingleBatchSorterTemplate implements SingleBatchSorter, In
     QuickSort qs = new QuickSort();
     Stopwatch watch = new Stopwatch();
     watch.start();
-    qs.sort(this, 0, vector2.getCount());
+    if (vector2.getCount() > 0) {
+      qs.sort(this, 0, vector2.getCount());
+    }
     logger.debug("Took {} us to sort {} records", watch.elapsed(TimeUnit.MICROSECONDS), vector2.getCount());
   }
 

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/a314c824/exec/java-exec/src/main/java/org/apache/drill/exec/record/AbstractSingleRecordBatch.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/record/AbstractSingleRecordBatch.java b/exec/java-exec/src/main/java/org/apache/drill/exec/record/AbstractSingleRecordBatch.java
index 9473945..721755d 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/record/AbstractSingleRecordBatch.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/record/AbstractSingleRecordBatch.java
@@ -43,7 +43,6 @@ public abstract class AbstractSingleRecordBatch<T extends PhysicalOperator> exte
   @Override
   public IterOutcome innerNext() {
     IterOutcome upstream = next(incoming);
-    if(first && upstream == IterOutcome.OK) upstream = IterOutcome.OK_NEW_SCHEMA;
     if (!first && upstream == IterOutcome.OK && incoming.getRecordCount() == 0) {
       do {
         for (VectorWrapper w : incoming) {
@@ -51,15 +50,17 @@ public abstract class AbstractSingleRecordBatch<T extends PhysicalOperator> exte
         }
       } while ((upstream = next(incoming)) == IterOutcome.OK && incoming.getRecordCount() == 0);
     }
-    first = false;
+    if(first && upstream == IterOutcome.OK) upstream = IterOutcome.OK_NEW_SCHEMA;
     switch(upstream){
     case NONE:
+      assert !first;
     case NOT_YET:
     case STOP:
       return upstream;
     case OUT_OF_MEMORY:
       return upstream;
     case OK_NEW_SCHEMA:
+      first = false;
       try{
         stats.startSetup();
         setupNewSchema();
@@ -73,6 +74,7 @@ public abstract class AbstractSingleRecordBatch<T extends PhysicalOperator> exte
       }
       // fall through.
     case OK:
+      assert !first : "First batch should be OK_NEW_SCHEMA";
       doWork();
       if (outOfMemory) {
         outOfMemory = false;
@@ -91,6 +93,11 @@ public abstract class AbstractSingleRecordBatch<T extends PhysicalOperator> exte
     incoming.cleanup();
   }
 
+  @Override
+  public BatchSchema getSchema() {
+    return container.getSchema();
+  }
+
   protected abstract void setupNewSchema() throws SchemaChangeException;
   protected abstract void doWork();
 }

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/a314c824/exec/java-exec/src/main/java/org/apache/drill/exec/record/BatchSchema.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/record/BatchSchema.java b/exec/java-exec/src/main/java/org/apache/drill/exec/record/BatchSchema.java
index b4da6e0..5af3fb8 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/record/BatchSchema.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/record/BatchSchema.java
@@ -17,6 +17,9 @@
  */
 package org.apache.drill.exec.record;
 
+
+import com.google.common.collect.Lists;
+
 import java.util.Iterator;
 import java.util.List;
 
@@ -54,6 +57,12 @@ public class BatchSchema implements Iterable<MaterializedField> {
     return selectionVectorMode;
   }
 
+  public BatchSchema clone() {
+    List<MaterializedField> newFields = Lists.newArrayList();
+    newFields.addAll(fields);
+    return new BatchSchema(selectionVectorMode, newFields);
+  }
+
   @Override
   public String toString() {
     return "BatchSchema [fields=" + fields + ", selectionVector=" + selectionVectorMode + "]";

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/a314c824/exec/java-exec/src/main/java/org/apache/drill/exec/record/FragmentWritableBatch.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/record/FragmentWritableBatch.java b/exec/java-exec/src/main/java/org/apache/drill/exec/record/FragmentWritableBatch.java
index 33fd5a2..ef7b5f2 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/record/FragmentWritableBatch.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/record/FragmentWritableBatch.java
@@ -17,12 +17,17 @@
  */
 package org.apache.drill.exec.record;
 
+import com.google.common.collect.Lists;
 import io.netty.buffer.ByteBuf;
 
 import org.apache.drill.exec.proto.BitData.FragmentRecordBatch;
 import org.apache.drill.exec.proto.ExecProtos.FragmentHandle;
 import org.apache.drill.exec.proto.UserBitShared.QueryId;
 import org.apache.drill.exec.proto.UserBitShared.RecordBatchDef;
+import org.apache.drill.exec.proto.UserBitShared.RecordBatchDefOrBuilder;
+import org.apache.drill.exec.proto.UserBitShared.SerializedField;
+
+import java.util.List;
 
 public class FragmentWritableBatch{
   static final org.slf4j.Logger logger = org.slf4j.LoggerFactory.getLogger(FragmentWritableBatch.class);
@@ -59,6 +64,17 @@ public class FragmentWritableBatch{
     return new FragmentWritableBatch(true, queryId, sendMajorFragmentId, sendMinorFragmentId, receiveMajorFragmentId, receiveMinorFragmentId, EMPTY_DEF);
   }
 
+  public static FragmentWritableBatch getEmptyLastWithSchema(QueryId queryId, int sendMajorFragmentId, int sendMinorFragmentId,
+                                                             int receiveMajorFragmentId, int receiveMinorFragmentId, BatchSchema schema){
+
+    List<SerializedField> fields = Lists.newArrayList();
+    for (MaterializedField field : schema) {
+      fields.add(field.getAsBuilder().build());
+    }
+    RecordBatchDef def = RecordBatchDef.newBuilder().addAllField(fields).build();
+    return new FragmentWritableBatch(true, queryId, sendMajorFragmentId, sendMinorFragmentId, receiveMajorFragmentId, receiveMinorFragmentId, def);
+  }
+
   public ByteBuf[] getBuffers(){
     return buffers;
   }
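
The new factory lets a sender that produced no rows still ship its schema in the terminal batch, so downstream receivers and clients can report column metadata. A hedged usage sketch follows; the tunnel and the fragment-id variable names are assumptions for illustration, not code from this commit:

    // Build a last batch that carries only the schema, then send it.
    FragmentWritableBatch last = FragmentWritableBatch.getEmptyLastWithSchema(
        queryId, sendMajorId, sendMinorId, recvMajorId, recvMinorId, batch.getSchema());
    tunnel.sendRecordBatch(listener, last);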

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/a314c824/exec/java-exec/src/main/java/org/apache/drill/exec/record/RecordBatchLoader.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/record/RecordBatchLoader.java b/exec/java-exec/src/main/java/org/apache/drill/exec/record/RecordBatchLoader.java
index cac610c..33bcb3b 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/record/RecordBatchLoader.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/record/RecordBatchLoader.java
@@ -25,6 +25,7 @@ import java.util.Map;
 
 import javax.jdo.metadata.FieldMetadata;
 
+import io.netty.buffer.EmptyByteBuf;
 import org.apache.drill.common.expression.SchemaPath;
 import org.apache.drill.exec.exception.SchemaChangeException;
 import org.apache.drill.exec.expr.TypeHelper;
@@ -85,7 +86,8 @@ public class RecordBatchLoader implements VectorAccessible, Iterable<VectorWrapp
         v = TypeHelper.getNewVector(fieldDef, allocator);
       }
       if (fmd.getValueCount() == 0){
-        v.clear();
+//        v.clear();
+        v.load(fmd, new EmptyByteBuf(allocator.getUnderlyingAllocator()));
       } else {
         v.load(fmd, buf.slice(bufOffset, fmd.getBufferLength()));
       }

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/a314c824/exec/java-exec/src/main/java/org/apache/drill/exec/record/SchemaBuilder.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/record/SchemaBuilder.java b/exec/java-exec/src/main/java/org/apache/drill/exec/record/SchemaBuilder.java
index c954354..f405585 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/record/SchemaBuilder.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/record/SchemaBuilder.java
@@ -21,6 +21,7 @@ import java.util.LinkedHashSet;
 import java.util.List;
 
 import org.apache.drill.exec.exception.SchemaChangeException;
+import org.apache.drill.exec.proto.UserBitShared.SerializedField;
 import org.apache.drill.exec.record.BatchSchema.SelectionVectorMode;
 
 import com.google.common.collect.Lists;
@@ -64,6 +65,13 @@ public class SchemaBuilder {
     }
     return this;
   }
+
+  public SchemaBuilder addSerializedFields(Iterable<SerializedField> fields) {
+    for (SerializedField f : fields) {
+      addField(MaterializedField.create(f));
+    }
+    return this;
+  }
   
 //  private void setTypedField(short fieldId, DataType type, boolean nullable, ValueMode mode, Class<?> valueClass)
 //      throws SchemaChangeException {

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/a314c824/exec/java-exec/src/test/java/org/apache/drill/TestExampleQueries.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/test/java/org/apache/drill/TestExampleQueries.java b/exec/java-exec/src/test/java/org/apache/drill/TestExampleQueries.java
index 1d6ca33..308db3d 100644
--- a/exec/java-exec/src/test/java/org/apache/drill/TestExampleQueries.java
+++ b/exec/java-exec/src/test/java/org/apache/drill/TestExampleQueries.java
@@ -23,6 +23,11 @@ import org.junit.Test;
 
 public class TestExampleQueries extends BaseTestQuery{
   static final org.slf4j.Logger logger = org.slf4j.LoggerFactory.getLogger(TestExampleQueries.class);
+
+  @Test
+  public void testQ() throws Exception {
+    test("select * from cp.`customer.json` where 0 = 1");
+  }
   
   @Test // see DRILL-553
   public void testQueryWithNullValues() throws Exception {
@@ -102,6 +107,7 @@ public class TestExampleQueries extends BaseTestQuery{
 
   @Test
   public void testJoin() throws Exception{
+    test("alter session set `planner.enable_hashjoin` = false");
     test("SELECT\n" +
         "  nations.N_NAME,\n" +
         "  regions.R_NAME\n" +
@@ -109,7 +115,7 @@ public class TestExampleQueries extends BaseTestQuery{
         "  dfs.`[WORKING_PATH]/../../sample-data/nation.parquet` nations\n" +
         "JOIN\n" +
         "  dfs.`[WORKING_PATH]/../../sample-data/region.parquet` regions\n" +
-        "  on nations.N_REGIONKEY = regions.R_REGIONKEY");
+        "  on nations.N_REGIONKEY = regions.R_REGIONKEY where 1 = 0");
   }
 
 

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/a314c824/exec/java-exec/src/test/resources/mergerecv/empty_batch.json
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/test/resources/mergerecv/empty_batch.json b/exec/java-exec/src/test/resources/mergerecv/empty_batch.json
index 361c4af..55b3f7d 100644
--- a/exec/java-exec/src/test/resources/mergerecv/empty_batch.json
+++ b/exec/java-exec/src/test/resources/mergerecv/empty_batch.json
@@ -27,7 +27,7 @@
     {
       @id: 2,
       child: 1,
-      pop: "sort",
+      pop: "external-sort",
       orderings: [ {expr: "blue", order:"DESC"},  {expr: "red", order:"DESC"} ]
     },
     {

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/a314c824/exec/jdbc/src/main/java/org/apache/drill/jdbc/DrillCursor.java
----------------------------------------------------------------------
diff --git a/exec/jdbc/src/main/java/org/apache/drill/jdbc/DrillCursor.java b/exec/jdbc/src/main/java/org/apache/drill/jdbc/DrillCursor.java
index 1145b84..c2c9dd8 100644
--- a/exec/jdbc/src/main/java/org/apache/drill/jdbc/DrillCursor.java
+++ b/exec/jdbc/src/main/java/org/apache/drill/jdbc/DrillCursor.java
@@ -20,16 +20,15 @@ package org.apache.drill.jdbc;
 import java.sql.SQLException;
 import java.util.Calendar;
 import java.util.List;
-import java.util.concurrent.LinkedBlockingDeque;
 
 import net.hydromatic.avatica.ColumnMetaData;
 import net.hydromatic.avatica.Cursor;
 
 import org.apache.drill.exec.exception.SchemaChangeException;
+import org.apache.drill.exec.record.BatchSchema;
 import org.apache.drill.exec.record.RecordBatchLoader;
 import org.apache.drill.exec.rpc.RpcException;
 import org.apache.drill.exec.rpc.user.QueryResultBatch;
-import org.apache.drill.jdbc.DrillResultSet.Listener;
 
 public class DrillCursor implements Cursor{
   static final org.slf4j.Logger logger = org.slf4j.LoggerFactory.getLogger(DrillCursor.class);
@@ -41,8 +40,10 @@ public class DrillCursor implements Cursor{
   private final RecordBatchLoader currentBatch;
   private final DrillResultSet.Listener listener;
   private boolean redoFirstNext = false;
+  private boolean first = true;
   
   private DrillColumnMetaDataList columnMetaDataList;
+  private BatchSchema schema;
   
   final DrillResultSet results;
   int currentRecord = 0;
@@ -82,11 +83,13 @@ public class DrillCursor implements Cursor{
       try {
         QueryResultBatch qrb = listener.getNext();
         recordBatchCount++;
-        while(qrb != null && qrb.getHeader().getRowCount() == 0 ){
+        while(qrb != null && qrb.getHeader().getRowCount() == 0 && !first){
           qrb.release();
           qrb = listener.getNext();
           recordBatchCount++;
         }
+
+        first = false;
         
         if(qrb == null){
           finished = true;
@@ -94,7 +97,11 @@ public class DrillCursor implements Cursor{
         }else{
           currentRecord = 0;
           boolean changed = currentBatch.load(qrb.getHeader().getDef(), qrb.getData());
+          schema = currentBatch.getSchema();
           if(changed) updateColumns();
+          if (redoFirstNext && currentBatch.getRecordCount() == 0) {
+            redoFirstNext = false;
+          }
           return true;
         }
       } catch (RpcException | InterruptedException | SchemaChangeException e) {
@@ -106,8 +113,8 @@ public class DrillCursor implements Cursor{
   
   void updateColumns(){
     accessors.generateAccessors(this, currentBatch);
-    columnMetaDataList.updateColumnMetaData(UNKNOWN, UNKNOWN, UNKNOWN, currentBatch.getSchema());
-    if(results.changeListener != null) results.changeListener.schemaChanged(currentBatch.getSchema());
+    columnMetaDataList.updateColumnMetaData(UNKNOWN, UNKNOWN, UNKNOWN, schema);
+    if(results.changeListener != null) results.changeListener.schemaChanged(schema);
   }
   
   public long getRecordBatchCount(){

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/a314c824/exec/jdbc/src/test/java/org/apache/drill/jdbc/test/TestJdbcDistQuery.java
----------------------------------------------------------------------
diff --git a/exec/jdbc/src/test/java/org/apache/drill/jdbc/test/TestJdbcDistQuery.java b/exec/jdbc/src/test/java/org/apache/drill/jdbc/test/TestJdbcDistQuery.java
index 30a7144..93cfce3 100644
--- a/exec/jdbc/src/test/java/org/apache/drill/jdbc/test/TestJdbcDistQuery.java
+++ b/exec/jdbc/src/test/java/org/apache/drill/jdbc/test/TestJdbcDistQuery.java
@@ -23,12 +23,15 @@ import java.sql.DriverManager;
 import java.sql.ResultSet;
 import java.sql.ResultSetMetaData;
 import java.sql.Statement;
+import java.util.List;
 import java.util.concurrent.TimeUnit;
 
+import com.google.common.collect.Lists;
 import org.apache.drill.common.util.TestTools;
 import org.apache.drill.exec.store.hive.HiveTestDataGenerator;
 import org.apache.drill.jdbc.Driver;
 import org.apache.drill.jdbc.JdbcTest;
+import org.junit.Assert;
 import org.junit.BeforeClass;
 import org.junit.Rule;
 import org.junit.Test;
@@ -182,16 +185,17 @@ public class TestJdbcDistQuery extends JdbcTest{
         Statement s = c.createStatement();
         ResultSet r = s.executeQuery(sql);
         boolean first = true;
-        while (r.next()) {
-          ResultSetMetaData md = r.getMetaData();
-          if (first == true) {
-            for (int i = 1; i <= md.getColumnCount(); i++) {
-              System.out.print(md.getColumnName(i));
-              System.out.print('\t');
-            }
-            System.out.println();
-            first = false;
+        ResultSetMetaData md = r.getMetaData();
+        if (first) {
+          for (int i = 1; i <= md.getColumnCount(); i++) {
+            System.out.print(md.getColumnName(i));
+            System.out.print('\t');
           }
+          System.out.println();
+          first = false;
+        }
+        while(r.next()){
+          md = r.getMetaData();
 
           for (int i = 1; i <= md.getColumnCount(); i++) {
             System.out.print(r.getObject(i));
@@ -211,4 +215,24 @@ public class TestJdbcDistQuery extends JdbcTest{
 
 
   }
+
+  @Test
+  public void testSchemaForEmptyResultSet() throws Exception {
+    String query = "select fullname, occupation, postal_code from cp.`customer.json` where 0 = 1";
+    try (Connection c = DriverManager.getConnection("jdbc:drill:zk=local", null);) {
+      Statement s = c.createStatement();
+      ResultSet r = s.executeQuery(query);
+      ResultSetMetaData md = r.getMetaData();
+      List<String> columns = Lists.newArrayList();
+      for (int i = 1; i <= md.getColumnCount(); i++) {
+        System.out.print(md.getColumnName(i));
+        System.out.print('\t');
+        columns.add(md.getColumnName(i));
+      }
+      String[] expected = {"fullname", "occupation", "postal_code"};
+      Assert.assertEquals(3, md.getColumnCount());
+      Assert.assertArrayEquals(expected, columns.toArray());
+    }
+  }
+
 }

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/a314c824/exec/jdbc/src/test/resources/bootstrap-storage-plugins.json
----------------------------------------------------------------------
diff --git a/exec/jdbc/src/test/resources/bootstrap-storage-plugins.json b/exec/jdbc/src/test/resources/bootstrap-storage-plugins.json
index 3861317..53600f3 100644
--- a/exec/jdbc/src/test/resources/bootstrap-storage-plugins.json
+++ b/exec/jdbc/src/test/resources/bootstrap-storage-plugins.json
@@ -32,6 +32,9 @@
         },
         "parquet" : {
           type: "parquet"
+        },
+        "json" : {
+          type: "json"
         }
       }
     },


[10/32] git commit: DRILL-1022: Increase default min hash table size and allow setting min/max size for hash table.

Posted by ja...@apache.org.
DRILL-1022: Increase default min hash table size and allow setting min/max size for hash table.


Project: http://git-wip-us.apache.org/repos/asf/incubator-drill/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-drill/commit/ff39fb83
Tree: http://git-wip-us.apache.org/repos/asf/incubator-drill/tree/ff39fb83
Diff: http://git-wip-us.apache.org/repos/asf/incubator-drill/diff/ff39fb83

Branch: refs/heads/master
Commit: ff39fb8383e038aadbf4810a6b4ad5f22d25a181
Parents: 4243f54
Author: Aman Sinha <as...@maprtech.com>
Authored: Tue Jun 17 22:41:14 2014 -0700
Committer: Jacques Nadeau <ja...@apache.org>
Committed: Wed Jun 18 21:50:09 2014 -0700

----------------------------------------------------------------------
 .../java/org/apache/drill/exec/ExecConstants.java  |  9 +++++++++
 .../drill/exec/physical/config/HashAggregate.java  | 17 -----------------
 .../exec/physical/impl/aggregate/HashAggBatch.java | 10 +++++++++-
 .../physical/impl/aggregate/HashAggTemplate.java   |  5 +++--
 .../physical/impl/aggregate/HashAggregator.java    |  3 ++-
 .../drill/exec/physical/impl/common/HashTable.java |  2 +-
 .../exec/physical/impl/join/HashJoinBatch.java     |  5 ++++-
 .../exec/server/options/SystemOptionManager.java   |  5 +++--
 .../exec/physical/impl/join/TestHashJoin.java      | 13 +++++++++++--
 9 files changed, 42 insertions(+), 27 deletions(-)
----------------------------------------------------------------------
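
A minimal sketch of using the new knobs, assuming the BaseTestQuery-style harness seen elsewhere in this series; the sample query is illustrative only (65536 is the new default minimum, 1073741824 the maximum):

    // Session-scoped sizing for hash aggregate / hash join tables.
    test("alter session set `exec.min_hash_table_size` = 65536");
    test("alter session set `exec.max_hash_table_size` = 1073741824");
    test("select occupation, count(*) from cp.`customer.json` group by occupation");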


http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/ff39fb83/exec/java-exec/src/main/java/org/apache/drill/exec/ExecConstants.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/ExecConstants.java b/exec/java-exec/src/main/java/org/apache/drill/exec/ExecConstants.java
index 6673c4c..7681dd5 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/ExecConstants.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/ExecConstants.java
@@ -17,6 +17,7 @@
  */
 package org.apache.drill.exec;
 
+import org.apache.drill.exec.physical.impl.common.HashTable;
 import org.apache.drill.exec.server.options.OptionValidator;
 import org.apache.drill.exec.server.options.TypeValidators.BooleanValidator;
 import org.apache.drill.exec.server.options.TypeValidators.DoubleValidator;
@@ -86,6 +87,14 @@ public interface ExecConstants {
 
   public static final String SLICE_TARGET = "planner.slice_target";
   public static final OptionValidator SLICE_TARGET_OPTION = new PositiveLongValidator(SLICE_TARGET, Long.MAX_VALUE, 1000000);
+  
+  /**
+   * HashTable runtime settings
+   */
+  public static final String MIN_HASH_TABLE_SIZE_KEY = "exec.min_hash_table_size";
+  public static final OptionValidator MIN_HASH_TABLE_SIZE = new PositiveLongValidator(MIN_HASH_TABLE_SIZE_KEY, HashTable.MAXIMUM_CAPACITY, HashTable.DEFAULT_INITIAL_CAPACITY);
+  public static final String MAX_HASH_TABLE_SIZE_KEY = "exec.max_hash_table_size";
+  public static final OptionValidator MAX_HASH_TABLE_SIZE = new PositiveLongValidator(MAX_HASH_TABLE_SIZE_KEY, HashTable.MAXIMUM_CAPACITY, HashTable.MAXIMUM_CAPACITY);
 
   /**
    * Limits the maximum level of parallelization to this factor time the number of Drillbits

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/ff39fb83/exec/java-exec/src/main/java/org/apache/drill/exec/physical/config/HashAggregate.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/physical/config/HashAggregate.java b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/config/HashAggregate.java
index e4ce5f8..694570c 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/physical/config/HashAggregate.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/config/HashAggregate.java
@@ -21,8 +21,6 @@ import org.apache.drill.common.logical.data.NamedExpression;
 import org.apache.drill.exec.physical.base.AbstractSingle;
 import org.apache.drill.exec.physical.base.PhysicalOperator;
 import org.apache.drill.exec.physical.base.PhysicalVisitor;
-import org.apache.drill.exec.physical.impl.common.HashTable;
-import org.apache.drill.exec.physical.impl.common.HashTableConfig;
 import org.apache.drill.exec.proto.UserBitShared.CoreOperatorType;
 
 import com.fasterxml.jackson.annotation.JsonCreator;
@@ -39,23 +37,12 @@ public class HashAggregate extends AbstractSingle {
 
   private final float cardinality;
 
-  // configuration parameters for the hash table
-  private final HashTableConfig htConfig;
-
   @JsonCreator
   public HashAggregate(@JsonProperty("child") PhysicalOperator child, @JsonProperty("keys") NamedExpression[] groupByExprs, @JsonProperty("exprs") NamedExpression[] aggrExprs, @JsonProperty("cardinality") float cardinality) {
     super(child);
     this.groupByExprs = groupByExprs;
     this.aggrExprs = aggrExprs;
     this.cardinality = cardinality;
-
-    int initial_capacity = cardinality > HashTable.DEFAULT_INITIAL_CAPACITY ?
-      (int) cardinality : HashTable.DEFAULT_INITIAL_CAPACITY;
-
-    this.htConfig = new HashTableConfig(initial_capacity,
-                                        HashTable.DEFAULT_LOAD_FACTOR,
-                                        groupByExprs,
-                                        null /* no probe exprs */) ;
   }
 
   public NamedExpression[] getGroupByExprs() {
@@ -70,10 +57,6 @@ public class HashAggregate extends AbstractSingle {
     return cardinality;
   }
 
-  public HashTableConfig getHtConfig() {
-    return htConfig;
-  }
-
   @Override
   public <T, X, E extends Throwable> T accept(PhysicalVisitor<T, X, E> physicalVisitor, X value) throws E{
     return physicalVisitor.visitHashAggregate(this, value);

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/ff39fb83/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/aggregate/HashAggBatch.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/aggregate/HashAggBatch.java b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/aggregate/HashAggBatch.java
index dd58562..6adc304 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/aggregate/HashAggBatch.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/aggregate/HashAggBatch.java
@@ -25,6 +25,7 @@ import org.apache.drill.common.expression.ErrorCollector;
 import org.apache.drill.common.expression.ErrorCollectorImpl;
 import org.apache.drill.common.expression.LogicalExpression;
 import org.apache.drill.common.logical.data.NamedExpression;
+import org.apache.drill.exec.ExecConstants;
 import org.apache.drill.exec.compile.sig.GeneratorMapping;
 import org.apache.drill.exec.compile.sig.MappingSet;
 import org.apache.drill.exec.exception.ClassTransformationException;
@@ -48,6 +49,8 @@ import org.apache.drill.exec.record.selection.SelectionVector4;
 import org.apache.drill.exec.vector.ValueVector;
 import org.apache.drill.exec.vector.allocator.VectorAllocator;
 import org.apache.drill.exec.physical.impl.aggregate.HashAggregator.AggOutcome;
+import org.apache.drill.exec.physical.impl.common.HashTable;
+import org.apache.drill.exec.physical.impl.common.HashTableConfig;
 
 import com.google.common.collect.Lists;
 import com.sun.codemodel.JExpr;
@@ -220,7 +223,12 @@ public class HashAggBatch extends AbstractRecordBatch<HashAggregate> {
     container.buildSchema(SelectionVectorMode.NONE);
     HashAggregator agg = context.getImplementationClass(top);
 
-    agg.setup(popConfig, context, this.stats,
+    HashTableConfig htConfig = new HashTableConfig(context.getOptions().getOption(ExecConstants.MIN_HASH_TABLE_SIZE_KEY).num_val.intValue(),
+                                                   HashTable.DEFAULT_LOAD_FACTOR,
+                                                   popConfig.getGroupByExprs(),
+                                                   null /* no probe exprs */) ;
+    
+    agg.setup(popConfig, htConfig, context, this.stats,
               oContext.getAllocator(), incoming, this,
               aggrExprs,
               cgInner.getWorkspaceTypes(),

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/ff39fb83/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/aggregate/HashAggTemplate.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/aggregate/HashAggTemplate.java b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/aggregate/HashAggTemplate.java
index 72095b7..5069a2d 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/aggregate/HashAggTemplate.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/aggregate/HashAggTemplate.java
@@ -171,7 +171,8 @@ public abstract class HashAggTemplate implements HashAggregator {
 
 
   @Override
-  public void setup(HashAggregate hashAggrConfig, FragmentContext context, 
+  public void setup(HashAggregate hashAggrConfig, HashTableConfig htConfig, 
+                    FragmentContext context, 
                     OperatorStats stats,
                     BufferAllocator allocator, RecordBatch incoming, HashAggBatch outgoing,
                     LogicalExpression[] valueExprs,
@@ -219,7 +220,7 @@ public abstract class HashAggTemplate implements HashAggregator {
       }
     }
 
-    ChainedHashTable ht = new ChainedHashTable(hashAggrConfig.getHtConfig(), context, allocator, incoming, null /* no incoming probe */, outgoing) ;
+    ChainedHashTable ht = new ChainedHashTable(htConfig, context, allocator, incoming, null /* no incoming probe */, outgoing) ;
     this.htable = ht.createAndSetupHashTable(groupByOutFieldIds) ;
 
     batchHolders = new ArrayList<BatchHolder>();

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/ff39fb83/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/aggregate/HashAggregator.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/aggregate/HashAggregator.java b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/aggregate/HashAggregator.java
index d14880c..b94f299 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/aggregate/HashAggregator.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/aggregate/HashAggregator.java
@@ -29,6 +29,7 @@ import org.apache.drill.exec.memory.BufferAllocator;
 import org.apache.drill.exec.ops.FragmentContext;
 import org.apache.drill.exec.ops.OperatorStats;
 import org.apache.drill.exec.physical.config.HashAggregate;
+import org.apache.drill.exec.physical.impl.common.HashTableConfig;
 import org.apache.drill.exec.record.RecordBatch;
 import org.apache.drill.exec.record.RecordBatch.IterOutcome;
 import org.apache.drill.exec.record.TypedFieldId;
@@ -42,7 +43,7 @@ public interface HashAggregator {
     RETURN_OUTCOME, CLEANUP_AND_RETURN, UPDATE_AGGREGATOR
 	  }
   
-  public abstract void setup(HashAggregate hashAggrConfig, FragmentContext context, 
+  public abstract void setup(HashAggregate hashAggrConfig, HashTableConfig htConfig, FragmentContext context, 
                              OperatorStats stats, BufferAllocator allocator, RecordBatch incoming,
                              HashAggBatch outgoing, LogicalExpression[] valueExprs, 
                              List<TypedFieldId> valueFieldIds,

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/ff39fb83/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/common/HashTable.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/common/HashTable.java b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/common/HashTable.java
index 429ec63..9f5d4f8 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/common/HashTable.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/common/HashTable.java
@@ -30,7 +30,7 @@ public interface HashTable {
   public static TemplateClassDefinition<HashTable> TEMPLATE_DEFINITION = new TemplateClassDefinition<HashTable>(HashTable.class, HashTableTemplate.class);
 
   /** The initial default capacity of the hash table (in terms of number of buckets). */
-  static final public int DEFAULT_INITIAL_CAPACITY = 1 << 8; 
+  static final public int DEFAULT_INITIAL_CAPACITY = 1 << 16; 
 
   /** The maximum capacity of the hash table (in terms of number of buckets). */
   static final public int MAXIMUM_CAPACITY = 1 << 30; 
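
For scale, the change above takes the default initial capacity from 1 << 8 = 256 buckets to 1 << 16 = 65,536, while MAXIMUM_CAPACITY remains 1 << 30 = 1,073,741,824; these are also the default and upper bound that the new exec.min_hash_table_size option validates against.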

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/ff39fb83/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/join/HashJoinBatch.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/join/HashJoinBatch.java b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/join/HashJoinBatch.java
index c43b99a..11368e3 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/join/HashJoinBatch.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/join/HashJoinBatch.java
@@ -27,6 +27,7 @@ import org.apache.drill.common.logical.data.NamedExpression;
 import org.apache.drill.common.types.TypeProtos.DataMode;
 import org.apache.drill.common.types.TypeProtos.MajorType;
 import org.apache.drill.common.types.Types;
+import org.apache.drill.exec.ExecConstants;
 import org.apache.drill.exec.compile.sig.GeneratorMapping;
 import org.apache.drill.exec.compile.sig.MappingSet;
 import org.apache.drill.exec.exception.ClassTransformationException;
@@ -257,7 +258,9 @@ public class HashJoinBatch extends AbstractRecordBatch<HashJoinPOP> {
           }
         }
 
-        HashTableConfig htConfig = new HashTableConfig(HashTable.DEFAULT_INITIAL_CAPACITY, HashTable.DEFAULT_LOAD_FACTOR, rightExpr, leftExpr);
+        HashTableConfig htConfig = 
+            new HashTableConfig(context.getOptions().getOption(ExecConstants.MIN_HASH_TABLE_SIZE_KEY).num_val.intValue(), 
+            HashTable.DEFAULT_LOAD_FACTOR, rightExpr, leftExpr);
 
         // Create the chained hash table
         ChainedHashTable ht  = new ChainedHashTable(htConfig, context, oContext.getAllocator(), this.right, this.left, null);

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/ff39fb83/exec/java-exec/src/main/java/org/apache/drill/exec/server/options/SystemOptionManager.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/server/options/SystemOptionManager.java b/exec/java-exec/src/main/java/org/apache/drill/exec/server/options/SystemOptionManager.java
index 8503197..a42640f 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/server/options/SystemOptionManager.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/server/options/SystemOptionManager.java
@@ -64,8 +64,9 @@ public class SystemOptionManager implements OptionManager{
       ExecConstants.LARGE_QUEUE_SIZE,
       ExecConstants.QUEUE_THRESHOLD_SIZE,
       ExecConstants.QUEUE_TIMEOUT,
-      ExecConstants.SMALL_QUEUE_SIZE
-
+      ExecConstants.SMALL_QUEUE_SIZE, 
+      ExecConstants.MIN_HASH_TABLE_SIZE,
+      ExecConstants.MAX_HASH_TABLE_SIZE
   };
 
   public final PStoreConfig<OptionValue> config;

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/ff39fb83/exec/java-exec/src/test/java/org/apache/drill/exec/physical/impl/join/TestHashJoin.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/test/java/org/apache/drill/exec/physical/impl/join/TestHashJoin.java b/exec/java-exec/src/test/java/org/apache/drill/exec/physical/impl/join/TestHashJoin.java
index d4a86ca..e24426e 100644
--- a/exec/java-exec/src/test/java/org/apache/drill/exec/physical/impl/join/TestHashJoin.java
+++ b/exec/java-exec/src/test/java/org/apache/drill/exec/physical/impl/join/TestHashJoin.java
@@ -29,6 +29,8 @@ import mockit.NonStrictExpectations;
 import org.apache.drill.common.config.DrillConfig;
 import org.apache.drill.common.util.FileUtils;
 import org.apache.drill.common.util.TestTools;
+import org.apache.drill.exec.cache.DistributedCache;
+import org.apache.drill.exec.cache.local.LocalCache;
 import org.apache.drill.exec.client.DrillClient;
 import org.apache.drill.exec.expr.fn.FunctionImplementationRegistry;
 import org.apache.drill.exec.memory.TopLevelAllocator;
@@ -42,7 +44,6 @@ import org.apache.drill.exec.planner.PhysicalPlanReader;
 import org.apache.drill.exec.pop.PopUnitTestBase;
 import org.apache.drill.exec.proto.BitControl.PlanFragment;
 import org.apache.drill.exec.proto.CoordinationProtos;
-import org.apache.drill.exec.proto.UserProtos;
 import org.apache.drill.exec.record.RecordBatchLoader;
 import org.apache.drill.exec.record.VectorWrapper;
 import org.apache.drill.exec.rpc.user.QueryResultBatch;
@@ -50,7 +51,10 @@ import org.apache.drill.exec.rpc.user.UserServer;
 import org.apache.drill.exec.server.Drillbit;
 import org.apache.drill.exec.server.DrillbitContext;
 import org.apache.drill.exec.server.RemoteServiceSet;
-import org.apache.drill.exec.vector.NullableIntVector;
+import org.apache.drill.exec.server.options.OptionManager;
+import org.apache.drill.exec.server.options.SessionOptionManager;
+import org.apache.drill.exec.server.options.SystemOptionManager;
+import org.apache.drill.exec.store.sys.local.LocalPStoreProvider;
 import org.apache.drill.exec.vector.ValueVector;
 import org.junit.Rule;
 import org.junit.Test;
@@ -69,11 +73,16 @@ public class TestHashJoin extends PopUnitTestBase{
     DrillConfig c = DrillConfig.create();
 
     private void testHJMockScanCommon(final DrillbitContext bitContext, UserServer.UserClientConnection connection, String physicalPlan, int expectedRows) throws Throwable {
+      final LocalPStoreProvider provider = new LocalPStoreProvider(c);
+      provider.start();
+      final SystemOptionManager opt = new SystemOptionManager(c, provider);
+      opt.init();
         new NonStrictExpectations(){{
             bitContext.getMetrics(); result = new MetricRegistry();
             bitContext.getAllocator(); result = new TopLevelAllocator();
             bitContext.getOperatorCreatorRegistry(); result = new OperatorCreatorRegistry(c);
             bitContext.getConfig(); result = c;
+            bitContext.getOptionManager(); result = opt;
         }};
 
         PhysicalPlanReader reader = new PhysicalPlanReader(c, c.getMapper(), CoordinationProtos.DrillbitEndpoint.getDefaultInstance());


[20/32] git commit: DRILL-1044: Optimize boolean and/or operators by short-circuit and fast-success / fast-fail approach.

Posted by ja...@apache.org.
DRILL-1044: Optimize boolean and/or operators by short-circuit and fast-success / fast-fail approach.


Project: http://git-wip-us.apache.org/repos/asf/incubator-drill/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-drill/commit/43bb57e7
Tree: http://git-wip-us.apache.org/repos/asf/incubator-drill/tree/43bb57e7
Diff: http://git-wip-us.apache.org/repos/asf/incubator-drill/diff/43bb57e7

Branch: refs/heads/master
Commit: 43bb57e758495d6c3e47e29b27e1f97b9f81f268
Parents: a314c82
Author: Jinfeng Ni <jn...@maprtech.com>
Authored: Wed Jun 18 07:11:54 2014 -0700
Committer: Jacques Nadeau <ja...@apache.org>
Committed: Fri Jun 20 10:54:50 2014 -0700

----------------------------------------------------------------------
 .../drill/common/expression/parser/ExprParser.g |   4 +-
 .../common/expression/BooleanOperator.java      |  72 +++++++++
 .../expression/ExpressionStringBuilder.java     |   5 +
 .../common/expression/FunctionCallFactory.java  |  23 ++-
 .../expression/FunctionHolderExpression.java    |   6 +
 .../drill/common/expression/IfExpression.java   |  18 +++
 .../common/expression/LogicalExpression.java    |   3 +
 .../expression/LogicalExpressionBase.java       |  14 +-
 .../drill/common/expression/NullExpression.java |   6 +
 .../common/expression/ValueExpressions.java     | 145 ++++++++++++++++++-
 .../visitors/AbstractExprVisitor.java           |   6 +
 .../expression/visitors/AggregateChecker.java   |  11 ++
 .../visitors/ConditionalExprOptimizer.java      | 125 ++++++++++++++++
 .../expression/visitors/ConstantChecker.java    |  10 ++
 .../common/expression/visitors/ExprVisitor.java |   2 +
 .../visitors/ExpressionValidator.java           |  20 +++
 .../org/apache/drill/common/types/Types.java    |   1 +
 .../main/codegen/templates/CastDateDate.java    |   3 +-
 .../main/codegen/templates/CastDateVarChar.java |   4 +-
 .../codegen/templates/CastIntervalVarChar.java  |   8 +-
 .../main/codegen/templates/CastVarCharDate.java |   4 +-
 .../templates/DateIntervalFunctions.java        |  25 ++--
 .../sig/ConstantExpressionIdentifier.java       |   8 +-
 .../drill/exec/expr/DrillFuncHolderExpr.java    |  23 +++
 .../drill/exec/expr/EvaluationVisitor.java      |  51 +++++--
 .../exec/expr/ExpressionTreeMaterializer.java   |  20 +++
 .../drill/exec/expr/HiveFuncHolderExpr.java     |   6 +
 .../exec/expr/HoldingContainerExpression.java   |  12 ++
 .../exec/expr/ValueVectorReadExpression.java    |  13 +-
 .../exec/expr/ValueVectorWriteExpression.java   |  10 ++
 .../exec/expr/annotations/FunctionTemplate.java |  21 +++
 .../drill/exec/expr/fn/DrillAggFuncHolder.java  |  11 +-
 .../exec/expr/fn/DrillBooleanOPHolder.java      |  36 +++++
 .../drill/exec/expr/fn/DrillFuncHolder.java     |   9 +-
 .../exec/expr/fn/DrillSCBooleanOPHolder.java    |  35 -----
 .../exec/expr/fn/DrillSimpleFuncHolder.java     |  10 +-
 .../drill/exec/expr/fn/FunctionConverter.java   |   6 +-
 .../impl/filter/ReturnValueExpression.java      |   7 +
 .../drill/exec/planner/logical/DrillOptiq.java  |  18 ++-
 .../org/apache/drill/TestExampleQueries.java    |   5 -
 pom.xml                                         |   1 +
 41 files changed, 721 insertions(+), 96 deletions(-)
----------------------------------------------------------------------
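
The core idea, as a minimal sketch rather than the generated code (NULL handling, which the real operators must track for nullable bits, is ignored here, and Supplier merely stands in for an operand evaluator):

    import java.util.List;
    import java.util.function.Supplier;

    class ShortCircuitSketch {
      // booleanOr: stop at the first true operand (fast-success).
      static boolean or(List<Supplier<Boolean>> operands) {
        for (Supplier<Boolean> op : operands) {
          if (op.get()) {
            return true;        // remaining operands are never evaluated
          }
        }
        return false;
      }

      // booleanAnd: stop at the first false operand (fast-fail).
      static boolean and(List<Supplier<Boolean>> operands) {
        for (Supplier<Boolean> op : operands) {
          if (!op.get()) {
            return false;
          }
        }
        return true;
      }
    }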


http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/43bb57e7/common/src/main/antlr3/org/apache/drill/common/expression/parser/ExprParser.g
----------------------------------------------------------------------
diff --git a/common/src/main/antlr3/org/apache/drill/common/expression/parser/ExprParser.g b/common/src/main/antlr3/org/apache/drill/common/expression/parser/ExprParser.g
index ba6d48b..5afaae4 100644
--- a/common/src/main/antlr3/org/apache/drill/common/expression/parser/ExprParser.g
+++ b/common/src/main/antlr3/org/apache/drill/common/expression/parser/ExprParser.g
@@ -204,7 +204,7 @@ orExpr returns [LogicalExpression e]
 	  if(exprs.size() == 1){
 	    $e = exprs.get(0);
 	  }else{
-	    $e = FunctionCallFactory.createExpression("||", p, exprs);
+	    $e = FunctionCallFactory.createBooleanOperator("||", p, exprs);
 	  }
 	}
   :  a1=andExpr { exprs.add($a1.e); p = pos( $a1.start );} (Or a2=andExpr { exprs.add($a2.e); })*
@@ -219,7 +219,7 @@ andExpr returns [LogicalExpression e]
 	  if(exprs.size() == 1){
 	    $e = exprs.get(0);
 	  }else{
-	    $e = FunctionCallFactory.createExpression("&&", p, exprs);
+	    $e = FunctionCallFactory.createBooleanOperator("&&", p, exprs);
 	  }
 	}
   :  e1=equExpr { exprs.add($e1.e); p = pos( $e1.start );  } ( And e2=equExpr { exprs.add($e2.e);  })*

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/43bb57e7/common/src/main/java/org/apache/drill/common/expression/BooleanOperator.java
----------------------------------------------------------------------
diff --git a/common/src/main/java/org/apache/drill/common/expression/BooleanOperator.java b/common/src/main/java/org/apache/drill/common/expression/BooleanOperator.java
new file mode 100644
index 0000000..e69717b
--- /dev/null
+++ b/common/src/main/java/org/apache/drill/common/expression/BooleanOperator.java
@@ -0,0 +1,72 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.drill.common.expression;
+
+import java.util.List;
+
+import org.apache.drill.common.expression.IfExpression.IfCondition;
+import org.apache.drill.common.expression.visitors.ExprVisitor;
+import org.apache.drill.common.types.Types;
+import org.apache.drill.common.types.TypeProtos.DataMode;
+import org.apache.drill.common.types.TypeProtos.MajorType;
+
+public class BooleanOperator extends FunctionCall{
+
+  public BooleanOperator(String name, List<LogicalExpression> args, ExpressionPosition pos) {
+    super(name, args, pos);
+  }
+  
+  @Override
+  public <T, V, E extends Exception> T accept(ExprVisitor<T, V, E> visitor, V value) throws E{
+    return visitor.visitBooleanOperator(this, value);
+  }
+
+  @Override
+  public MajorType getMajorType() {
+    // If any argument of a boolean "and"/"or" is nullable, the result is a nullable bit.
+    // Otherwise, it's a non-nullable bit.
+    for (LogicalExpression e : args) {
+      if (e.getMajorType().getMode() == DataMode.OPTIONAL) {
+        return Types.OPTIONAL_BIT;
+      }
+    }
+    return Types.REQUIRED_BIT;
+
+  }
+  
+  @Override
+  public int getSelfCost() { 
+    return 0;  // TODO 
+  }
+  
+  @Override
+  public int getCumulativeCost() {
+    // return the average cost of operands for a boolean "and" | "or"
+    int cost = 0;
+
+    int i = 0;
+    for (LogicalExpression e : this) {
+      cost += e.getCumulativeCost();
+      i++;
+    }
+  
+    return cost / i;
+  }
+  
+}
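
A worked example of the cost model above: a booleanAnd over operands with cumulative costs 2, 4 and 6 reports (2 + 4 + 6) / 3 = 4. Averaging rather than summing presumably reflects that, with short-circuit evaluation, not every operand is expected to be evaluated.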

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/43bb57e7/common/src/main/java/org/apache/drill/common/expression/ExpressionStringBuilder.java
----------------------------------------------------------------------
diff --git a/common/src/main/java/org/apache/drill/common/expression/ExpressionStringBuilder.java b/common/src/main/java/org/apache/drill/common/expression/ExpressionStringBuilder.java
index 4e9807f..7d54b6b 100644
--- a/common/src/main/java/org/apache/drill/common/expression/ExpressionStringBuilder.java
+++ b/common/src/main/java/org/apache/drill/common/expression/ExpressionStringBuilder.java
@@ -76,6 +76,11 @@ public class ExpressionStringBuilder extends AbstractExprVisitor<Void, StringBui
     sb.append(") ");
     return null;
   }
+  
+  @Override
+  public Void visitBooleanOperator(BooleanOperator op, StringBuilder sb) throws RuntimeException {
+    return visitFunctionCall(op, sb);
+  }
 
   @Override
   public Void visitFunctionHolderExpression(FunctionHolderExpression holder, StringBuilder sb) throws RuntimeException {

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/43bb57e7/common/src/main/java/org/apache/drill/common/expression/FunctionCallFactory.java
----------------------------------------------------------------------
diff --git a/common/src/main/java/org/apache/drill/common/expression/FunctionCallFactory.java b/common/src/main/java/org/apache/drill/common/expression/FunctionCallFactory.java
index b619fd8..63686b2 100644
--- a/common/src/main/java/org/apache/drill/common/expression/FunctionCallFactory.java
+++ b/common/src/main/java/org/apache/drill/common/expression/FunctionCallFactory.java
@@ -73,6 +73,11 @@ public class FunctionCallFactory {
     return (opToFuncTable.containsKey(op)) ? (opToFuncTable.get(op)) : op;
   }
 
+  public static boolean isBooleanOperator(String funcName) {
+    String opName  = replaceOpWithFuncName(funcName);
+    return opName.equals("booleanAnd") || opName.equals("booleanOr");
+  }
+  
   /*
    * create a cast function.
    * arguments : type -- targetType
@@ -92,11 +97,23 @@ public class FunctionCallFactory {
   }
 
   public static LogicalExpression createExpression(String functionName, ExpressionPosition ep, List<LogicalExpression> args){
-    return new FunctionCall(replaceOpWithFuncName(functionName), args, ep);
+    String name = replaceOpWithFuncName(functionName);
+    if (isBooleanOperator(name))
+      return new BooleanOperator(name, args, ep);
+    else
+      return new FunctionCall(name, args, ep);
+  }
+
+  public static LogicalExpression createExpression(String functionName, ExpressionPosition ep, LogicalExpression... e){
+    return createExpression(functionName, ep, Lists.newArrayList(e));
+  }
+
+  public static LogicalExpression createBooleanOperator(String functionName, List<LogicalExpression> args){
+    return createBooleanOperator(functionName, ExpressionPosition.UNKNOWN, args);
   }
 
-  public static LogicalExpression createExpression(String unaryName, ExpressionPosition ep, LogicalExpression... e){
-    return new FunctionCall(replaceOpWithFuncName(unaryName), Lists.newArrayList(e), ep);
+  public static LogicalExpression createBooleanOperator(String functionName, ExpressionPosition ep, List<LogicalExpression> args){
+    return new BooleanOperator(replaceOpWithFuncName(functionName), args, ep);
   }
 
   public static LogicalExpression createByOp(List<LogicalExpression> args, ExpressionPosition ep, List<String> opTypes) {
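
A minimal sketch of the new factory path (left and right are placeholder expressions):

    // "&&" is first mapped to "booleanAnd" and, being a boolean operator,
    // now yields a BooleanOperator node instead of a plain FunctionCall,
    // so visitors can route it through visitBooleanOperator().
    List<LogicalExpression> args = Lists.newArrayList(left, right);
    LogicalExpression e =
        FunctionCallFactory.createExpression("&&", ExpressionPosition.UNKNOWN, args);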

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/43bb57e7/common/src/main/java/org/apache/drill/common/expression/FunctionHolderExpression.java
----------------------------------------------------------------------
diff --git a/common/src/main/java/org/apache/drill/common/expression/FunctionHolderExpression.java b/common/src/main/java/org/apache/drill/common/expression/FunctionHolderExpression.java
index b17e00d..eb87522 100644
--- a/common/src/main/java/org/apache/drill/common/expression/FunctionHolderExpression.java
+++ b/common/src/main/java/org/apache/drill/common/expression/FunctionHolderExpression.java
@@ -67,4 +67,10 @@ public abstract class FunctionHolderExpression extends LogicalExpressionBase {
    * is the function output non-deterministic?
    */
   public abstract boolean isRandom();
+  
+  /**
+   * @return a copy of this FunctionHolderExpression with the passed-in argument list.
+   */
+  public abstract FunctionHolderExpression copy(List<LogicalExpression> args);
+  
 }

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/43bb57e7/common/src/main/java/org/apache/drill/common/expression/IfExpression.java
----------------------------------------------------------------------
diff --git a/common/src/main/java/org/apache/drill/common/expression/IfExpression.java b/common/src/main/java/org/apache/drill/common/expression/IfExpression.java
index d1df7f7..8dc220f 100644
--- a/common/src/main/java/org/apache/drill/common/expression/IfExpression.java
+++ b/common/src/main/java/org/apache/drill/common/expression/IfExpression.java
@@ -146,5 +146,23 @@ public class IfExpression extends LogicalExpressionBase{
     return children.iterator();
   }
 
+  @Override
+  public int getSelfCost() { 
+    return 0;  // TODO 
+  }
   
+  @Override
+  public int getCumulativeCost() {
+    // return the average cost of this if expression's operands
+    int cost = 0;
+
+    int i = 0;
+    for (LogicalExpression e : this) {
+      cost += e.getCumulativeCost();
+      i++;
+    }
+  
+    return cost / i;
+  }
+
 }

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/43bb57e7/common/src/main/java/org/apache/drill/common/expression/LogicalExpression.java
----------------------------------------------------------------------
diff --git a/common/src/main/java/org/apache/drill/common/expression/LogicalExpression.java b/common/src/main/java/org/apache/drill/common/expression/LogicalExpression.java
index cceaef0..64ae07e 100644
--- a/common/src/main/java/org/apache/drill/common/expression/LogicalExpression.java
+++ b/common/src/main/java/org/apache/drill/common/expression/LogicalExpression.java
@@ -52,6 +52,9 @@ public interface LogicalExpression extends Iterable<LogicalExpression>{
 
   public ExpressionPosition getPosition();
 
+  public int getSelfCost();
+  public int getCumulativeCost();
+  
   public static class De extends StdDeserializer<LogicalExpression> {
     DrillConfig config;
 

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/43bb57e7/common/src/main/java/org/apache/drill/common/expression/LogicalExpressionBase.java
----------------------------------------------------------------------
diff --git a/common/src/main/java/org/apache/drill/common/expression/LogicalExpressionBase.java b/common/src/main/java/org/apache/drill/common/expression/LogicalExpressionBase.java
index 6f4ab86..056e9dd 100644
--- a/common/src/main/java/org/apache/drill/common/expression/LogicalExpressionBase.java
+++ b/common/src/main/java/org/apache/drill/common/expression/LogicalExpressionBase.java
@@ -19,6 +19,7 @@ package org.apache.drill.common.expression;
 
 import org.apache.drill.common.types.TypeProtos.MajorType;
 
+import com.fasterxml.jackson.annotation.JsonIgnore;
 import com.fasterxml.jackson.annotation.JsonProperty;
 import com.fasterxml.jackson.annotation.JsonPropertyOrder;
 
@@ -54,7 +55,14 @@ public abstract class LogicalExpressionBase implements LogicalExpression{
 		return this.getClass().getSimpleName();
 	}
 	
-
-	
-
+  @JsonIgnore
+  public int getSelfCost() { 
+    throw new UnsupportedOperationException(String.format("The type of %s doesn't currently support LogicalExpression.getSelfCost().", this.getClass().getCanonicalName())); 
+  }
+  
+  @JsonIgnore
+  public int getCumulativeCost() { 
+    throw new UnsupportedOperationException(String.format("The type of %s doesn't currently support LogicalExpression.getCumulativeCost().", this.getClass().getCanonicalName())); 
+  }
+  	
 }

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/43bb57e7/common/src/main/java/org/apache/drill/common/expression/NullExpression.java
----------------------------------------------------------------------
diff --git a/common/src/main/java/org/apache/drill/common/expression/NullExpression.java b/common/src/main/java/org/apache/drill/common/expression/NullExpression.java
index c39e06a..f515d14 100644
--- a/common/src/main/java/org/apache/drill/common/expression/NullExpression.java
+++ b/common/src/main/java/org/apache/drill/common/expression/NullExpression.java
@@ -26,6 +26,7 @@ import org.apache.drill.common.types.TypeProtos.MajorType;
 import org.apache.drill.common.types.TypeProtos.MinorType;
 import org.apache.drill.common.types.Types;
 
+import com.fasterxml.jackson.annotation.JsonIgnore;
 import com.google.common.collect.Iterators;
 
 public class NullExpression implements LogicalExpression{
@@ -54,4 +55,9 @@ public class NullExpression implements LogicalExpression{
   public Iterator<LogicalExpression> iterator() {
     return Iterators.emptyIterator();
   }
+  
+  public int getSelfCost() { return 0 ; }
+  
+  public int getCumulativeCost() { return 0; }
+  
 }

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/43bb57e7/common/src/main/java/org/apache/drill/common/expression/ValueExpressions.java
----------------------------------------------------------------------
diff --git a/common/src/main/java/org/apache/drill/common/expression/ValueExpressions.java b/common/src/main/java/org/apache/drill/common/expression/ValueExpressions.java
index 0948c57..db501aa 100644
--- a/common/src/main/java/org/apache/drill/common/expression/ValueExpressions.java
+++ b/common/src/main/java/org/apache/drill/common/expression/ValueExpressions.java
@@ -132,6 +132,15 @@ public class ValueExpressions {
       return Iterators.emptyIterator();
     }
 
+    @Override
+    public int getSelfCost() { 
+      return 0;  // TODO 
+    }
+    
+    @Override
+    public int getCumulativeCost() { 
+      return 0; // TODO
+    }
 
   }
 
@@ -193,6 +202,16 @@ public class ValueExpressions {
       return Iterators.emptyIterator();
     }
 
+    @Override
+    public int getSelfCost() { 
+      return 0;  // TODO 
+    }
+    
+    @Override
+    public int getCumulativeCost() { 
+      return 0; // TODO
+    }
+
   }
 
   public static class IntExpression extends LogicalExpressionBase {
@@ -224,6 +243,17 @@ public class ValueExpressions {
     public Iterator<LogicalExpression> iterator() {
       return Iterators.emptyIterator();
     }
+    
+    @Override
+    public int getSelfCost() { 
+      return 0;  // TODO 
+    }
+    
+    @Override
+    public int getCumulativeCost() { 
+      return 0; // TODO
+    }
+    
 
   }
 
@@ -267,6 +297,17 @@ public class ValueExpressions {
     public Iterator<LogicalExpression> iterator() {
       return Iterators.emptyIterator();
     }
+    
+    @Override
+    public int getSelfCost() { 
+      return 0;  // TODO 
+    }
+    
+    @Override
+    public int getCumulativeCost() { 
+      return 0; // TODO
+    }
+    
 
   }
 
@@ -310,6 +351,16 @@ public class ValueExpressions {
     public Iterator<LogicalExpression> iterator() {
       return Iterators.emptyIterator();
     }
+    
+    @Override
+    public int getSelfCost() { 
+      return 0;  // TODO 
+    }
+    
+    @Override
+    public int getCumulativeCost() { 
+      return 0; // TODO
+    }
 
   }
 
@@ -341,7 +392,16 @@ public class ValueExpressions {
     public Iterator<LogicalExpression> iterator() {
       return Iterators.emptyIterator();
     }
-
+    
+    @Override
+    public int getSelfCost() { 
+      return 0;  // TODO 
+    }
+    
+    @Override
+    public int getCumulativeCost() { 
+      return 0; // TODO
+    }
   }
 
   public static class Decimal38Expression extends LogicalExpressionBase {
@@ -372,6 +432,16 @@ public class ValueExpressions {
       return Iterators.emptyIterator();
     }
 
+    @Override
+    public int getSelfCost() { 
+      return 0;  // TODO 
+    }
+    
+    @Override
+    public int getCumulativeCost() { 
+      return 0; // TODO
+    }
+    
   }
 
 
@@ -403,7 +473,17 @@ public class ValueExpressions {
     public Iterator<LogicalExpression> iterator() {
       return Iterators.emptyIterator();
     }
-
+    
+    @Override
+    public int getSelfCost() { 
+      return 0;  // TODO 
+    }
+    
+    @Override
+    public int getCumulativeCost() { 
+      return 0; // TODO
+    }
+    
   }
 
   public static class LongExpression extends LogicalExpressionBase {
@@ -440,6 +520,17 @@ public class ValueExpressions {
       return Iterators.emptyIterator();
     }
 
+    @Override
+    public int getSelfCost() { 
+      return 0;  // TODO 
+    }
+    
+    @Override
+    public int getCumulativeCost() { 
+      return 0; // TODO
+    }
+    
+
   }
 
 
@@ -476,6 +567,16 @@ public class ValueExpressions {
     public Iterator<LogicalExpression> iterator() {
       return Iterators.emptyIterator();
     }
+    
+    @Override
+    public int getSelfCost() { 
+      return 0;  // TODO 
+    }
+    
+    @Override
+    public int getCumulativeCost() { 
+      return 0; // TODO
+    }
 
   }
 
@@ -514,6 +615,16 @@ public class ValueExpressions {
       return Iterators.emptyIterator();
     }
 
+    @Override
+    public int getSelfCost() { 
+      return 0;  // TODO 
+    }
+    
+    @Override
+    public int getCumulativeCost() { 
+      return 0; // TODO
+    }
+    
   }
 
   public static class TimeStampExpression extends LogicalExpressionBase {
@@ -549,6 +660,16 @@ public class ValueExpressions {
     public Iterator<LogicalExpression> iterator() {
       return Iterators.emptyIterator();
     }
+    
+    @Override
+    public int getSelfCost() { 
+      return 0;  // TODO 
+    }
+    
+    @Override
+    public int getCumulativeCost() { 
+      return 0; // TODO
+    }
 
   }
 
@@ -585,6 +706,16 @@ public class ValueExpressions {
     public Iterator<LogicalExpression> iterator() {
       return Iterators.emptyIterator();
     }
+    
+    @Override
+    public int getSelfCost() { 
+      return 0;  // TODO 
+    }
+    
+    @Override
+    public int getCumulativeCost() { 
+      return 0; // TODO
+    }
 
   }
 
@@ -629,6 +760,16 @@ public class ValueExpressions {
       return Iterators.emptyIterator();
     }
 
+    @Override
+    public int getSelfCost() { 
+      return 0;  // TODO 
+    }
+    
+    @Override
+    public int getCumulativeCost() { 
+      return 0; // TODO
+    }
+    
   }
 
   public static class QuotedString extends ValueExpression<String> {

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/43bb57e7/common/src/main/java/org/apache/drill/common/expression/visitors/AbstractExprVisitor.java
----------------------------------------------------------------------
diff --git a/common/src/main/java/org/apache/drill/common/expression/visitors/AbstractExprVisitor.java b/common/src/main/java/org/apache/drill/common/expression/visitors/AbstractExprVisitor.java
index 0dd6697..af4e0e7 100644
--- a/common/src/main/java/org/apache/drill/common/expression/visitors/AbstractExprVisitor.java
+++ b/common/src/main/java/org/apache/drill/common/expression/visitors/AbstractExprVisitor.java
@@ -17,6 +17,7 @@
  */
 package org.apache.drill.common.expression.visitors;
 
+import org.apache.drill.common.expression.BooleanOperator;
 import org.apache.drill.common.expression.CastExpression;
 import org.apache.drill.common.expression.ConvertExpression;
 import org.apache.drill.common.expression.FunctionCall;
@@ -61,6 +62,11 @@ public abstract class AbstractExprVisitor<T, VAL, EXCEP extends Exception> imple
   }
 
   @Override
+  public T visitBooleanOperator(BooleanOperator op, VAL value) throws EXCEP {
+    return visitUnknown(op, value);
+  }
+
+  @Override
   public T visitSchemaPath(SchemaPath path, VAL value) throws EXCEP {
     return visitUnknown(path, value);
   }

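Routing the new BooleanOperator node through visitUnknown keeps every existing AbstractExprVisitor subclass source-compatible; only visitors that care about AND/OR need an override. A minimal sketch of such an override (the class name is illustrative):

    // Counts booleanAnd/booleanOr nodes in a tree. All other node types fall
    // through to visitUnknown, which here simply recurses into the children.
    class BooleanOpCounter extends AbstractExprVisitor<Integer, Void, RuntimeException> {
      @Override
      public Integer visitBooleanOperator(BooleanOperator op, Void v) {
        int count = 1;
        for (LogicalExpression arg : op.args) {
          count += arg.accept(this, v);
        }
        return count;
      }

      @Override
      public Integer visitUnknown(LogicalExpression e, Void v) {
        int count = 0;
        for (LogicalExpression child : e) {    // LogicalExpression is Iterable
          count += child.accept(this, v);
        }
        return count;
      }
    }
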
http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/43bb57e7/common/src/main/java/org/apache/drill/common/expression/visitors/AggregateChecker.java
----------------------------------------------------------------------
diff --git a/common/src/main/java/org/apache/drill/common/expression/visitors/AggregateChecker.java b/common/src/main/java/org/apache/drill/common/expression/visitors/AggregateChecker.java
index 48ab3b4..81457b5 100644
--- a/common/src/main/java/org/apache/drill/common/expression/visitors/AggregateChecker.java
+++ b/common/src/main/java/org/apache/drill/common/expression/visitors/AggregateChecker.java
@@ -17,6 +17,7 @@
  */
 package org.apache.drill.common.expression.visitors;
 
+import org.apache.drill.common.expression.BooleanOperator;
 import org.apache.drill.common.expression.CastExpression;
 import org.apache.drill.common.expression.ConvertExpression;
 import org.apache.drill.common.expression.ErrorCollector;
@@ -79,6 +80,16 @@ public final class AggregateChecker implements ExprVisitor<Boolean, ErrorCollect
   }
 
   @Override
+  public Boolean visitBooleanOperator(BooleanOperator op, ErrorCollector errors){
+    for (LogicalExpression arg : op.args) {
+      if (arg.accept(this, errors))
+        return true;
+    }
+    
+    return false;
+  }
+  
+  @Override
   public Boolean visitIfExpression(IfExpression ifExpr, ErrorCollector errors) {
     for(IfCondition c : ifExpr.conditions){
       if(c.condition.accept(this, errors) || c.expression.accept(this, errors)) return true;

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/43bb57e7/common/src/main/java/org/apache/drill/common/expression/visitors/ConditionalExprOptimizer.java
----------------------------------------------------------------------
diff --git a/common/src/main/java/org/apache/drill/common/expression/visitors/ConditionalExprOptimizer.java b/common/src/main/java/org/apache/drill/common/expression/visitors/ConditionalExprOptimizer.java
new file mode 100644
index 0000000..cdf8729
--- /dev/null
+++ b/common/src/main/java/org/apache/drill/common/expression/visitors/ConditionalExprOptimizer.java
@@ -0,0 +1,125 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.drill.common.expression.visitors;
+
+import java.util.Collections;
+import java.util.Comparator;
+import java.util.List;
+
+import org.apache.drill.common.expression.BooleanOperator;
+import org.apache.drill.common.expression.CastExpression;
+import org.apache.drill.common.expression.ConvertExpression;
+import org.apache.drill.common.expression.FunctionCall;
+import org.apache.drill.common.expression.FunctionHolderExpression;
+import org.apache.drill.common.expression.IfExpression;
+import org.apache.drill.common.expression.LogicalExpression;
+import org.apache.drill.common.expression.NullExpression;
+import org.apache.drill.common.expression.IfExpression.IfCondition;
+import org.apache.drill.common.types.TypeProtos;
+import org.apache.drill.common.types.TypeProtos.DataMode;
+import org.apache.drill.common.types.TypeProtos.MajorType;
+
+import com.google.common.base.Function;
+import com.google.common.base.Optional;
+import com.google.common.base.Predicate;
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.Iterables;
+import com.google.common.collect.Lists;
+
+public class ConditionalExprOptimizer extends AbstractExprVisitor<LogicalExpression, Void, RuntimeException> {
+
+  public static final ConditionalExprOptimizer INSTANCE = new ConditionalExprOptimizer();
+  
+  @Override
+  public LogicalExpression visitBooleanOperator(BooleanOperator op, Void value) throws RuntimeException {    
+ 
+    List<LogicalExpression> newArgs = Lists.newArrayList();
+    
+    newArgs.addAll(op.args);
+    
+    Collections.sort(newArgs, costComparator);
+    
+    return new BooleanOperator(op.getName(), newArgs, op.getPosition());
+  }
+  
+
+  @Override
+  public LogicalExpression visitFunctionHolderExpression(FunctionHolderExpression holder, Void value) throws RuntimeException {
+    List<LogicalExpression> args = Lists.newArrayList();
+    for (int i = 0; i < holder.args.size(); ++i) {
+      LogicalExpression newExpr = holder.args.get(i).accept(this, value);
+      assert newExpr != null;
+      args.add(newExpr);
+    }
+
+    // Replace with a new function holder expression, since its arguments may have changed.
+    
+    return holder.copy(args);    
+  }
+
+  @Override
+  public LogicalExpression visitUnknown(LogicalExpression e, Void value) throws RuntimeException {
+    return e;
+  }
+  
+  
+  @Override
+  public LogicalExpression visitIfExpression(IfExpression ifExpr, Void value) throws RuntimeException{
+    List<IfExpression.IfCondition> conditions = Lists.newArrayList(ifExpr.conditions);
+    LogicalExpression newElseExpr = ifExpr.elseExpression.accept(this, value);
+
+    for (int i = 0; i < conditions.size(); ++i) {
+      IfExpression.IfCondition condition = conditions.get(i);
+
+      LogicalExpression newCondition = condition.condition.accept(this, value);
+      LogicalExpression newExpr = condition.expression.accept(this, value);
+      conditions.set(i, new IfExpression.IfCondition(newCondition, newExpr));
+    }
+
+    return IfExpression.newBuilder().setElse(newElseExpr).addConditions(conditions).build();
+  }
+  
+  @Override
+  public LogicalExpression visitFunctionCall(FunctionCall call, Void value) throws RuntimeException {
+    throw new UnsupportedOperationException("FunctionCall is not expected here. "
+        + "It should have been converted to FunctionHolderExpression in materialization");
+  }
+
+  @Override
+  public LogicalExpression visitCastExpression(CastExpression cast, Void value) throws RuntimeException {
+    throw new UnsupportedOperationException("CastExpression is not expected here. "
+        + "It should have been converted to FunctionHolderExpression in materialization");    
+  }
+  
+
+  @Override
+  public LogicalExpression visitConvertExpression(ConvertExpression cast, Void value) throws RuntimeException {
+    throw new UnsupportedOperationException("ConvertExpression is not expected here. "
+        + "It should have been converted to FunctionHolderExpression in materialization");    
+  }
+
+  private static Comparator<LogicalExpression> costComparator = new Comparator<LogicalExpression> () {
+    public int compare(LogicalExpression e1, LogicalExpression e2) {
+      return Integer.compare(e1.getCumulativeCost(), e2.getCumulativeCost()); // returns 0 on ties, honoring the Comparator contract Collections.sort relies on
+    }
+  };
+  
+  
+  
+}

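The optimizer above does one thing: it re-sorts the argument list of each booleanAnd/booleanOr in ascending order of cumulative cost, so the short-circuit code generated in EvaluationVisitor evaluates cheap predicates first. With the categories introduced in this commit (SIMPLE = 1, COMPLEX = 5), an AND of a COMPLEX date-cast comparison and a SIMPLE integer comparison is reordered so the integer comparison runs first, and a false value skips the cast entirely. Invocation is a one-liner over a materialized tree, exactly as ExpressionTreeMaterializer does later in this commit:

    // expr is a materialized LogicalExpression; the result is an equivalent
    // tree whose AND/OR arguments are ordered cheapest-first.
    LogicalExpression reordered = expr.accept(ConditionalExprOptimizer.INSTANCE, null);
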
http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/43bb57e7/common/src/main/java/org/apache/drill/common/expression/visitors/ConstantChecker.java
----------------------------------------------------------------------
diff --git a/common/src/main/java/org/apache/drill/common/expression/visitors/ConstantChecker.java b/common/src/main/java/org/apache/drill/common/expression/visitors/ConstantChecker.java
index 2b136a0..c73102a 100644
--- a/common/src/main/java/org/apache/drill/common/expression/visitors/ConstantChecker.java
+++ b/common/src/main/java/org/apache/drill/common/expression/visitors/ConstantChecker.java
@@ -17,6 +17,7 @@
  */
 package org.apache.drill.common.expression.visitors;
 
+import org.apache.drill.common.expression.BooleanOperator;
 import org.apache.drill.common.expression.CastExpression;
 import org.apache.drill.common.expression.ConvertExpression;
 import org.apache.drill.common.expression.ErrorCollector;
@@ -79,6 +80,15 @@ final class ConstantChecker implements ExprVisitor<Boolean, ErrorCollector, Runt
   }
 
   @Override
+  public Boolean visitBooleanOperator(BooleanOperator op, ErrorCollector errors) {
+    for (LogicalExpression e : op.args) {
+      if (!e.accept(this, errors))
+        return false;
+    }
+    return true;
+  }
+  
+  @Override
   public Boolean visitIfExpression(IfExpression ifExpr, ErrorCollector errors) {
     for (IfCondition c : ifExpr.conditions) {
       if (!c.condition.accept(this, errors) || !c.expression.accept(this, errors))

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/43bb57e7/common/src/main/java/org/apache/drill/common/expression/visitors/ExprVisitor.java
----------------------------------------------------------------------
diff --git a/common/src/main/java/org/apache/drill/common/expression/visitors/ExprVisitor.java b/common/src/main/java/org/apache/drill/common/expression/visitors/ExprVisitor.java
index d56a16a..cf01930 100644
--- a/common/src/main/java/org/apache/drill/common/expression/visitors/ExprVisitor.java
+++ b/common/src/main/java/org/apache/drill/common/expression/visitors/ExprVisitor.java
@@ -17,6 +17,7 @@
  */
 package org.apache.drill.common.expression.visitors;
 
+import org.apache.drill.common.expression.BooleanOperator;
 import org.apache.drill.common.expression.CastExpression;
 import org.apache.drill.common.expression.ConvertExpression;
 import org.apache.drill.common.expression.FunctionCall;
@@ -46,6 +47,7 @@ public interface ExprVisitor<T, VAL, EXCEP extends Exception> {
   public T visitFunctionCall(FunctionCall call, VAL value) throws EXCEP;
   public T visitFunctionHolderExpression(FunctionHolderExpression holder, VAL value) throws EXCEP;
   public T visitIfExpression(IfExpression ifExpr, VAL value) throws EXCEP;
+  public T visitBooleanOperator(BooleanOperator call, VAL value) throws EXCEP;
   public T visitSchemaPath(SchemaPath path, VAL value) throws EXCEP;
   public T visitIntConstant(IntExpression intExpr, VAL value) throws EXCEP;
   public T visitFloatConstant(FloatExpression fExpr, VAL value) throws EXCEP;

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/43bb57e7/common/src/main/java/org/apache/drill/common/expression/visitors/ExpressionValidator.java
----------------------------------------------------------------------
diff --git a/common/src/main/java/org/apache/drill/common/expression/visitors/ExpressionValidator.java b/common/src/main/java/org/apache/drill/common/expression/visitors/ExpressionValidator.java
index 1bfb57d..57f93d3 100644
--- a/common/src/main/java/org/apache/drill/common/expression/visitors/ExpressionValidator.java
+++ b/common/src/main/java/org/apache/drill/common/expression/visitors/ExpressionValidator.java
@@ -17,6 +17,7 @@
  */
 package org.apache.drill.common.expression.visitors;
 
+import org.apache.drill.common.expression.BooleanOperator;
 import org.apache.drill.common.expression.CastExpression;
 import org.apache.drill.common.expression.ConvertExpression;
 import org.apache.drill.common.expression.ErrorCollector;
@@ -73,6 +74,25 @@ public class ExpressionValidator implements ExprVisitor<Void, ErrorCollector, Ru
   }
 
   @Override
+  public Void visitBooleanOperator(BooleanOperator op, ErrorCollector errors) throws RuntimeException {
+    int i = 0;
+    for (LogicalExpression arg : op.args) {
+      if (arg.getMajorType().getMinorType() != MinorType.BIT) {
+        errors.addGeneralError(arg.getPosition(),
+            String.format(
+                "Failure composing boolean operator %s.  All conditions must return a boolean type.  Condition %d was of type %s.",
+                op.getName(), i, arg.getMajorType().getMinorType()));
+      }
+      i++;
+    }
+    
+    return null;
+  }
+  
+  @Override
   public Void visitIfExpression(IfExpression ifExpr, ErrorCollector errors) throws RuntimeException {
     // confirm that all conditions are required boolean values.
     int i = 0;

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/43bb57e7/common/src/main/java/org/apache/drill/common/types/Types.java
----------------------------------------------------------------------
diff --git a/common/src/main/java/org/apache/drill/common/types/Types.java b/common/src/main/java/org/apache/drill/common/types/Types.java
index 9280d03..10be21e 100644
--- a/common/src/main/java/org/apache/drill/common/types/Types.java
+++ b/common/src/main/java/org/apache/drill/common/types/Types.java
@@ -31,6 +31,7 @@ public class Types {
   public static final MajorType NULL = required(MinorType.NULL);
   public static final MajorType LATE_BIND_TYPE = optional(MinorType.LATE);
   public static final MajorType REQUIRED_BIT = required(MinorType.BIT);
+  public static final MajorType OPTIONAL_BIT = optional(MinorType.BIT);
 
   public static enum Comparability{
     UNKNOWN, NONE, EQUAL, ORDERED;

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/43bb57e7/exec/java-exec/src/main/codegen/templates/CastDateDate.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/codegen/templates/CastDateDate.java b/exec/java-exec/src/main/codegen/templates/CastDateDate.java
index 821323b..93edd5a 100644
--- a/exec/java-exec/src/main/codegen/templates/CastDateDate.java
+++ b/exec/java-exec/src/main/codegen/templates/CastDateDate.java
@@ -30,6 +30,7 @@ import io.netty.buffer.ByteBuf;
 
 import org.apache.drill.exec.expr.DrillSimpleFunc;
 import org.apache.drill.exec.expr.annotations.FunctionTemplate;
+import org.apache.drill.exec.expr.annotations.FunctionTemplate.FunctionCostCategory;
 import org.apache.drill.exec.expr.annotations.FunctionTemplate.NullHandling;
 import org.apache.drill.exec.expr.annotations.Output;
 import org.apache.drill.exec.expr.annotations.Param;
@@ -41,7 +42,7 @@ import org.joda.time.DateMidnight;
 import org.apache.drill.exec.expr.fn.impl.DateUtility;
 
 @SuppressWarnings("unused")
-@FunctionTemplate(name = "cast${type.to?upper_case}", scope = FunctionTemplate.FunctionScope.SIMPLE, nulls=NullHandling.NULL_IF_NULL)
+@FunctionTemplate(name = "cast${type.to?upper_case}", scope = FunctionTemplate.FunctionScope.SIMPLE, nulls=NullHandling.NULL_IF_NULL, costCategory=FunctionCostCategory.COMPLEX)
 public class Cast${type.from}To${type.to} implements DrillSimpleFunc {
 
   @Param ${type.from}Holder in;

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/43bb57e7/exec/java-exec/src/main/codegen/templates/CastDateVarChar.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/codegen/templates/CastDateVarChar.java b/exec/java-exec/src/main/codegen/templates/CastDateVarChar.java
index 5b7ed6d..e2fd9d5 100644
--- a/exec/java-exec/src/main/codegen/templates/CastDateVarChar.java
+++ b/exec/java-exec/src/main/codegen/templates/CastDateVarChar.java
@@ -32,6 +32,7 @@ import io.netty.buffer.ByteBuf;
 
 import org.apache.drill.exec.expr.DrillSimpleFunc;
 import org.apache.drill.exec.expr.annotations.FunctionTemplate;
+import org.apache.drill.exec.expr.annotations.FunctionTemplate.FunctionCostCategory;
 import org.apache.drill.exec.expr.annotations.FunctionTemplate.NullHandling;
 import org.apache.drill.exec.expr.annotations.Output;
 import org.apache.drill.exec.expr.annotations.Param;
@@ -44,7 +45,8 @@ import org.joda.time.DateMidnight;
 import org.apache.drill.exec.expr.fn.impl.DateUtility;
 
 @SuppressWarnings("unused")
-@FunctionTemplate(name = "cast${type.to?upper_case}", scope = FunctionTemplate.FunctionScope.SIMPLE, nulls=NullHandling.NULL_IF_NULL)
+@FunctionTemplate(name = "cast${type.to?upper_case}", scope = FunctionTemplate.FunctionScope.SIMPLE, nulls=NullHandling.NULL_IF_NULL, 
+  costCategory = FunctionCostCategory.COMPLEX)
 public class Cast${type.from}To${type.to} implements DrillSimpleFunc {
 
   @Param ${type.from}Holder in;

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/43bb57e7/exec/java-exec/src/main/codegen/templates/CastIntervalVarChar.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/codegen/templates/CastIntervalVarChar.java b/exec/java-exec/src/main/codegen/templates/CastIntervalVarChar.java
index 19adb27..4c88fcc 100644
--- a/exec/java-exec/src/main/codegen/templates/CastIntervalVarChar.java
+++ b/exec/java-exec/src/main/codegen/templates/CastIntervalVarChar.java
@@ -31,6 +31,7 @@ import io.netty.buffer.ByteBuf;
 
 import org.apache.drill.exec.expr.DrillSimpleFunc;
 import org.apache.drill.exec.expr.annotations.FunctionTemplate;
+import org.apache.drill.exec.expr.annotations.FunctionTemplate.FunctionCostCategory;
 import org.apache.drill.exec.expr.annotations.FunctionTemplate.NullHandling;
 import org.apache.drill.exec.expr.annotations.Output;
 import org.apache.drill.exec.expr.annotations.Param;
@@ -98,10 +99,12 @@ public class Cast${type.from}To${type.to} implements DrillSimpleFunc {
 
 package org.apache.drill.exec.expr.fn.impl.gcast;
 
+
 import io.netty.buffer.ByteBuf;
 
 import org.apache.drill.exec.expr.DrillSimpleFunc;
 import org.apache.drill.exec.expr.annotations.FunctionTemplate;
+import org.apache.drill.exec.expr.annotations.FunctionTemplate.FunctionCostCategory;
 import org.apache.drill.exec.expr.annotations.FunctionTemplate.NullHandling;
 import org.apache.drill.exec.expr.annotations.Output;
 import org.apache.drill.exec.expr.annotations.Param;
@@ -150,10 +153,12 @@ public class Cast${type.from}To${type.to} implements DrillSimpleFunc {
 
 package org.apache.drill.exec.expr.fn.impl.gcast;
 
+
 import io.netty.buffer.ByteBuf;
 
 import org.apache.drill.exec.expr.DrillSimpleFunc;
 import org.apache.drill.exec.expr.annotations.FunctionTemplate;
+import org.apache.drill.exec.expr.annotations.FunctionTemplate.FunctionCostCategory;
 import org.apache.drill.exec.expr.annotations.FunctionTemplate.NullHandling;
 import org.apache.drill.exec.expr.annotations.Output;
 import org.apache.drill.exec.expr.annotations.Param;
@@ -166,7 +171,8 @@ import org.joda.time.DateMidnight;
 import org.apache.drill.exec.expr.fn.impl.DateUtility;
 
 @SuppressWarnings("unused")
-@FunctionTemplate(name = "cast${type.to?upper_case}", scope = FunctionTemplate.FunctionScope.SIMPLE, nulls=NullHandling.NULL_IF_NULL)
+@FunctionTemplate(name = "cast${type.to?upper_case}", scope = FunctionTemplate.FunctionScope.SIMPLE, nulls=NullHandling.NULL_IF_NULL, 
+  costCategory = FunctionCostCategory.COMPLEX)
 public class Cast${type.from}To${type.to} implements DrillSimpleFunc {
 
   @Param ${type.from}Holder in;

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/43bb57e7/exec/java-exec/src/main/codegen/templates/CastVarCharDate.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/codegen/templates/CastVarCharDate.java b/exec/java-exec/src/main/codegen/templates/CastVarCharDate.java
index 5a3127a..e2e1143 100644
--- a/exec/java-exec/src/main/codegen/templates/CastVarCharDate.java
+++ b/exec/java-exec/src/main/codegen/templates/CastVarCharDate.java
@@ -30,6 +30,7 @@ import io.netty.buffer.ByteBuf;
 
 import org.apache.drill.exec.expr.DrillSimpleFunc;
 import org.apache.drill.exec.expr.annotations.FunctionTemplate;
+import org.apache.drill.exec.expr.annotations.FunctionTemplate.FunctionCostCategory;
 import org.apache.drill.exec.expr.annotations.FunctionTemplate.NullHandling;
 import org.apache.drill.exec.expr.annotations.Output;
 import org.apache.drill.exec.expr.annotations.Param;
@@ -41,7 +42,8 @@ import org.joda.time.DateMidnight;
 import org.apache.drill.exec.expr.fn.impl.DateUtility;
 
 @SuppressWarnings("unused")
-@FunctionTemplate(names = {"cast${type.to?upper_case}", "${type.alias}"}, scope = FunctionTemplate.FunctionScope.SIMPLE, nulls=NullHandling.NULL_IF_NULL)
+@FunctionTemplate(names = {"cast${type.to?upper_case}", "${type.alias}"}, scope = FunctionTemplate.FunctionScope.SIMPLE, nulls=NullHandling.NULL_IF_NULL, 
+  costCategory = FunctionCostCategory.COMPLEX)
 public class Cast${type.from}To${type.to} implements DrillSimpleFunc {
 
   @Param ${type.from}Holder in;

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/43bb57e7/exec/java-exec/src/main/codegen/templates/DateIntervalFunctions.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/codegen/templates/DateIntervalFunctions.java b/exec/java-exec/src/main/codegen/templates/DateIntervalFunctions.java
index a4bea61..b518c44 100644
--- a/exec/java-exec/src/main/codegen/templates/DateIntervalFunctions.java
+++ b/exec/java-exec/src/main/codegen/templates/DateIntervalFunctions.java
@@ -27,10 +27,9 @@
 
 package org.apache.drill.exec.expr.fn.impl;
 
-import javax.xml.ws.Holder;
-
 import org.apache.drill.exec.expr.DrillSimpleFunc;
 import org.apache.drill.exec.expr.annotations.FunctionTemplate;
+import org.apache.drill.exec.expr.annotations.FunctionTemplate.FunctionCostCategory;
 import org.apache.drill.exec.expr.annotations.FunctionTemplate.NullHandling;
 import org.apache.drill.exec.expr.annotations.Output;
 import org.apache.drill.exec.expr.annotations.Param;
@@ -191,6 +190,7 @@ package org.apache.drill.exec.expr.fn.impl;
 
 import org.apache.drill.exec.expr.DrillSimpleFunc;
 import org.apache.drill.exec.expr.annotations.FunctionTemplate;
+import org.apache.drill.exec.expr.annotations.FunctionTemplate.FunctionCostCategory;
 import org.apache.drill.exec.expr.annotations.FunctionTemplate.NullHandling;
 import org.apache.drill.exec.expr.annotations.Output;
 import org.apache.drill.exec.expr.annotations.Param;
@@ -200,7 +200,8 @@ import org.apache.drill.exec.record.RecordBatch;
 @SuppressWarnings("unused")
 public class GCompare${type.name}Functions {
 
-  @FunctionTemplate(name = "compare_to", scope = FunctionTemplate.FunctionScope.SIMPLE, nulls = NullHandling.NULL_IF_NULL)
+  @FunctionTemplate(name = "compare_to", scope = FunctionTemplate.FunctionScope.SIMPLE, nulls = NullHandling.NULL_IF_NULL, 
+      costCategory = FunctionCostCategory.COMPLEX)
   public static class GCCompare${type.name} implements DrillSimpleFunc {
 
       @Param ${type.name}Holder left;
@@ -220,7 +221,8 @@ public class GCompare${type.name}Functions {
       }
   }
 
-  @FunctionTemplate(name = "less_than", scope = FunctionTemplate.FunctionScope.SIMPLE, nulls = NullHandling.NULL_IF_NULL)
+  @FunctionTemplate(name = "less_than", scope = FunctionTemplate.FunctionScope.SIMPLE, nulls = NullHandling.NULL_IF_NULL, 
+      costCategory = FunctionCostCategory.COMPLEX)
   public static class LessThan${type.name} implements DrillSimpleFunc {
 
       @Param ${type.name}Holder left;
@@ -241,7 +243,8 @@ public class GCompare${type.name}Functions {
       }
   }
 
-  @FunctionTemplate(name = "less_than_or_equal_to", scope = FunctionTemplate.FunctionScope.SIMPLE, nulls = NullHandling.NULL_IF_NULL)
+  @FunctionTemplate(name = "less_than_or_equal_to", scope = FunctionTemplate.FunctionScope.SIMPLE, nulls = NullHandling.NULL_IF_NULL, 
+      costCategory = FunctionCostCategory.COMPLEX)
   public static class LessThanE${type.name} implements DrillSimpleFunc {
 
       @Param ${type.name}Holder left;
@@ -262,7 +265,8 @@ public class GCompare${type.name}Functions {
     }
   }
 
-  @FunctionTemplate(name = "greater_than", scope = FunctionTemplate.FunctionScope.SIMPLE, nulls = NullHandling.NULL_IF_NULL)
+  @FunctionTemplate(name = "greater_than", scope = FunctionTemplate.FunctionScope.SIMPLE, nulls = NullHandling.NULL_IF_NULL,
+      costCategory = FunctionCostCategory.COMPLEX)
   public static class GreaterThan${type.name} implements DrillSimpleFunc {
 
       @Param ${type.name}Holder left;
@@ -283,7 +287,8 @@ public class GCompare${type.name}Functions {
     }
   }
 
-  @FunctionTemplate(name = "greater_than_or_equal_to", scope = FunctionTemplate.FunctionScope.SIMPLE, nulls = NullHandling.NULL_IF_NULL)
+  @FunctionTemplate(name = "greater_than_or_equal_to", scope = FunctionTemplate.FunctionScope.SIMPLE, nulls = NullHandling.NULL_IF_NULL,
+      costCategory = FunctionCostCategory.COMPLEX)
   public static class GreaterThanE${type.name} implements DrillSimpleFunc {
 
       @Param ${type.name}Holder left;
@@ -304,7 +309,8 @@ public class GCompare${type.name}Functions {
       }
   }
 
-  @FunctionTemplate(name = "equal", scope = FunctionTemplate.FunctionScope.SIMPLE, nulls = NullHandling.NULL_IF_NULL)
+  @FunctionTemplate(name = "equal", scope = FunctionTemplate.FunctionScope.SIMPLE, nulls = NullHandling.NULL_IF_NULL,
+      costCategory = FunctionCostCategory.COMPLEX)
   public static class Equals${type.name} implements DrillSimpleFunc {
 
       @Param ${type.name}Holder left;
@@ -324,7 +330,8 @@ public class GCompare${type.name}Functions {
       }
   }
 
-  @FunctionTemplate(name = "not_equal", scope = FunctionTemplate.FunctionScope.SIMPLE, nulls = NullHandling.NULL_IF_NULL)
+  @FunctionTemplate(name = "not_equal", scope = FunctionTemplate.FunctionScope.SIMPLE, nulls = NullHandling.NULL_IF_NULL,
+      costCategory = FunctionCostCategory.COMPLEX)
   public static class NotEquals${type.name} implements DrillSimpleFunc {
 
       @Param ${type.name}Holder left;

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/43bb57e7/exec/java-exec/src/main/java/org/apache/drill/exec/compile/sig/ConstantExpressionIdentifier.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/compile/sig/ConstantExpressionIdentifier.java b/exec/java-exec/src/main/java/org/apache/drill/exec/compile/sig/ConstantExpressionIdentifier.java
index 489f623..c65951d 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/compile/sig/ConstantExpressionIdentifier.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/compile/sig/ConstantExpressionIdentifier.java
@@ -22,6 +22,7 @@ import java.util.IdentityHashMap;
 import java.util.List;
 import java.util.Set;
 
+import org.apache.drill.common.expression.BooleanOperator;
 import org.apache.drill.common.expression.CastExpression;
 import org.apache.drill.common.expression.ConvertExpression;
 import org.apache.drill.common.expression.FunctionCall;
@@ -108,7 +109,12 @@ public class ConstantExpressionIdentifier implements ExprVisitor<Boolean, Identi
   public Boolean visitFunctionHolderExpression(FunctionHolderExpression holder, IdentityHashMap<LogicalExpression, Object> value) throws RuntimeException {
     return checkChildren(holder, value, !holder.isAggregating() && !holder.isRandom());
    }
-  
+
+  @Override
+  public Boolean visitBooleanOperator(BooleanOperator op, IdentityHashMap<LogicalExpression, Object> value) throws RuntimeException {
+    return checkChildren(op, value, true);
+   }
+
   @Override
   public Boolean visitIfExpression(IfExpression ifExpr, IdentityHashMap<LogicalExpression, Object> value){
     return checkChildren(ifExpr, value, true);

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/43bb57e7/exec/java-exec/src/main/java/org/apache/drill/exec/expr/DrillFuncHolderExpr.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/expr/DrillFuncHolderExpr.java b/exec/java-exec/src/main/java/org/apache/drill/exec/expr/DrillFuncHolderExpr.java
index f164bd8..0341c45 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/expr/DrillFuncHolderExpr.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/expr/DrillFuncHolderExpr.java
@@ -71,4 +71,27 @@ public class DrillFuncHolderExpr extends FunctionHolderExpression implements Ite
   public boolean isComplexWriterFuncHolder() {
     return holder instanceof DrillComplexWriterFuncHolder;
   }
+ 
+  @Override
+  public int getSelfCost() { 
+    return holder.getCostCategory();
+  }
+  
+  @Override
+  public int getCumulativeCost() { 
+    int cost = this.getSelfCost();
+    
+    for (LogicalExpression arg : this.args) {
+      cost += arg.getCumulativeCost();
+    }
+    
+    return cost;
+  }
+
+  @Override
+  public DrillFuncHolderExpr copy(List<LogicalExpression> args) {
+    return new DrillFuncHolderExpr(this.nameUsed, this.holder, args, this.getPosition());
+  }
+  
 }
+

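To make the recursion in getCumulativeCost() concrete: with SIMPLE = 1 and COMPLEX = 5, and value-vector reads currently costed at 0 (the TODOs elsewhere in this commit), a hypothetical expression add(col1, castDATE(col2)), where add carries the default SIMPLE category and castDATE is annotated COMPLEX, works out as:

    getCumulativeCost(add(col1, castDATE(col2)))
        = selfCost(add)                       // SIMPLE      -> 1
        + getCumulativeCost(col1)             // vector read -> 0
        + getCumulativeCost(castDATE(col2))   // COMPLEX (5) + 0 -> 5
        = 6
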
http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/43bb57e7/exec/java-exec/src/main/java/org/apache/drill/exec/expr/EvaluationVisitor.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/expr/EvaluationVisitor.java b/exec/java-exec/src/main/java/org/apache/drill/exec/expr/EvaluationVisitor.java
index d65e618..f2019b8 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/expr/EvaluationVisitor.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/expr/EvaluationVisitor.java
@@ -21,6 +21,7 @@ import java.util.List;
 import java.util.Set;
 
 import org.apache.commons.io.input.NullReader;
+import org.apache.drill.common.expression.BooleanOperator;
 import org.apache.drill.common.expression.CastExpression;
 import org.apache.drill.common.expression.ConvertExpression;
 import org.apache.drill.common.expression.FunctionCall;
@@ -55,7 +56,7 @@ import org.apache.drill.exec.compile.sig.ConstantExpressionIdentifier;
 import org.apache.drill.exec.expr.ClassGenerator.BlockType;
 import org.apache.drill.exec.expr.ClassGenerator.HoldingContainer;
 import org.apache.drill.exec.expr.fn.DrillFuncHolder;
-import org.apache.drill.exec.expr.fn.DrillSCBooleanOPHolder;
+import org.apache.drill.exec.expr.fn.DrillBooleanOPHolder;
 import org.apache.drill.exec.expr.fn.FunctionImplementationRegistry;
 import org.apache.drill.exec.expr.fn.HiveFuncHolder;
 import org.apache.drill.exec.physical.impl.filter.ReturnValueExpression;
@@ -97,6 +98,18 @@ public class EvaluationVisitor {
     }
 
     @Override
+    public HoldingContainer visitBooleanOperator(BooleanOperator op,         
+        ClassGenerator<?> generator) throws RuntimeException {
+      if (op.getName().equals("booleanAnd")) {
+        return visitBooleanAnd(op, generator);
+      } else if (op.getName().equals("booleanOr")) {
+        return visitBooleanOr(op, generator);
+      } else {
+        throw new UnsupportedOperationException("BooleanOperator can only be booleanAnd or booleanOr, but found " + op.getName());
+      }
+    }
+    
+    @Override
     public HoldingContainer visitFunctionHolderExpression(FunctionHolderExpression holderExpr,
         ClassGenerator<?> generator) throws RuntimeException {
       // TODO: hack: (Drill/Hive)FuncHolderExpr reference classes in exec so
@@ -106,13 +119,6 @@ public class EvaluationVisitor {
       if (holderExpr instanceof DrillFuncHolderExpr) {
         DrillFuncHolder holder = ((DrillFuncHolderExpr) holderExpr).getHolder();
         
-        if (holder instanceof DrillSCBooleanOPHolder && holderExpr.getName().equals("booleanAnd")) {
-          return visitBooleanAnd(holderExpr, generator);
-        }
-
-        if (holder instanceof DrillSCBooleanOPHolder && holderExpr.getName().equals("booleanOr")) {
-          return visitBooleanOr(holderExpr, generator);
-        }
 
         JVar[] workspaceVars = holder.renderStart(generator, null);
 
@@ -603,10 +609,10 @@ public class EvaluationVisitor {
       return fc.accept(this, value);
     }
     
-    private HoldingContainer visitBooleanAnd(FunctionHolderExpression holderExpr,
+    private HoldingContainer visitBooleanAnd(BooleanOperator op,
         ClassGenerator<?> generator) {
       
-      HoldingContainer out = generator.declare(holderExpr.getMajorType());
+      HoldingContainer out = generator.declare(op.getMajorType());
       
       JLabel label = generator.getEvalBlockLabel("AndOP");
       JBlock eval = generator.getEvalBlock().block();  // enter into nested block
@@ -623,8 +629,8 @@ public class EvaluationVisitor {
       //    null    true     null
       //    null    false    false
       //    null    null     null
-      for (int i = 0; i < holderExpr.args.size(); i++) {
-        arg = holderExpr.args.get(i).accept(this, generator);
+      for (int i = 0; i < op.args.size(); i++) {
+        arg = op.args.get(i).accept(this, generator);
         
         JBlock earlyExit = null;
         if (arg.isOptional()) {
@@ -665,10 +671,10 @@ public class EvaluationVisitor {
       return out;
     }
     
-    private HoldingContainer visitBooleanOr(FunctionHolderExpression holderExpr,
+    private HoldingContainer visitBooleanOr(BooleanOperator op,
         ClassGenerator<?> generator) {
       
-      HoldingContainer out = generator.declare(holderExpr.getMajorType());
+      HoldingContainer out = generator.declare(op.getMajorType());
       
       JLabel label = generator.getEvalBlockLabel("OrOP");
       JBlock eval = generator.getEvalBlock().block();
@@ -686,8 +692,8 @@ public class EvaluationVisitor {
       //    null    false    null
       //    null    null     null
       
-      for (int i = 0; i < holderExpr.args.size(); i++) {
-        arg = holderExpr.args.get(i).accept(this, generator);
+      for (int i = 0; i < op.args.size(); i++) {
+        arg = op.args.get(i).accept(this, generator);
         
         JBlock earlyExit = null;
         if (arg.isOptional()) {
@@ -762,6 +768,19 @@ public class EvaluationVisitor {
     }
 
     @Override
+    public HoldingContainer visitBooleanOperator(BooleanOperator e, ClassGenerator<?> generator)
+        throws RuntimeException {
+      if (constantBoundaries.contains(e)) {
+        generator.getMappingSet().enterConstant();
+        HoldingContainer c = super.visitBooleanOperator(e, generator);
+        return renderConstantExpression(generator, c);
+      } else if (generator.getMappingSet().isWithinConstant()) {
+        return super.visitBooleanOperator(e, generator).setConstant(true);
+      } else {
+        return super.visitBooleanOperator(e, generator);
+      }
+    }
+    @Override
     public HoldingContainer visitIfExpression(IfExpression e, ClassGenerator<?> generator) throws RuntimeException {
       if (constantBoundaries.contains(e)) {
         generator.getMappingSet().enterConstant();

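The generated AndOP/OrOP blocks implement three-valued logic with an early exit, per the truth tables in the comments above. For AND, the runtime semantics amount to the following plain-Java sketch (illustration only, not the generated code itself):

    // args holds nullable boolean operand values: TRUE, FALSE, or null.
    Boolean result = Boolean.TRUE;
    for (Boolean arg : args) {
      if (arg != null && !arg) {
        result = Boolean.FALSE;  // any false wins; the generated code jumps out here
        break;
      }
      if (arg == null) {
        result = null;           // remember the null, but keep scanning for a false
      }
    }
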
http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/43bb57e7/exec/java-exec/src/main/java/org/apache/drill/exec/expr/ExpressionTreeMaterializer.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/expr/ExpressionTreeMaterializer.java b/exec/java-exec/src/main/java/org/apache/drill/exec/expr/ExpressionTreeMaterializer.java
index 18609f8..4e10d20 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/expr/ExpressionTreeMaterializer.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/expr/ExpressionTreeMaterializer.java
@@ -26,6 +26,7 @@ import com.google.common.base.Predicate;
 import com.google.common.collect.ImmutableList;
 import com.google.common.collect.Iterables;
 
+import org.apache.drill.common.expression.BooleanOperator;
 import org.apache.drill.common.expression.CastExpression;
 import org.apache.drill.common.expression.ConvertExpression;
 import org.apache.drill.common.expression.ErrorCollector;
@@ -57,6 +58,7 @@ import org.apache.drill.common.expression.ValueExpressions.IntExpression;
 import org.apache.drill.common.expression.ValueExpressions.QuotedString;
 import org.apache.drill.common.expression.fn.CastFunctions;
 import org.apache.drill.common.expression.visitors.AbstractExprVisitor;
+import org.apache.drill.common.expression.visitors.ConditionalExprOptimizer;
 import org.apache.drill.common.expression.visitors.ExpressionValidator;
 import org.apache.drill.common.types.TypeProtos;
 import org.apache.drill.common.types.TypeProtos.DataMode;
@@ -89,6 +91,11 @@ public class ExpressionTreeMaterializer {
   public static LogicalExpression materialize(LogicalExpression expr, VectorAccessible batch, ErrorCollector errorCollector, FunctionImplementationRegistry registry, 
       boolean allowComplexWriterExpr) {
     LogicalExpression out =  expr.accept(new MaterializeVisitor(batch, errorCollector, allowComplexWriterExpr), registry);
+    
+    if (!errorCollector.hasErrors()) {
+      out = out.accept(ConditionalExprOptimizer.INSTANCE, null);
+    }
+    
     if(out instanceof NullExpression){
       return new TypedNullConstant(Types.optional(MinorType.INT));
     }else{
@@ -126,6 +133,19 @@ public class ExpressionTreeMaterializer {
     }
 
     @Override
+    public LogicalExpression visitBooleanOperator(BooleanOperator op, FunctionImplementationRegistry registry) {
+      List<LogicalExpression> args = Lists.newArrayList();
+      for (int i = 0; i < op.args.size(); ++i) {
+        LogicalExpression newExpr = op.args.get(i).accept(this, registry);
+        assert newExpr != null : String.format("Materialization of %s return a null expression.", op.args.get(i));
+        args.add(newExpr);
+      }
+
+      // Replace with a new BooleanOperator, since its arguments may have changed.
+      return new BooleanOperator(op.getName(), args, op.getPosition());      
+    }
+    
+    @Override
     public LogicalExpression visitFunctionCall(FunctionCall call, FunctionImplementationRegistry registry) {
       List<LogicalExpression> args = Lists.newArrayList();
       for (int i = 0; i < call.args.size(); ++i) {

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/43bb57e7/exec/java-exec/src/main/java/org/apache/drill/exec/expr/HiveFuncHolderExpr.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/expr/HiveFuncHolderExpr.java b/exec/java-exec/src/main/java/org/apache/drill/exec/expr/HiveFuncHolderExpr.java
index afcd9cd..c765d39 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/expr/HiveFuncHolderExpr.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/expr/HiveFuncHolderExpr.java
@@ -66,4 +66,10 @@ public class HiveFuncHolderExpr extends FunctionHolderExpression implements Iter
   public boolean isRandom() {
     return holder.isRandom();
   }
+  
+  @Override
+  public HiveFuncHolderExpr copy(List<LogicalExpression> args) {
+    return new HiveFuncHolderExpr(this.nameUsed, this.holder, args, this.getPosition());
+  }
+  
 }

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/43bb57e7/exec/java-exec/src/main/java/org/apache/drill/exec/expr/HoldingContainerExpression.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/expr/HoldingContainerExpression.java b/exec/java-exec/src/main/java/org/apache/drill/exec/expr/HoldingContainerExpression.java
index fd26e86..45051b9 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/expr/HoldingContainerExpression.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/expr/HoldingContainerExpression.java
@@ -25,6 +25,7 @@ import org.apache.drill.common.expression.visitors.ExprVisitor;
 import org.apache.drill.common.types.TypeProtos.MajorType;
 import org.apache.drill.exec.expr.ClassGenerator.HoldingContainer;
 
+import com.fasterxml.jackson.annotation.JsonIgnore;
 import com.google.common.collect.Iterators;
 
 public class HoldingContainerExpression implements LogicalExpression{
@@ -60,4 +61,15 @@ public class HoldingContainerExpression implements LogicalExpression{
   public ExpressionPosition getPosition() {
     return ExpressionPosition.UNKNOWN;
   }
+  
+  @Override @JsonIgnore
+  public int getSelfCost() { 
+    return 0;  // TODO 
+  }
+  
+  @Override @JsonIgnore
+  public int getCumulativeCost() { 
+    return 0; // TODO
+  }
+    
 }

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/43bb57e7/exec/java-exec/src/main/java/org/apache/drill/exec/expr/ValueVectorReadExpression.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/expr/ValueVectorReadExpression.java b/exec/java-exec/src/main/java/org/apache/drill/exec/expr/ValueVectorReadExpression.java
index 6e2809a..f8236d9 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/expr/ValueVectorReadExpression.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/expr/ValueVectorReadExpression.java
@@ -78,6 +78,15 @@ public class ValueVectorReadExpression implements LogicalExpression{
   public Iterator<LogicalExpression> iterator() {
     return Iterators.emptyIterator();
   }
-
-
+  
+  @Override
+  public int getSelfCost() { 
+    return 0;  // TODO 
+  }
+  
+  @Override
+  public int getCumulativeCost() { 
+    return 0; // TODO
+  }
+  
 }

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/43bb57e7/exec/java-exec/src/main/java/org/apache/drill/exec/expr/ValueVectorWriteExpression.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/expr/ValueVectorWriteExpression.java b/exec/java-exec/src/main/java/org/apache/drill/exec/expr/ValueVectorWriteExpression.java
index 02277af..d277057 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/expr/ValueVectorWriteExpression.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/expr/ValueVectorWriteExpression.java
@@ -78,5 +78,15 @@ public class ValueVectorWriteExpression implements LogicalExpression {
     return Iterators.singletonIterator(child);
   }
 
+  @Override
+  public int getSelfCost() { 
+    return 0;  // TODO 
+  }
+  
+  @Override
+  public int getCumulativeCost() { 
+    return 0; // TODO
+  }
   
+
 }

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/43bb57e7/exec/java-exec/src/main/java/org/apache/drill/exec/expr/annotations/FunctionTemplate.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/expr/annotations/FunctionTemplate.java b/exec/java-exec/src/main/java/org/apache/drill/exec/expr/annotations/FunctionTemplate.java
index b364a4a..be43d38 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/expr/annotations/FunctionTemplate.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/expr/annotations/FunctionTemplate.java
@@ -46,6 +46,8 @@ public @interface FunctionTemplate {
   boolean isBinaryCommutative() default false;
   boolean isRandom()  default false;
   
+  FunctionCostCategory costCategory() default FunctionCostCategory.SIMPLE;
+  
   public static enum NullHandling {
     INTERNAL, NULL_IF_NULL;
   }
@@ -64,4 +66,23 @@ public @interface FunctionTemplate {
     DECIMAL_ZERO_SCALE,
     SC_BOOLEAN_OPERATOR
   }
+  
+  public static enum FunctionCostCategory {
+    SIMPLE(1), MEDIUM(3), COMPLEX(5);
+    
+    private final int value;
+    
+    private FunctionCostCategory(int value) {
+      this.value = value;
+    }
+    
+    public int getValue() {
+      return this.value;
+    }
+    
+    public static FunctionCostCategory getDefault() {
+      return SIMPLE;
+    }
+    
+  }
 }

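The new costCategory attribute lets a function author declare relative evaluation cost at registration time; SIMPLE remains the default, so existing functions are unaffected. A hedged sketch of tagging an expensive UDF (the function and class names are illustrative; assumes the usual UDF imports such as FunctionTemplate, NullHandling, the holder classes, and RecordBatch):

    @SuppressWarnings("unused")
    @FunctionTemplate(name = "expensive_predicate",
        scope = FunctionTemplate.FunctionScope.SIMPLE,
        nulls = NullHandling.NULL_IF_NULL,
        costCategory = FunctionCostCategory.COMPLEX)
    public static class ExpensivePredicate implements DrillSimpleFunc {
      @Param  VarCharHolder in;
      @Output BitHolder out;

      public void setup(RecordBatch incoming) { }

      public void eval() {
        // costly per-row work here; COMPLEX (5) pushes this predicate behind
        // cheaper SIMPLE (1) conjuncts when ConditionalExprOptimizer reorders
        out.value = 0;
      }
    }
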
http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/43bb57e7/exec/java-exec/src/main/java/org/apache/drill/exec/expr/fn/DrillAggFuncHolder.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/expr/fn/DrillAggFuncHolder.java b/exec/java-exec/src/main/java/org/apache/drill/exec/expr/fn/DrillAggFuncHolder.java
index 48c35f2..efe3ee3 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/expr/fn/DrillAggFuncHolder.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/expr/fn/DrillAggFuncHolder.java
@@ -25,6 +25,7 @@ import org.apache.drill.common.types.Types;
 import org.apache.drill.exec.expr.ClassGenerator;
 import org.apache.drill.exec.expr.ClassGenerator.BlockType;
 import org.apache.drill.exec.expr.ClassGenerator.HoldingContainer;
+import org.apache.drill.exec.expr.annotations.FunctionTemplate.FunctionCostCategory;
 import org.apache.drill.exec.expr.annotations.FunctionTemplate.FunctionScope;
 import org.apache.drill.exec.expr.annotations.FunctionTemplate.NullHandling;
 import org.apache.drill.exec.record.TypedFieldId;
@@ -48,12 +49,18 @@ class DrillAggFuncHolder extends DrillFuncHolder{
   private final String add;
   private final String output;
   private final String cleanup;
-  
+
   public DrillAggFuncHolder(FunctionScope scope, NullHandling nullHandling, boolean isBinaryCommutative, boolean isRandom,
       String[] registeredNames, ValueReference[] parameters, ValueReference returnValue, WorkspaceReference[] workspaceVars,
       Map<String, String> methods, List<String> imports) {
+    this(scope, nullHandling, isBinaryCommutative, isRandom, registeredNames, parameters, returnValue, workspaceVars, methods, imports, FunctionCostCategory.getDefault());
+  }
+  
+  public DrillAggFuncHolder(FunctionScope scope, NullHandling nullHandling, boolean isBinaryCommutative, boolean isRandom,
+      String[] registeredNames, ValueReference[] parameters, ValueReference returnValue, WorkspaceReference[] workspaceVars,
+      Map<String, String> methods, List<String> imports, FunctionCostCategory costCategory) {
     super(scope, nullHandling, isBinaryCommutative, isRandom,
-      registeredNames, parameters, returnValue, workspaceVars, methods, imports);
+      registeredNames, parameters, returnValue, workspaceVars, methods, imports, costCategory);
     Preconditions.checkArgument(nullHandling == NullHandling.INTERNAL, "An aggregation function is required to do its own null handling.");
     setup = methods.get("setup");
     reset = methods.get("reset");

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/43bb57e7/exec/java-exec/src/main/java/org/apache/drill/exec/expr/fn/DrillBooleanOPHolder.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/expr/fn/DrillBooleanOPHolder.java b/exec/java-exec/src/main/java/org/apache/drill/exec/expr/fn/DrillBooleanOPHolder.java
new file mode 100644
index 0000000..9032d37
--- /dev/null
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/expr/fn/DrillBooleanOPHolder.java
@@ -0,0 +1,36 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.drill.exec.expr.fn;
+
+import java.util.List;
+import java.util.Map;
+
+import org.apache.drill.exec.expr.annotations.FunctionTemplate.FunctionCostCategory;
+import org.apache.drill.exec.expr.annotations.FunctionTemplate.FunctionScope;
+import org.apache.drill.exec.expr.annotations.FunctionTemplate.NullHandling;
+
+public class DrillBooleanOPHolder extends DrillSimpleFuncHolder{
+
+  public DrillBooleanOPHolder(FunctionScope scope, NullHandling nullHandling, boolean isBinaryCommutative, boolean isRandom,
+      String[] registeredNames, ValueReference[] parameters, ValueReference returnValue, WorkspaceReference[] workspaceVars,
+      Map<String, String> methods, List<String> imports) {
+    super(scope, nullHandling, isBinaryCommutative, isRandom, registeredNames, parameters, returnValue, workspaceVars, methods, imports, FunctionCostCategory.getDefault());
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/43bb57e7/exec/java-exec/src/main/java/org/apache/drill/exec/expr/fn/DrillFuncHolder.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/expr/fn/DrillFuncHolder.java b/exec/java-exec/src/main/java/org/apache/drill/exec/expr/fn/DrillFuncHolder.java
index fc8dc00..f3c1e13 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/expr/fn/DrillFuncHolder.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/expr/fn/DrillFuncHolder.java
@@ -31,6 +31,7 @@ import org.apache.drill.exec.expr.ClassGenerator.BlockType;
 import org.apache.drill.exec.expr.ClassGenerator.HoldingContainer;
 import org.apache.drill.exec.expr.TypeHelper;
 import org.apache.drill.exec.expr.annotations.FunctionTemplate;
+import org.apache.drill.exec.expr.annotations.FunctionTemplate.FunctionCostCategory;
 import org.apache.drill.exec.expr.annotations.FunctionTemplate.FunctionScope;
 import org.apache.drill.exec.expr.annotations.FunctionTemplate.NullHandling;
 import org.apache.drill.exec.vector.complex.impl.NullableBigIntSingularReaderImpl;
@@ -51,6 +52,7 @@ public abstract class DrillFuncHolder {
 
   protected final FunctionTemplate.FunctionScope scope;
   protected final FunctionTemplate.NullHandling nullHandling;
+  protected final FunctionTemplate.FunctionCostCategory costCategory;
   protected final boolean isBinaryCommutative;
   protected final boolean isRandom;
   protected final String[] registeredNames;
@@ -62,7 +64,7 @@ public abstract class DrillFuncHolder {
 
   public DrillFuncHolder(FunctionScope scope, NullHandling nullHandling, boolean isBinaryCommutative, boolean isRandom,
       String[] registeredNames, ValueReference[] parameters, ValueReference returnValue,
-      WorkspaceReference[] workspaceVars, Map<String, String> methods, List<String> imports) {
+      WorkspaceReference[] workspaceVars, Map<String, String> methods, List<String> imports, FunctionCostCategory costCategory) {
     super();
     this.scope = scope;
     this.nullHandling = nullHandling;
@@ -74,6 +76,7 @@ public abstract class DrillFuncHolder {
     this.parameters = parameters;
     this.returnValue = returnValue;
     this.imports = ImmutableList.copyOf(imports);
+    this.costCategory = costCategory;
   }
 
   public List<String> getImports() {
@@ -223,6 +226,10 @@ public abstract class DrillFuncHolder {
     return registeredNames;
   }
 
+  public int getCostCategory() {
+    return this.costCategory.getValue(); 
+  }
+  
   @Override
   public String toString() {
     final int maxLen = 10;

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/43bb57e7/exec/java-exec/src/main/java/org/apache/drill/exec/expr/fn/DrillSCBooleanOPHolder.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/expr/fn/DrillSCBooleanOPHolder.java b/exec/java-exec/src/main/java/org/apache/drill/exec/expr/fn/DrillSCBooleanOPHolder.java
deleted file mode 100644
index d6d7037..0000000
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/expr/fn/DrillSCBooleanOPHolder.java
+++ /dev/null
@@ -1,35 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.drill.exec.expr.fn;
-
-import java.util.List;
-import java.util.Map;
-
-import org.apache.drill.exec.expr.annotations.FunctionTemplate.FunctionScope;
-import org.apache.drill.exec.expr.annotations.FunctionTemplate.NullHandling;
-
-public class DrillSCBooleanOPHolder extends DrillSimpleFuncHolder{
-
-  public DrillSCBooleanOPHolder(FunctionScope scope, NullHandling nullHandling, boolean isBinaryCommutative, boolean isRandom,
-      String[] registeredNames, ValueReference[] parameters, ValueReference returnValue, WorkspaceReference[] workspaceVars,
-      Map<String, String> methods, List<String> imports) {
-    super(scope, nullHandling, isBinaryCommutative, isRandom, registeredNames, parameters, returnValue, workspaceVars, methods, imports);
-  }
-
-}

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/43bb57e7/exec/java-exec/src/main/java/org/apache/drill/exec/expr/fn/DrillSimpleFuncHolder.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/expr/fn/DrillSimpleFuncHolder.java b/exec/java-exec/src/main/java/org/apache/drill/exec/expr/fn/DrillSimpleFuncHolder.java
index 01fc514..53df02d 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/expr/fn/DrillSimpleFuncHolder.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/expr/fn/DrillSimpleFuncHolder.java
@@ -27,6 +27,7 @@ import org.apache.drill.common.types.TypeProtos.MajorType;
 import org.apache.drill.exec.expr.ClassGenerator;
 import org.apache.drill.exec.expr.ClassGenerator.BlockType;
 import org.apache.drill.exec.expr.ClassGenerator.HoldingContainer;
+import org.apache.drill.exec.expr.annotations.FunctionTemplate.FunctionCostCategory;
 import org.apache.drill.exec.expr.annotations.FunctionTemplate.FunctionScope;
 import org.apache.drill.exec.expr.annotations.FunctionTemplate.NullHandling;
 
@@ -46,11 +47,16 @@ class DrillSimpleFuncHolder extends DrillFuncHolder{
   private final String resetBody;
   private final String cleanupBody;
 
-
   public DrillSimpleFuncHolder(FunctionScope scope, NullHandling nullHandling, boolean isBinaryCommutative, boolean isRandom,
       String[] registeredNames, ValueReference[] parameters, ValueReference returnValue, WorkspaceReference[] workspaceVars,
       Map<String, String> methods, List<String> imports) {
-    super(scope, nullHandling, isBinaryCommutative, isRandom, registeredNames, parameters, returnValue, workspaceVars, methods, imports);
+    this(scope, nullHandling, isBinaryCommutative, isRandom, registeredNames, parameters, returnValue, workspaceVars, methods, imports, FunctionCostCategory.getDefault());
+  }
+  
+  public DrillSimpleFuncHolder(FunctionScope scope, NullHandling nullHandling, boolean isBinaryCommutative, boolean isRandom,
+      String[] registeredNames, ValueReference[] parameters, ValueReference returnValue, WorkspaceReference[] workspaceVars,
+      Map<String, String> methods, List<String> imports, FunctionCostCategory costCategory) {
+    super(scope, nullHandling, isBinaryCommutative, isRandom, registeredNames, parameters, returnValue, workspaceVars, methods, imports, costCategory);
     setupBody = methods.get("setup");
     evalBody = methods.get("eval");
     resetBody = methods.get("reset");

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/43bb57e7/exec/java-exec/src/main/java/org/apache/drill/exec/expr/fn/FunctionConverter.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/expr/fn/FunctionConverter.java b/exec/java-exec/src/main/java/org/apache/drill/exec/expr/fn/FunctionConverter.java
index 2107421..8328549 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/expr/fn/FunctionConverter.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/expr/fn/FunctionConverter.java
@@ -220,7 +220,7 @@ public class FunctionConverter {
       switch(template.scope()){
       case POINT_AGGREGATE:
         return new DrillAggFuncHolder(template.scope(), template.nulls(), template.isBinaryCommutative(),
-          template.isRandom(), registeredNames, ps, outputField, works, methods, imports);
+          template.isRandom(), registeredNames, ps, outputField, works, methods, imports, template.costCategory());
       case DECIMAL_AGGREGATE:
         return new DrillDecimalAggFuncHolder(template.scope(), template.nulls(), template.isBinaryCommutative(),
           template.isRandom(), registeredNames, ps, outputField, works, methods, imports);
@@ -234,9 +234,9 @@ public class FunctionConverter {
           return new DrillSimpleFuncHolder(template.scope(), template.nulls(), 
                                            template.isBinaryCommutative(),
                                            template.isRandom(), registeredNames, 
-                                           ps, outputField, works, methods, imports);
+                                           ps, outputField, works, methods, imports, template.costCategory());
       case SC_BOOLEAN_OPERATOR:
-        return new DrillSCBooleanOPHolder(template.scope(), template.nulls(), 
+        return new DrillBooleanOPHolder(template.scope(), template.nulls(), 
             template.isBinaryCommutative(),
             template.isRandom(), registeredNames, 
             ps, outputField, works, methods, imports);
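
The converter now passes template.costCategory() through for simple and
point-aggregate functions, so a function's declared cost reaches
DrillFuncHolder.getCostCategory(). A minimal sketch of a function
definition setting the new annotation element follows; the costCategory
element is taken from this diff, while the function name, body, and the
COMPLEX constant are illustrative assumptions:

    import org.apache.drill.exec.expr.DrillSimpleFunc;
    import org.apache.drill.exec.expr.annotations.FunctionTemplate;
    import org.apache.drill.exec.expr.annotations.FunctionTemplate.FunctionCostCategory;
    import org.apache.drill.exec.expr.annotations.FunctionTemplate.FunctionScope;
    import org.apache.drill.exec.expr.annotations.FunctionTemplate.NullHandling;
    import org.apache.drill.exec.expr.annotations.Output;
    import org.apache.drill.exec.expr.annotations.Param;
    import org.apache.drill.exec.expr.holders.Float8Holder;
    import org.apache.drill.exec.record.RecordBatch;

    // Hypothetical UDF that marks itself as expensive so the planner
    // can weigh it via DrillFuncHolder.getCostCategory().
    @FunctionTemplate(name = "expensive_fn", scope = FunctionScope.SIMPLE,
        nulls = NullHandling.NULL_IF_NULL,
        costCategory = FunctionCostCategory.COMPLEX)
    public class ExpensiveFn implements DrillSimpleFunc {
      @Param  Float8Holder in;
      @Output Float8Holder out;

      public void setup(RecordBatch incoming) { }

      public void eval() {
        out.value = java.lang.Math.log(in.value); // stand-in for real work
      }
    }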

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/43bb57e7/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/filter/ReturnValueExpression.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/filter/ReturnValueExpression.java b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/filter/ReturnValueExpression.java
index 4ec4b09..1aa3006 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/filter/ReturnValueExpression.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/filter/ReturnValueExpression.java
@@ -70,5 +70,12 @@ public class ReturnValueExpression implements LogicalExpression{
     return returnTrueOnOne;
   }
 
+  public int getSelfCost() { 
+    throw new UnsupportedOperationException(String.format("%s does not currently support LogicalExpression.getSelfCost().", this.getClass().getCanonicalName()));
+  }
+  
+  public int getCumulativeCost() { 
+    throw new UnsupportedOperationException(String.format("%s does not currently support LogicalExpression.getCumulativeCost().", this.getClass().getCanonicalName()));
+  }
   
 }

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/43bb57e7/exec/java-exec/src/main/java/org/apache/drill/exec/planner/logical/DrillOptiq.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/planner/logical/DrillOptiq.java b/exec/java-exec/src/main/java/org/apache/drill/exec/planner/logical/DrillOptiq.java
index dd30378..21ff421 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/planner/logical/DrillOptiq.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/planner/logical/DrillOptiq.java
@@ -98,13 +98,19 @@ public class DrillOptiq {
         for(RexNode r : call.getOperands()){
           args.add(r.accept(this));
         }
-        args = Lists.reverse(args);
-        LogicalExpression lastArg = args.get(0);
-        for(int i = 1; i < args.size(); i++){
-          lastArg = FunctionCallFactory.createExpression(funcName, Lists.newArrayList(args.get(i), lastArg));
-        }
 
-        return lastArg;
+        if (FunctionCallFactory.isBooleanOperator(funcName)) {
+          LogicalExpression func = FunctionCallFactory.createBooleanOperator(funcName, args);
+          return func;
+        } else { 
+          args = Lists.reverse(args);
+          LogicalExpression lastArg = args.get(0);
+          for(int i = 1; i < args.size(); i++){
+            lastArg = FunctionCallFactory.createExpression(funcName, Lists.newArrayList(args.get(i), lastArg));
+          }
+  
+          return lastArg;
+        }
       case FUNCTION:
       case FUNCTION_ID:
         logger.debug("Function");
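
The rewritten branch keeps AND/OR as a single n-ary boolean operator
instead of right-folding the operand list into nested binary calls. A
self-contained sketch of the two shapes, with plain strings standing in
for Drill's LogicalExpression trees (illustrative only, not Drill's
actual classes):

    import java.util.ArrayList;
    import java.util.Arrays;
    import java.util.Collections;
    import java.util.List;

    public class BooleanFoldSketch {
      // Mirrors the old path: reverse the operands, then fold them
      // into nested two-argument calls.
      static String fold(String op, List<String> args) {
        List<String> rev = new ArrayList<>(args);
        Collections.reverse(rev);
        String last = rev.get(0);
        for (int i = 1; i < rev.size(); i++) {
          last = op + "(" + rev.get(i) + ", " + last + ")";
        }
        return last;
      }

      public static void main(String[] a) {
        List<String> args = Arrays.asList("p0", "p1", "p2");
        System.out.println(fold("and", args));
        // prints: and(p0, and(p1, p2))      -- old nested shape
        System.out.println("booleanAnd(" + String.join(", ", args) + ")");
        // prints: booleanAnd(p0, p1, p2)    -- new n-ary shape
      }
    }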

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/43bb57e7/exec/java-exec/src/test/java/org/apache/drill/TestExampleQueries.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/test/java/org/apache/drill/TestExampleQueries.java b/exec/java-exec/src/test/java/org/apache/drill/TestExampleQueries.java
index 308db3d..57cf072 100644
--- a/exec/java-exec/src/test/java/org/apache/drill/TestExampleQueries.java
+++ b/exec/java-exec/src/test/java/org/apache/drill/TestExampleQueries.java
@@ -23,11 +23,6 @@ import org.junit.Test;
 
 public class TestExampleQueries extends BaseTestQuery{
   static final org.slf4j.Logger logger = org.slf4j.LoggerFactory.getLogger(TestExampleQueries.class);
-
-  @Test
-  public void testQ() throws Exception {
-    test("select * from cp.`customer.json` where 0 = 1");
-  }
   
   @Test // see DRILL-553
   public void testQueryWithNullValues() throws Exception {

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/43bb57e7/pom.xml
----------------------------------------------------------------------
diff --git a/pom.xml b/pom.xml
index e1617df..f43962a 100644
--- a/pom.xml
+++ b/pom.xml
@@ -141,6 +141,7 @@
             <exclude>*.patch</exclude>
             <exclude>**/*.pb.cc</exclude>
             <exclude>**/*.pb.h</exclude>
+            <exclude>**/*.linux</exclude>
           </excludes>
         </configuration>
       </plugin>


[07/32] git commit: DRILL-1023: Fix issue where join row counts are overestimated, causing excessive parallelization. Use correct interfaces for retrieving row counts.

Posted by ja...@apache.org.
DRILL-1023: Fix issue where join row counts are overestimated, causing excessive parallelization. Use correct interfaces for retrieving row counts.


Project: http://git-wip-us.apache.org/repos/asf/incubator-drill/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-drill/commit/a88ebb27
Tree: http://git-wip-us.apache.org/repos/asf/incubator-drill/tree/a88ebb27
Diff: http://git-wip-us.apache.org/repos/asf/incubator-drill/diff/a88ebb27

Branch: refs/heads/master
Commit: a88ebb27d405305f41a72efe2fbee7305dc25ba8
Parents: 0dec032
Author: Jacques Nadeau <ja...@apache.org>
Authored: Tue Jun 17 16:02:54 2014 -0700
Committer: Jacques Nadeau <ja...@apache.org>
Committed: Wed Jun 18 21:37:46 2014 -0700

----------------------------------------------------------------------
 .../exec/planner/common/DrillJoinRelBase.java    |  9 +++++++--
 .../exec/planner/physical/AggPruleBase.java      |  2 +-
 .../planner/physical/PhysicalPlanCreator.java    |  2 +-
 .../exec/planner/physical/PlannerSettings.java   |  6 ++++++
 .../visitor/ExcessiveExchangeIdentifier.java     |  3 +--
 .../exec/server/options/SystemOptionManager.java |  1 +
 .../exec/server/options/TypeValidators.java      | 19 +++++++++++++++++++
 7 files changed, 36 insertions(+), 6 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/a88ebb27/exec/java-exec/src/main/java/org/apache/drill/exec/planner/common/DrillJoinRelBase.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/planner/common/DrillJoinRelBase.java b/exec/java-exec/src/main/java/org/apache/drill/exec/planner/common/DrillJoinRelBase.java
index 80f767c..3b3aa1a 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/planner/common/DrillJoinRelBase.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/planner/common/DrillJoinRelBase.java
@@ -22,6 +22,7 @@ import java.util.HashSet;
 import java.util.List;
 
 import org.apache.drill.exec.planner.cost.DrillCostBase.DrillCostFactory;
+import org.apache.drill.exec.planner.physical.PrelUtil;
 import org.eigenbase.rel.InvalidRelException;
 import org.eigenbase.rel.JoinRelBase;
 import org.eigenbase.rel.JoinRelType;
@@ -41,10 +42,12 @@ import com.google.common.collect.Lists;
 public abstract class DrillJoinRelBase extends JoinRelBase implements DrillRelNode {
   protected List<Integer> leftKeys = Lists.newArrayList();
   protected List<Integer> rightKeys = Lists.newArrayList() ;
+  private final double joinRowFactor;
 
   public DrillJoinRelBase(RelOptCluster cluster, RelTraitSet traits, RelNode left, RelNode right, RexNode condition,
       JoinRelType joinType) throws InvalidRelException {
     super(cluster, traits, left, right, condition, joinType, Collections.<String> emptySet());
+    this.joinRowFactor = PrelUtil.getPlannerSettings(cluster.getPlanner()).getRowCountEstimateFactor();
   }
 
   @Override
@@ -55,8 +58,10 @@ public abstract class DrillJoinRelBase extends JoinRelBase implements DrillRelNo
     return super.computeSelfCost(planner);
   }
 
-
-
+  @Override
+  public double getRows() {
+    return joinRowFactor * Math.max(this.getLeft().getRows(), this.getRight().getRows());
+  }
 
   /**
    * Returns whether there are any elements in common between left and right.

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/a88ebb27/exec/java-exec/src/main/java/org/apache/drill/exec/planner/physical/AggPruleBase.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/planner/physical/AggPruleBase.java b/exec/java-exec/src/main/java/org/apache/drill/exec/planner/physical/AggPruleBase.java
index 1b1cc94..7b7e3b7 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/planner/physical/AggPruleBase.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/planner/physical/AggPruleBase.java
@@ -62,7 +62,7 @@ public abstract class AggPruleBase extends Prule {
   protected boolean create2PhasePlan(RelOptRuleCall call, DrillAggregateRel aggregate) {
     PlannerSettings settings = PrelUtil.getPlannerSettings(call.getPlanner());
     RelNode child = call.rel(0).getInputs().get(0);
-    boolean smallInput = child.computeSelfCost(call.getPlanner()).getRows() < settings.getSliceTarget();
+    boolean smallInput = child.getRows() < settings.getSliceTarget();
     if (! settings.isMultiPhaseAggEnabled() || settings.isSingleMode() || smallInput) {
       return false;
     }

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/a88ebb27/exec/java-exec/src/main/java/org/apache/drill/exec/planner/physical/PhysicalPlanCreator.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/planner/physical/PhysicalPlanCreator.java b/exec/java-exec/src/main/java/org/apache/drill/exec/planner/physical/PhysicalPlanCreator.java
index bf1e51a..130ac87 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/planner/physical/PhysicalPlanCreator.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/planner/physical/PhysicalPlanCreator.java
@@ -60,7 +60,7 @@ public class PhysicalPlanCreator {
 
   public PhysicalOperator addMetadata(Prel originalPrel, PhysicalOperator op){
     op.setOperatorId(opIdMap.get(originalPrel).getAsSingleInt());
-    op.setCost(originalPrel.computeSelfCost(originalPrel.getCluster().getPlanner()).getRows());
+    op.setCost(originalPrel.getRows());
     return op;
   }
 

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/a88ebb27/exec/java-exec/src/main/java/org/apache/drill/exec/planner/physical/PlannerSettings.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/planner/physical/PlannerSettings.java b/exec/java-exec/src/main/java/org/apache/drill/exec/planner/physical/PlannerSettings.java
index edad125..e10b620 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/planner/physical/PlannerSettings.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/planner/physical/PlannerSettings.java
@@ -24,6 +24,7 @@ import org.apache.drill.exec.server.options.OptionManager;
 import org.apache.drill.exec.server.options.OptionValidator;
 import org.apache.drill.exec.server.options.TypeValidators.BooleanValidator;
 import org.apache.drill.exec.server.options.TypeValidators.PositiveLongValidator;
+import org.apache.drill.exec.server.options.TypeValidators.RangeDoubleValidator;
 
 public class PlannerSettings implements FrameworkContext{
   static final org.slf4j.Logger logger = org.slf4j.LoggerFactory.getLogger(PlannerSettings.class);
@@ -41,6 +42,7 @@ public class PlannerSettings implements FrameworkContext{
   public static final OptionValidator MULTIPHASE = new BooleanValidator("planner.enable_multiphase_agg", true);
   public static final OptionValidator BROADCAST = new BooleanValidator("planner.enable_broadcast_join", true);
   public static final OptionValidator BROADCAST_THRESHOLD = new PositiveLongValidator("planner.broadcast_threshold", MAX_BROADCAST_THRESHOLD, 1000000);
+  public static final OptionValidator JOIN_ROW_COUNT_ESTIMATE_FACTOR = new RangeDoubleValidator("planner.join.row_count_estimate_factor", 0, 100, 1.0d);
 
   public OptionManager options = null;
 
@@ -56,6 +58,10 @@ public class PlannerSettings implements FrameworkContext{
     return numEndPoints;
   }
 
+  public double getRowCountEstimateFactor(){
+    return options.getOption(JOIN_ROW_COUNT_ESTIMATE_FACTOR.getOptionName()).float_val;
+  }
+
   public boolean useDefaultCosting() {
     return useDefaultCosting;
   }
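
With the default factor of 1.0, a join's row count is now estimated as
joinRowFactor * max(left rows, right rows), so a 1M x 10K join is
estimated at 1M rows; lowering the factor scales that estimate down. A
hedged usage sketch for tuning the new option — the option name and its
0-100 range come from this diff, while the JDBC URL and the value 0.5
are illustrative:

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.Statement;

    public class TuneJoinEstimate {
      public static void main(String[] args) throws Exception {
        // Illustrative connection string; adjust for your cluster.
        try (Connection c = DriverManager.getConnection("jdbc:drill:zk=local");
             Statement s = c.createStatement()) {
          // Scale join row-count estimates to half of
          // max(left rows, right rows).
          s.execute("ALTER SESSION SET `planner.join.row_count_estimate_factor` = 0.5");
        }
      }
    }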

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/a88ebb27/exec/java-exec/src/main/java/org/apache/drill/exec/planner/physical/visitor/ExcessiveExchangeIdentifier.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/planner/physical/visitor/ExcessiveExchangeIdentifier.java b/exec/java-exec/src/main/java/org/apache/drill/exec/planner/physical/visitor/ExcessiveExchangeIdentifier.java
index ae4d661..168fd28 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/planner/physical/visitor/ExcessiveExchangeIdentifier.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/planner/physical/visitor/ExcessiveExchangeIdentifier.java
@@ -88,8 +88,7 @@ public class ExcessiveExchangeIdentifier extends BasePrelVisitor<Prel, Excessive
     private int maxWidth = Integer.MAX_VALUE;
 
     public void add(Prel prel){
-      RelOptCost cost = prel.computeSelfCost(prel.getCluster().getPlanner());
-      maxRows = Math.max(cost.getRows(), maxRows);
+      maxRows = Math.max(prel.getRows(), maxRows);
     }
 
     public void setSingular(){

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/a88ebb27/exec/java-exec/src/main/java/org/apache/drill/exec/server/options/SystemOptionManager.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/server/options/SystemOptionManager.java b/exec/java-exec/src/main/java/org/apache/drill/exec/server/options/SystemOptionManager.java
index c950c5f..8503197 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/server/options/SystemOptionManager.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/server/options/SystemOptionManager.java
@@ -53,6 +53,7 @@ public class SystemOptionManager implements OptionManager{
       PlannerSettings.MULTIPHASE,
       PlannerSettings.BROADCAST,
       PlannerSettings.BROADCAST_THRESHOLD,
+      PlannerSettings.JOIN_ROW_COUNT_ESTIMATE_FACTOR,
       ExecConstants.OUTPUT_FORMAT_VALIDATOR,
       ExecConstants.PARQUET_BLOCK_SIZE_VALIDATOR,
       ExecConstants.SLICE_TARGET_OPTION,

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/a88ebb27/exec/java-exec/src/main/java/org/apache/drill/exec/server/options/TypeValidators.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/server/options/TypeValidators.java b/exec/java-exec/src/main/java/org/apache/drill/exec/server/options/TypeValidators.java
index bc6a9d3..a90807c 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/server/options/TypeValidators.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/server/options/TypeValidators.java
@@ -45,6 +45,25 @@ public class TypeValidators {
     }
   }
 
+  public static class RangeDoubleValidator extends DoubleValidator {
+    private final double min;
+    private final double max;
+
+    public RangeDoubleValidator(String name, double min, double max, double def) {
+      super(name, def);
+      this.min = min;
+      this.max = max;
+    }
+
+    @Override
+    public void extraValidate(OptionValue v) throws ExpressionParsingException {
+      if (v.float_val > max || v.float_val < min)
+        throw new ExpressionParsingException(String.format("Option %s must be between %f and %f.", getOptionName(), min,
+            max));
+    }
+
+  }
+
   public static class BooleanValidator extends TypeValidator{
     public BooleanValidator(String name, boolean def){
       super(name, Kind.BOOLEAN, OptionValue.createBoolean(OptionType.SYSTEM, name, def));


[18/32] git commit: DRILL-898: C++ Client. Fix decimal data type.

Posted by ja...@apache.org.
DRILL-898: C++ Client. Fix decimal data type.


Project: http://git-wip-us.apache.org/repos/asf/incubator-drill/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-drill/commit/a3bf05d3
Tree: http://git-wip-us.apache.org/repos/asf/incubator-drill/tree/a3bf05d3
Diff: http://git-wip-us.apache.org/repos/asf/incubator-drill/diff/a3bf05d3

Branch: refs/heads/master
Commit: a3bf05d3e1750187406b19f471570d93f9adde50
Parents: 632f5ca
Author: Xiao Meng <xi...@gmail.com>
Authored: Sat May 31 17:40:10 2014 -0700
Committer: Jacques Nadeau <ja...@apache.org>
Committed: Thu Jun 19 20:30:46 2014 -0700

----------------------------------------------------------------------
 .../client/src/clientlib/decimalUtils.cpp       |  2 +-
 .../native/client/src/clientlib/recordBatch.cpp | 12 ++++++++
 .../native/client/src/include/drill/drillc.hpp  |  4 +--
 .../client/src/include/drill/recordBatch.hpp    | 30 +++++++++++++++-----
 4 files changed, 38 insertions(+), 10 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/a3bf05d3/contrib/native/client/src/clientlib/decimalUtils.cpp
----------------------------------------------------------------------
diff --git a/contrib/native/client/src/clientlib/decimalUtils.cpp b/contrib/native/client/src/clientlib/decimalUtils.cpp
index 3885faa..779ee72 100644
--- a/contrib/native/client/src/clientlib/decimalUtils.cpp
+++ b/contrib/native/client/src/clientlib/decimalUtils.cpp
@@ -88,7 +88,7 @@ DecimalValue getDecimalValueFromByteBuf(SlicedByteBuf& data, size_t startIndex,
     val.m_unscaledValue = decimalDigits;
 
     // set the sign
-    if (data.getUint32((startIndex) & 0x80000000) != 0)
+    if ((data.getUint32(startIndex) & 0x80000000) != 0)
     {
         val.m_unscaledValue *= -1;
     }
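
The one-character fix above moves a parenthesis: the sign mask was
being applied to the start index before the read, so getUint32() was
asked for offset startIndex & 0x80000000 (zero for any sane index) and
any nonzero word at offset zero was treated as a negative sign. The
intended test, sketched in Java against a plain int:

    // The 0x80000000 mask is the sign bit of the first 32-bit word of
    // the big-endian unscaled value; it must be applied to the fetched
    // word, never to the buffer offset.
    static boolean isNegative(int mostSignificantWord) {
      return (mostSignificantWord & 0x80000000) != 0;
    }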

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/a3bf05d3/contrib/native/client/src/clientlib/recordBatch.cpp
----------------------------------------------------------------------
diff --git a/contrib/native/client/src/clientlib/recordBatch.cpp b/contrib/native/client/src/clientlib/recordBatch.cpp
index 81b9dbe..17073bd 100644
--- a/contrib/native/client/src/clientlib/recordBatch.cpp
+++ b/contrib/native/client/src/clientlib/recordBatch.cpp
@@ -238,6 +238,18 @@ ValueVectorBase* ValueVectorFactory::allocateValueVector(const Drill::FieldMetad
                     return new NullableValueVectorFixed<float>(b,f.getValueCount());
                 case common::FLOAT8:
                     return new NullableValueVectorFixed<double>(b,f.getValueCount());
+                case common::DECIMAL9:
+                    return new NullableValueVectorDecimal9(b,f.getValueCount(), f.getScale());
+                case common::DECIMAL18:
+                    return new NullableValueVectorDecimal18(b,f.getValueCount(), f.getScale());
+                case common::DECIMAL28DENSE:
+                    return new NullableValueVectorDecimal28Dense(b,f.getValueCount(), f.getScale());
+                case common::DECIMAL38DENSE:
+                    return new NullableValueVectorDecimal38Dense(b,f.getValueCount(), f.getScale());
+                case common::DECIMAL28SPARSE:
+                    return new NullableValueVectorDecimal28Sparse(b,f.getValueCount(), f.getScale());
+                case common::DECIMAL38SPARSE:
+                    return new NullableValueVectorDecimal38Sparse(b,f.getValueCount(), f.getScale());
                 case common::DATE:
                     return new NullableValueVectorTyped<DateHolder,
                            ValueVectorTyped<DateHolder, int64_t> >(b,f.getValueCount());

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/a3bf05d3/contrib/native/client/src/include/drill/drillc.hpp
----------------------------------------------------------------------
diff --git a/contrib/native/client/src/include/drill/drillc.hpp b/contrib/native/client/src/include/drill/drillc.hpp
index 817b680..93a6b79 100644
--- a/contrib/native/client/src/include/drill/drillc.hpp
+++ b/contrib/native/client/src/include/drill/drillc.hpp
@@ -22,8 +22,8 @@
 #include "drill/common.hpp"
 #include "drill/drillClient.hpp"
 #include "drill/recordBatch.hpp"
-#include "Types.pb.h"
-#include "User.pb.h"
+#include "drill/protobuf/Types.pb.h"
+#include "drill/protobuf/User.pb.h"
 
 #endif
 

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/a3bf05d3/contrib/native/client/src/include/drill/recordBatch.hpp
----------------------------------------------------------------------
diff --git a/contrib/native/client/src/include/drill/recordBatch.hpp b/contrib/native/client/src/include/drill/recordBatch.hpp
index 984588f..9798997 100644
--- a/contrib/native/client/src/include/drill/recordBatch.hpp
+++ b/contrib/native/client/src/include/drill/recordBatch.hpp
@@ -303,9 +303,13 @@ template <int DECIMAL_DIGITS, int WIDTH_IN_BYTES, bool IS_SPARSE, int MAX_PRECIS
             void getValueAt(size_t index, char* buf, size_t nChars) const {
                 const DecimalValue& val = this->get(index);
                 const std::string& str = boost::lexical_cast<std::string>(val.m_unscaledValue);
-                size_t idxDecimalMark = str.length() - m_scale;
-                const std::string& decStr= str.substr(0, idxDecimalMark) + "." + str.substr(idxDecimalMark, m_scale);
-                strncpy(buf, decStr.c_str(), nChars);
+                if (m_scale == 0) {
+                    strncpy(buf, str.c_str(), nChars);
+                } else {
+                    size_t idxDecimalMark = str.length() - m_scale;
+                    const std::string& decStr= str.substr(0, idxDecimalMark) + "." + str.substr(idxDecimalMark, m_scale);
+                    strncpy(buf, decStr.c_str(), nChars);
+                }
                 return;
             }
 
@@ -336,9 +340,13 @@ template<typename VALUE_TYPE>
             void getValueAt(size_t index, char* buf, size_t nChars) const {
                 VALUE_TYPE value = m_pBuffer->readAt<VALUE_TYPE>(index * sizeof(VALUE_TYPE));
                 const std::string& str = boost::lexical_cast<std::string>(value);
-                size_t idxDecimalMark = str.length() - m_scale;
-                const std::string& decStr= str.substr(0, idxDecimalMark) + "." + str.substr(idxDecimalMark, m_scale);
-                strncpy(buf, decStr.c_str(), nChars);
+                if (m_scale == 0) {
+                    strncpy(buf, str.c_str(), nChars);
+                } else {
+                    size_t idxDecimalMark = str.length() - m_scale;
+                    const std::string& decStr= str.substr(0, idxDecimalMark) + "." + str.substr(idxDecimalMark, m_scale);
+                    strncpy(buf, decStr.c_str(), nChars);
+                }
                 return;
             }
 
@@ -559,6 +567,13 @@ template <class VALUEHOLDER_CLASS_TYPE, class VALUE_VECTOR_TYPE>
                 this->m_pData= new SlicedByteBuf(*b, offsetEnd, b->getLength()-offsetEnd);
                 this->m_pVector= new VALUE_VECTOR_TYPE(m_pData, rowCount);
             }
+            // Specialized for Decimal Types
+            NullableValueVectorTyped(SlicedByteBuf *b, size_t rowCount, int32_t scale):ValueVectorBase(b, rowCount){
+                size_t offsetEnd = (size_t)ceil(rowCount/8.0);
+                this->m_pBitmap= new SlicedByteBuf(*b, 0, offsetEnd);
+                this->m_pData= new SlicedByteBuf(*b, offsetEnd, b->getLength()-offsetEnd);
+                this->m_pVector= new VALUE_VECTOR_TYPE(m_pData, rowCount, scale);
+            }
 
             ~NullableValueVectorTyped(){
                 delete this->m_pBitmap;
@@ -732,11 +747,12 @@ class DECLSPEC_DRILL_CLIENT FieldMetadata{
             m_precision=f.major_type().precision();
             m_bufferLength=f.buffer_length();
         }
-        const std::string& getName(){ return m_name;}
+        const std::string& getName() const{ return m_name;}
         common::MinorType getMinorType() const{ return m_minorType;}
         common::DataMode getDataMode() const{return m_dataMode;}
         uint32_t getValueCount() const{return m_valueCount;}
         uint32_t getScale() const{return m_scale;}
+        uint32_t getPrecision() const{return m_precision;}
         uint32_t getBufferLength() const{return m_bufferLength;}
         void copy(Drill::FieldMetadata& f){
             m_name=f.m_name;
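
The scale-zero branch added to both getValueAt() overloads matters
because the old code computed idxDecimalMark = str.length() - m_scale
unconditionally, which for m_scale == 0 produced a trailing decimal
point ("123."). The formatting rule, mirrored as a hedged Java sketch
(it assumes, as the C++ code does, that the digit string is longer than
the scale):

    // No scale means no decimal point; otherwise insert the point
    // `scale` digits from the right of the unscaled digit string.
    static String formatUnscaled(String digits, int scale) {
      if (scale == 0) {
        return digits;
      }
      int idxDecimalMark = digits.length() - scale;
      return digits.substring(0, idxDecimalMark) + "."
          + digits.substring(idxDecimalMark);
    }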


[23/32] git commit: DRILL-1015: Move compound identifier converter step into Sql parser.

Posted by ja...@apache.org.
DRILL-1015: Move compound identifier converter step into Sql parser.

Currently compound identifiers are converted in DrillSqlWorker after
parsing the query, but when views are expanded the conversion is not
applied, because the view expander code lives in Optiq. The fix is to
move the conversion step into the SQL parser itself, so that whenever
a query string is parsed, the resulting SqlNode already has its
compound identifiers converted.


Project: http://git-wip-us.apache.org/repos/asf/incubator-drill/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-drill/commit/136614fd
Tree: http://git-wip-us.apache.org/repos/asf/incubator-drill/tree/136614fd
Diff: http://git-wip-us.apache.org/repos/asf/incubator-drill/diff/136614fd

Branch: refs/heads/master
Commit: 136614fdc9bac95d48590cb39ec531da345b88fb
Parents: 2060506
Author: vkorukanti <ve...@gmail.com>
Authored: Thu Jun 19 18:36:27 2014 -0700
Committer: Jacques Nadeau <ja...@apache.org>
Committed: Fri Jun 20 10:56:16 2014 -0700

----------------------------------------------------------------------
 .../drill/exec/planner/sql/DrillSqlWorker.java  |  8 ++-
 .../DrillParserWithCompoundIdConverter.java     | 53 ++++++++++++++++++++
 exec/jdbc/pom.xml                               |  1 +
 .../org/apache/drill/jdbc/test/TestViews.java   | 16 ++++++
 exec/jdbc/src/test/resources/nation/nation.tbl  | 25 +++++++++
 5 files changed, 98 insertions(+), 5 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/136614fd/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/DrillSqlWorker.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/DrillSqlWorker.java b/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/DrillSqlWorker.java
index c8d2548..cc779ad 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/DrillSqlWorker.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/DrillSqlWorker.java
@@ -38,9 +38,8 @@ import org.apache.drill.exec.planner.sql.handlers.AbstractSqlHandler;
 import org.apache.drill.exec.planner.sql.handlers.DefaultSqlHandler;
 import org.apache.drill.exec.planner.sql.handlers.ExplainHandler;
 import org.apache.drill.exec.planner.sql.handlers.SetOptionHandler;
-import org.apache.drill.exec.planner.sql.parser.CompoundIdentifierConverter;
 import org.apache.drill.exec.planner.sql.parser.DrillSqlCall;
-import org.apache.drill.exec.planner.sql.parser.impl.DrillParserImpl;
+import org.apache.drill.exec.planner.sql.parser.impl.DrillParserWithCompoundIdConverter;
 import org.apache.drill.exec.store.StoragePluginRegistry;
 import org.apache.drill.exec.util.Pointer;
 import org.eigenbase.rel.RelCollationTraitDef;
@@ -72,7 +71,7 @@ public class DrillSqlWorker {
         null : new DrillCostBase.DrillCostFactory() ;
     StdFrameworkConfig config = StdFrameworkConfig.newBuilder() //
         .lex(Lex.MYSQL) //
-        .parserFactory(DrillParserImpl.FACTORY) //
+        .parserFactory(DrillParserWithCompoundIdConverter.FACTORY) //
         .defaultSchema(context.getNewDefaultSchema()) //
         .operatorTable(table) //
         .traitDefs(traitDefs) //
@@ -105,8 +104,7 @@ public class DrillSqlWorker {
   }
 
   public PhysicalPlan getPlan(String sql, Pointer<String> textPlan) throws SqlParseException, ValidationException, RelConversionException, IOException{
-    SqlNode originalNode = planner.parse(sql);
-    SqlNode sqlNode = originalNode.accept(new CompoundIdentifierConverter());
+    SqlNode sqlNode = planner.parse(sql);
 
     AbstractSqlHandler handler;
 

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/136614fd/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/parser/impl/DrillParserWithCompoundIdConverter.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/parser/impl/DrillParserWithCompoundIdConverter.java b/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/parser/impl/DrillParserWithCompoundIdConverter.java
new file mode 100644
index 0000000..4886741
--- /dev/null
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/parser/impl/DrillParserWithCompoundIdConverter.java
@@ -0,0 +1,53 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.drill.exec.planner.sql.parser.impl;
+
+import org.apache.drill.exec.planner.sql.parser.CompoundIdentifierConverter;
+import org.eigenbase.sql.SqlNode;
+import org.eigenbase.sql.parser.SqlAbstractParserImpl;
+import org.eigenbase.sql.parser.SqlParserImplFactory;
+
+import java.io.Reader;
+
+public class DrillParserWithCompoundIdConverter extends DrillParserImpl {
+
+  /**
+   * {@link org.eigenbase.sql.parser.SqlParserImplFactory} implementation for creating parser.
+   */
+  public static final SqlParserImplFactory FACTORY = new SqlParserImplFactory() {
+    public SqlAbstractParserImpl getParser(Reader stream) {
+      return new DrillParserWithCompoundIdConverter(stream);
+    }
+  };
+
+  public DrillParserWithCompoundIdConverter(Reader stream) {
+    super(stream);
+  }
+
+  @Override
+  public SqlNode parseSqlExpressionEof() throws Exception {
+    SqlNode originalSqlNode = super.parseSqlExpressionEof();
+    return originalSqlNode.accept(new CompoundIdentifierConverter());
+  }
+
+  @Override
+  public SqlNode parseSqlStmtEof() throws Exception {
+    SqlNode originalSqlNode = super.parseSqlStmtEof();
+    return originalSqlNode.accept(new CompoundIdentifierConverter());
+  }
+}

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/136614fd/exec/jdbc/pom.xml
----------------------------------------------------------------------
diff --git a/exec/jdbc/pom.xml b/exec/jdbc/pom.xml
index f19294f..1cb5844 100644
--- a/exec/jdbc/pom.xml
+++ b/exec/jdbc/pom.xml
@@ -103,6 +103,7 @@
             <exclude>**/*.json</exclude>
             <exclude>**/git.properties</exclude>
             <exclude>**/donuts-output-data.txt</exclude>
+            <exclude>**/*.tbl</exclude>
             <exclude>**/derby.log</exclude>
           </excludes>
         </configuration>

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/136614fd/exec/jdbc/src/test/java/org/apache/drill/jdbc/test/TestViews.java
----------------------------------------------------------------------
diff --git a/exec/jdbc/src/test/java/org/apache/drill/jdbc/test/TestViews.java b/exec/jdbc/src/test/java/org/apache/drill/jdbc/test/TestViews.java
index 8e7131c..e3f6a8e 100644
--- a/exec/jdbc/src/test/java/org/apache/drill/jdbc/test/TestViews.java
+++ b/exec/jdbc/src/test/java/org/apache/drill/jdbc/test/TestViews.java
@@ -219,6 +219,22 @@ public class TestViews extends JdbcTestQueryBase {
   }
 
   @Test
+  public void testViewWithCompoundIdentifiersInSchema() throws Exception{
+    String query = String.format("CREATE VIEW nationview AS SELECT " +
+        "cast(columns[0] AS int) n_nationkey, " +
+        "cast(columns[1] AS CHAR(25)) n_name, " +
+        "cast(columns[2] AS INT) n_regionkey, " +
+        "cast(columns[3] AS VARCHAR(152)) n_comment " +
+        "FROM dfs.`%s/src/test/resources/nation`", WORKING_PATH);
+
+    testViewHelper(
+        query,
+        "nationview",
+        "SELECT * FROM nationview LIMIT 1",
+        "n_nationkey=0; n_name=ALGERIA; n_regionkey=0; n_comment= haggle. carefully final deposits detect slyly agai");
+  }
+
+  @Test
   public void testDropView() throws Exception{
     JdbcAssert.withNoDefaultSchema().withConnection(new Function<Connection, Void>() {
       public Void apply(Connection connection) {
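
The new test exercises view expansion end to end: the view body uses
columns[...] compound identifiers, which are only converted during
expansion now that the converter lives in the parser. A hedged JDBC
sketch of the same flow — the connection URL and file path are
placeholders:

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.ResultSet;
    import java.sql.Statement;

    public class ViewExample {
      public static void main(String[] args) throws Exception {
        try (Connection c = DriverManager.getConnection("jdbc:drill:zk=local");
             Statement s = c.createStatement()) {
          s.execute("CREATE VIEW nationview AS SELECT "
              + "cast(columns[0] AS INT) n_nationkey, "
              + "cast(columns[1] AS CHAR(25)) n_name "
              + "FROM dfs.`/path/to/nation`");
          try (ResultSet rs = s.executeQuery("SELECT * FROM nationview LIMIT 1")) {
            while (rs.next()) {
              System.out.println(rs.getInt("n_nationkey") + ": " + rs.getString("n_name"));
            }
          }
        }
      }
    }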

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/136614fd/exec/jdbc/src/test/resources/nation/nation.tbl
----------------------------------------------------------------------
diff --git a/exec/jdbc/src/test/resources/nation/nation.tbl b/exec/jdbc/src/test/resources/nation/nation.tbl
new file mode 100644
index 0000000..ed3fd5b
--- /dev/null
+++ b/exec/jdbc/src/test/resources/nation/nation.tbl
@@ -0,0 +1,25 @@
+0|ALGERIA|0| haggle. carefully final deposits detect slyly agai|
+1|ARGENTINA|1|al foxes promise slyly according to the regular accounts. bold requests alon|
+2|BRAZIL|1|y alongside of the pending deposits. carefully special packages are about the ironic forges. slyly special |
+3|CANADA|1|eas hang ironic, silent packages. slyly regular packages are furiously over the tithes. fluffily bold|
+4|EGYPT|4|y above the carefully unusual theodolites. final dugouts are quickly across the furiously regular d|
+5|ETHIOPIA|0|ven packages wake quickly. regu|
+6|FRANCE|3|refully final requests. regular, ironi|
+7|GERMANY|3|l platelets. regular accounts x-ray: unusual, regular acco|
+8|INDIA|2|ss excuses cajole slyly across the packages. deposits print aroun|
+9|INDONESIA|2| slyly express asymptotes. regular deposits haggle slyly. carefully ironic hockey players sleep blithely. carefull|
+10|IRAN|4|efully alongside of the slyly final dependencies. |
+11|IRAQ|4|nic deposits boost atop the quickly final requests? quickly regula|
+12|JAPAN|2|ously. final, express gifts cajole a|
+13|JORDAN|4|ic deposits are blithely about the carefully regular pa|
+14|KENYA|0| pending excuses haggle furiously deposits. pending, express pinto beans wake fluffily past t|
+15|MOROCCO|0|rns. blithely bold courts among the closely regular packages use furiously bold platelets?|
+16|MOZAMBIQUE|0|s. ironic, unusual asymptotes wake blithely r|
+17|PERU|1|platelets. blithely pending dependencies use fluffily across the even pinto beans. carefully silent accoun|
+18|CHINA|2|c dependencies. furiously express notornis sleep slyly regular accounts. ideas sleep. depos|
+19|ROMANIA|3|ular asymptotes are about the furious multipliers. express dependencies nag above the ironically ironic account|
+20|SAUDI ARABIA|4|ts. silent requests haggle. closely express packages sleep across the blithely|
+21|VIETNAM|2|hely enticingly express accounts. even, final |
+22|RUSSIA|3| requests against the platelets use never according to the quickly regular pint|
+23|UNITED KINGDOM|3|eans boost carefully special requests. accounts are. carefull|
+24|UNITED STATES|1|y final packages. slow foxes cajole quickly. quickly silent platelets breach ironic accounts. unusual pinto be|