Posted to commits@drill.apache.org by jn...@apache.org on 2017/06/03 04:45:56 UTC

[01/12] drill git commit: DRILL-5485: Remove WebServer dependency on DrillClient

Repository: drill
Updated Branches:
  refs/heads/master 62326be3c -> 874bf6296
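
The change in every file below is mechanical: DRILL-5485 hoists UserClientConnection out of the UserServer class in org.apache.drill.exec.rpc.user and makes it a standalone type in org.apache.drill.exec.rpc, so consumers such as the web server and these tests no longer compile against UserServer itself. A minimal sketch of the resulting shape, with members that are assumptions chosen for illustration rather than the actual Drill API surface:

    package org.apache.drill.exec.rpc;

    import java.net.SocketAddress;

    // Simplified sketch only: the real interface carries Drill-specific
    // methods; the two members here just illustrate a connection contract
    // that no longer depends on the server class that implements it.
    public interface UserClientConnection {

      /** Remote endpoint of the connected client. */
      SocketAddress getRemoteAddress();

      /** Releases the connection; Drill's real interface exposes more. */
      void close();
    }

Each test file in this commit then swaps its import of org.apache.drill.exec.rpc.user.UserServer (or the nested UserServer.UserClientConnection) for org.apache.drill.exec.rpc.UserClientConnection and updates the mocked parameter type to match.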


http://git-wip-us.apache.org/repos/asf/drill/blob/874bf629/exec/java-exec/src/test/java/org/apache/drill/exec/fn/impl/TestNewMathFunctions.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/test/java/org/apache/drill/exec/fn/impl/TestNewMathFunctions.java b/exec/java-exec/src/test/java/org/apache/drill/exec/fn/impl/TestNewMathFunctions.java
index b556656..373388a 100644
--- a/exec/java-exec/src/test/java/org/apache/drill/exec/fn/impl/TestNewMathFunctions.java
+++ b/exec/java-exec/src/test/java/org/apache/drill/exec/fn/impl/TestNewMathFunctions.java
@@ -34,7 +34,7 @@ import org.apache.drill.exec.physical.impl.SimpleRootExec;
 import org.apache.drill.exec.planner.PhysicalPlanReader;
 import org.apache.drill.exec.planner.PhysicalPlanReaderTestFactory;
 import org.apache.drill.exec.proto.BitControl.PlanFragment;
-import org.apache.drill.exec.rpc.user.UserServer;
+import org.apache.drill.exec.rpc.UserClientConnection;
 import org.apache.drill.exec.server.DrillbitContext;
 import org.apache.drill.exec.vector.ValueVector;
 import org.apache.drill.exec.vector.VarCharVector;
@@ -71,7 +71,7 @@ public class TestNewMathFunctions extends ExecTest {
   }
 
   public void runTest(@Injectable final DrillbitContext bitContext,
-                      @Injectable UserServer.UserClientConnection connection, Object[] expectedResults, String planPath) throws Throwable {
+                      @Injectable UserClientConnection connection, Object[] expectedResults, String planPath) throws Throwable {
     mockDrillbitContext(bitContext);
 
     final String planString = Resources.toString(Resources.getResource(planPath), Charsets.UTF_8);
@@ -106,14 +106,14 @@ public class TestNewMathFunctions extends ExecTest {
 
   @Test
   public void testTrigoMathFunc(@Injectable final DrillbitContext bitContext,
-                           @Injectable UserServer.UserClientConnection connection) throws Throwable {
+                           @Injectable UserClientConnection connection) throws Throwable {
     final Object [] expected = new Object[] {Math.sin(45), Math.cos(45), Math.tan(45),Math.asin(45), Math.acos(45), Math.atan(45),Math.sinh(45), Math.cosh(45), Math.tanh(45)};
     runTest(bitContext, connection, expected, "functions/testTrigoMathFunctions.json");
   }
 
   @Test
   public void testExtendedMathFunc(@Injectable final DrillbitContext bitContext,
-                           @Injectable UserServer.UserClientConnection connection) throws Throwable {
+                           @Injectable UserClientConnection connection) throws Throwable {
     final BigDecimal d = new BigDecimal("100111111111111111111111111111111111.00000000000000000000000000000000000000000000000000001");
     final Object [] expected = new Object[] {Math.cbrt(1000), Math.log(10), (Math.log(64.0)/Math.log(2.0)), Math.exp(10), Math.toDegrees(0.5), Math.toRadians(45.0), Math.PI, Math.cbrt(d.doubleValue()), Math.log(d.doubleValue()), (Math.log(d.doubleValue())/Math.log(2)), Math.exp(d.doubleValue()), Math.toDegrees(d.doubleValue()), Math.toRadians(d.doubleValue())};
 
@@ -122,14 +122,14 @@ public class TestNewMathFunctions extends ExecTest {
 
   @Test
   public void testTruncDivMod(@Injectable final DrillbitContext bitContext,
-                           @Injectable UserServer.UserClientConnection connection) throws Throwable{
+                           @Injectable UserClientConnection connection) throws Throwable{
     final Object [] expected = new Object[] {101.0, 0, 101, 1010.0, 101, 481.0, 0.001099999999931267};
     runTest(bitContext, connection, expected, "functions/testDivModTruncFunctions.json");
   }
 
  @Test
  public void testIsNumeric(@Injectable final DrillbitContext bitContext,
-                           @Injectable UserServer.UserClientConnection connection) throws Throwable{
+                           @Injectable UserClientConnection connection) throws Throwable{
    final Object [] expected = new Object[] {1, 1, 1, 0};
    runTest(bitContext, connection, expected, "functions/testIsNumericFunction.json");
  }
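
Every test class in this commit repeats the same one-line migration: the JMockit @Injectable parameter type changes from the nested UserServer.UserClientConnection to the top-level interface, and the mock keeps working because JMockit mocks by declared type. A hypothetical, self-contained illustration of that pattern (it assumes the JMockit agent is active, as in Drill's test setup; the nested interface is a stand-in, not the real Drill type):

    package org.apache.drill.example;

    import mockit.Injectable;
    import org.junit.Test;

    import static org.junit.Assert.assertNotNull;

    public class InjectionPatternTest {

      // Stand-in for org.apache.drill.exec.rpc.UserClientConnection.
      interface UserClientConnection { }

      @Test
      public void mockIsInjected(@Injectable UserClientConnection connection) {
        // JMockit supplies a mock instance for any @Injectable parameter,
        // so only the interface needs to be on the test's compile path.
        assertNotNull(connection);
      }
    }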

http://git-wip-us.apache.org/repos/asf/drill/blob/874bf629/exec/java-exec/src/test/java/org/apache/drill/exec/fn/impl/TestRepeatedFunction.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/test/java/org/apache/drill/exec/fn/impl/TestRepeatedFunction.java b/exec/java-exec/src/test/java/org/apache/drill/exec/fn/impl/TestRepeatedFunction.java
index 961558a..ca10b63 100644
--- a/exec/java-exec/src/test/java/org/apache/drill/exec/fn/impl/TestRepeatedFunction.java
+++ b/exec/java-exec/src/test/java/org/apache/drill/exec/fn/impl/TestRepeatedFunction.java
@@ -34,7 +34,7 @@ import org.apache.drill.exec.physical.impl.SimpleRootExec;
 import org.apache.drill.exec.planner.PhysicalPlanReader;
 import org.apache.drill.exec.planner.PhysicalPlanReaderTestFactory;
 import org.apache.drill.exec.proto.BitControl.PlanFragment;
-import org.apache.drill.exec.rpc.user.UserServer.UserClientConnection;
+import org.apache.drill.exec.rpc.UserClientConnection;
 import org.apache.drill.exec.server.DrillbitContext;
 import org.apache.drill.exec.vector.BitVector;
 import org.apache.drill.exec.vector.IntVector;

http://git-wip-us.apache.org/repos/asf/drill/blob/874bf629/exec/java-exec/src/test/java/org/apache/drill/exec/physical/impl/TestCastFunctions.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/test/java/org/apache/drill/exec/physical/impl/TestCastFunctions.java b/exec/java-exec/src/test/java/org/apache/drill/exec/physical/impl/TestCastFunctions.java
index 8cab43b..3cd293e 100644
--- a/exec/java-exec/src/test/java/org/apache/drill/exec/physical/impl/TestCastFunctions.java
+++ b/exec/java-exec/src/test/java/org/apache/drill/exec/physical/impl/TestCastFunctions.java
@@ -45,7 +45,7 @@ import org.apache.drill.exec.record.RecordBatchLoader;
 import org.apache.drill.exec.record.VectorAccessible;
 import org.apache.drill.exec.record.VectorWrapper;
 import org.apache.drill.exec.rpc.user.QueryDataBatch;
-import org.apache.drill.exec.rpc.user.UserServer;
+import org.apache.drill.exec.rpc.UserClientConnection;
 import org.apache.drill.exec.server.Drillbit;
 import org.apache.drill.exec.server.DrillbitContext;
 import org.apache.drill.exec.server.RemoteServiceSet;
@@ -68,7 +68,7 @@ public class TestCastFunctions extends PopUnitTestBase{
   @Test
   // cast to bigint.
   public void testCastBigInt(@Injectable final DrillbitContext bitContext,
-                            @Injectable UserServer.UserClientConnection connection) throws Throwable {
+                            @Injectable UserClientConnection connection) throws Throwable {
 
     mockDrillbitContext(bitContext);
 
@@ -106,7 +106,7 @@ public class TestCastFunctions extends PopUnitTestBase{
   @Test
   //cast to int
   public void testCastInt(@Injectable final DrillbitContext bitContext,
-                            @Injectable UserServer.UserClientConnection connection) throws Throwable {
+                            @Injectable UserClientConnection connection) throws Throwable {
 
     mockDrillbitContext(bitContext);
 
@@ -143,7 +143,7 @@ public class TestCastFunctions extends PopUnitTestBase{
   @Test
   //cast to float4
   public void testCastFloat4(@Injectable final DrillbitContext bitContext,
-                            @Injectable UserServer.UserClientConnection connection) throws Throwable {
+                            @Injectable UserClientConnection connection) throws Throwable {
     mockDrillbitContext(bitContext);
 
     final PhysicalPlanReader reader = PhysicalPlanReaderTestFactory.defaultPhysicalPlanReader(CONFIG);
@@ -180,7 +180,7 @@ public class TestCastFunctions extends PopUnitTestBase{
   @Test
   //cast to float8
   public void testCastFloat8(@Injectable final DrillbitContext bitContext,
-                            @Injectable UserServer.UserClientConnection connection) throws Throwable {
+                            @Injectable UserClientConnection connection) throws Throwable {
     mockDrillbitContext(bitContext);
 
     final PhysicalPlanReader reader = PhysicalPlanReaderTestFactory.defaultPhysicalPlanReader(CONFIG);
@@ -217,7 +217,7 @@ public class TestCastFunctions extends PopUnitTestBase{
   @Test
   //cast to varchar(length)
   public void testCastVarChar(@Injectable final DrillbitContext bitContext,
-                            @Injectable UserServer.UserClientConnection connection) throws Throwable {
+                            @Injectable UserClientConnection connection) throws Throwable {
     mockDrillbitContext(bitContext);
 
     final PhysicalPlanReader reader = PhysicalPlanReaderTestFactory.defaultPhysicalPlanReader(CONFIG);
@@ -253,7 +253,7 @@ public class TestCastFunctions extends PopUnitTestBase{
   @Test
   //cast to varbinary(length)
   public void testCastVarBinary(@Injectable final DrillbitContext bitContext,
-                            @Injectable UserServer.UserClientConnection connection) throws Throwable {
+                            @Injectable UserClientConnection connection) throws Throwable {
     mockDrillbitContext(bitContext);
 
     final PhysicalPlanReader reader = PhysicalPlanReaderTestFactory.defaultPhysicalPlanReader(CONFIG);
@@ -289,7 +289,7 @@ public class TestCastFunctions extends PopUnitTestBase{
   @Test
   //nested: cast is nested in another cast, or another function.
   public void testCastNested(@Injectable final DrillbitContext bitContext,
-                            @Injectable UserServer.UserClientConnection connection) throws Throwable {
+                            @Injectable UserClientConnection connection) throws Throwable {
 
     mockDrillbitContext(bitContext);
 
@@ -327,7 +327,7 @@ public class TestCastFunctions extends PopUnitTestBase{
 
   @Test(expected = NumberFormatException.class)
   public void testCastNumException(@Injectable final DrillbitContext bitContext,
-                            @Injectable UserServer.UserClientConnection connection) throws Throwable {
+                            @Injectable UserClientConnection connection) throws Throwable {
 
     mockDrillbitContext(bitContext);
 

http://git-wip-us.apache.org/repos/asf/drill/blob/874bf629/exec/java-exec/src/test/java/org/apache/drill/exec/physical/impl/TestComparisonFunctions.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/test/java/org/apache/drill/exec/physical/impl/TestComparisonFunctions.java b/exec/java-exec/src/test/java/org/apache/drill/exec/physical/impl/TestComparisonFunctions.java
index ede26c7..b26e0cf 100644
--- a/exec/java-exec/src/test/java/org/apache/drill/exec/physical/impl/TestComparisonFunctions.java
+++ b/exec/java-exec/src/test/java/org/apache/drill/exec/physical/impl/TestComparisonFunctions.java
@@ -29,7 +29,7 @@ import org.apache.drill.exec.physical.base.FragmentRoot;
 import org.apache.drill.exec.planner.PhysicalPlanReader;
 import org.apache.drill.exec.planner.PhysicalPlanReaderTestFactory;
 import org.apache.drill.exec.proto.BitControl.PlanFragment;
-import org.apache.drill.exec.rpc.user.UserServer;
+import org.apache.drill.exec.rpc.UserClientConnection;
 import org.apache.drill.exec.server.DrillbitContext;
 import org.junit.Test;
 
@@ -47,7 +47,7 @@ public class TestComparisonFunctions extends ExecTest {
   private FunctionImplementationRegistry registry;
 
   public void runTest(@Injectable final DrillbitContext bitContext,
-                      @Injectable UserServer.UserClientConnection connection, String expression, int expectedResults) throws Throwable {
+                      @Injectable UserClientConnection connection, String expression, int expectedResults) throws Throwable {
     mockDrillbitContext(bitContext);
 
     final String planString = Resources.toString(Resources.getResource(COMPARISON_TEST_PHYSICAL_PLAN), Charsets.UTF_8).replaceAll("EXPRESSION", expression);
@@ -82,7 +82,7 @@ public class TestComparisonFunctions extends ExecTest {
 
   @Test
   public void testInt(@Injectable final DrillbitContext bitContext,
-                           @Injectable UserServer.UserClientConnection connection) throws Throwable {
+                           @Injectable UserClientConnection connection) throws Throwable {
     runTest(bitContext, connection, "intColumn == intColumn", 100);
     runTest(bitContext, connection, "intColumn != intColumn", 0);
     runTest(bitContext, connection, "intColumn > intColumn", 0);
@@ -93,7 +93,7 @@ public class TestComparisonFunctions extends ExecTest {
 
   @Test
   public void testBigInt(@Injectable final DrillbitContext bitContext,
-                      @Injectable UserServer.UserClientConnection connection) throws Throwable {
+                      @Injectable UserClientConnection connection) throws Throwable {
     runTest(bitContext, connection, "bigIntColumn == bigIntColumn", 100);
     runTest(bitContext, connection, "bigIntColumn != bigIntColumn", 0);
     runTest(bitContext, connection, "bigIntColumn > bigIntColumn", 0);
@@ -104,7 +104,7 @@ public class TestComparisonFunctions extends ExecTest {
 
   @Test
   public void testFloat4(@Injectable final DrillbitContext bitContext,
-                         @Injectable UserServer.UserClientConnection connection) throws Throwable {
+                         @Injectable UserClientConnection connection) throws Throwable {
     runTest(bitContext, connection, "float4Column == float4Column", 100);
     runTest(bitContext, connection, "float4Column != float4Column", 0);
     runTest(bitContext, connection, "float4Column > float4Column", 0);
@@ -115,7 +115,7 @@ public class TestComparisonFunctions extends ExecTest {
 
   @Test
   public void testFloat8(@Injectable final DrillbitContext bitContext,
-                         @Injectable UserServer.UserClientConnection connection) throws Throwable {
+                         @Injectable UserClientConnection connection) throws Throwable {
     runTest(bitContext, connection, "float8Column == float8Column", 100);
     runTest(bitContext, connection, "float8Column != float8Column", 0);
     runTest(bitContext, connection, "float8Column > float8Column", 0);
@@ -126,7 +126,7 @@ public class TestComparisonFunctions extends ExecTest {
 
   @Test
   public void testIntNullable(@Injectable final DrillbitContext bitContext,
-                      @Injectable UserServer.UserClientConnection connection) throws Throwable {
+                      @Injectable UserClientConnection connection) throws Throwable {
     runTest(bitContext, connection, "intNullableColumn == intNullableColumn", 50);
     runTest(bitContext, connection, "intNullableColumn != intNullableColumn", 0);
     runTest(bitContext, connection, "intNullableColumn > intNullableColumn", 0);
@@ -137,7 +137,7 @@ public class TestComparisonFunctions extends ExecTest {
 
   @Test
   public void testBigIntNullable(@Injectable final DrillbitContext bitContext,
-                         @Injectable UserServer.UserClientConnection connection) throws Throwable {
+                         @Injectable UserClientConnection connection) throws Throwable {
     runTest(bitContext, connection, "bigIntNullableColumn == bigIntNullableColumn", 50);
     runTest(bitContext, connection, "bigIntNullableColumn != bigIntNullableColumn", 0);
     runTest(bitContext, connection, "bigIntNullableColumn > bigIntNullableColumn", 0);

http://git-wip-us.apache.org/repos/asf/drill/blob/874bf629/exec/java-exec/src/test/java/org/apache/drill/exec/physical/impl/TestConvertFunctions.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/test/java/org/apache/drill/exec/physical/impl/TestConvertFunctions.java b/exec/java-exec/src/test/java/org/apache/drill/exec/physical/impl/TestConvertFunctions.java
index 23912eb..02e047e 100644
--- a/exec/java-exec/src/test/java/org/apache/drill/exec/physical/impl/TestConvertFunctions.java
+++ b/exec/java-exec/src/test/java/org/apache/drill/exec/physical/impl/TestConvertFunctions.java
@@ -38,7 +38,7 @@ import org.apache.drill.exec.proto.UserBitShared.QueryType;
 import org.apache.drill.exec.record.RecordBatchLoader;
 import org.apache.drill.exec.rpc.RpcException;
 import org.apache.drill.exec.rpc.user.QueryDataBatch;
-import org.apache.drill.exec.rpc.user.UserServer;
+import org.apache.drill.exec.rpc.UserClientConnection;
 import org.apache.drill.exec.server.DrillbitContext;
 import org.apache.drill.exec.server.options.OptionValue;
 import org.apache.drill.exec.util.ByteBufUtil.HadoopWritables;
@@ -484,31 +484,31 @@ public class TestConvertFunctions extends BaseTestQuery {
 
   @Test
   public void testFloats5(@Injectable final DrillbitContext bitContext,
-                           @Injectable UserServer.UserClientConnection connection) throws Throwable {
+                           @Injectable UserClientConnection connection) throws Throwable {
     verifyPhysicalPlan("convert_from(convert_to(cast(77 as float8), 'DOUBLE'), 'DOUBLE')", 77.0);
   }
 
   @Test
   public void testFloats5be(@Injectable final DrillbitContext bitContext,
-                          @Injectable UserServer.UserClientConnection connection) throws Throwable {
+                          @Injectable UserClientConnection connection) throws Throwable {
     verifyPhysicalPlan("convert_from(convert_to(cast(77 as float8), 'DOUBLE_BE'), 'DOUBLE_BE')", 77.0);
   }
 
   @Test
   public void testFloats6(@Injectable final DrillbitContext bitContext,
-                           @Injectable UserServer.UserClientConnection connection) throws Throwable {
+                           @Injectable UserClientConnection connection) throws Throwable {
     verifyPhysicalPlan("convert_to(cast(77 as float8), 'DOUBLE')", new byte[] {0, 0, 0, 0, 0, 64, 83, 64});
   }
 
   @Test
   public void testFloats7(@Injectable final DrillbitContext bitContext,
-                           @Injectable UserServer.UserClientConnection connection) throws Throwable {
+                           @Injectable UserClientConnection connection) throws Throwable {
     verifyPhysicalPlan("convert_to(4.9e-324, 'DOUBLE')", new byte[] {1, 0, 0, 0, 0, 0, 0, 0});
   }
 
   @Test
   public void testFloats8(@Injectable final DrillbitContext bitContext,
-                           @Injectable UserServer.UserClientConnection connection) throws Throwable {
+                           @Injectable UserClientConnection connection) throws Throwable {
     verifyPhysicalPlan("convert_to(1.7976931348623157e+308, 'DOUBLE')", new byte[] {-1, -1, -1, -1, -1, -1, -17, 127});
   }
 

http://git-wip-us.apache.org/repos/asf/drill/blob/874bf629/exec/java-exec/src/test/java/org/apache/drill/exec/physical/impl/TestImplicitCastFunctions.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/test/java/org/apache/drill/exec/physical/impl/TestImplicitCastFunctions.java b/exec/java-exec/src/test/java/org/apache/drill/exec/physical/impl/TestImplicitCastFunctions.java
index e8ae370..c0c3aae 100644
--- a/exec/java-exec/src/test/java/org/apache/drill/exec/physical/impl/TestImplicitCastFunctions.java
+++ b/exec/java-exec/src/test/java/org/apache/drill/exec/physical/impl/TestImplicitCastFunctions.java
@@ -29,7 +29,7 @@ import org.apache.drill.exec.physical.base.FragmentRoot;
 import org.apache.drill.exec.planner.PhysicalPlanReader;
 import org.apache.drill.exec.planner.PhysicalPlanReaderTestFactory;
 import org.apache.drill.exec.proto.BitControl.PlanFragment;
-import org.apache.drill.exec.rpc.user.UserServer;
+import org.apache.drill.exec.rpc.UserClientConnection;
 import org.apache.drill.exec.server.DrillbitContext;
 import org.apache.drill.exec.vector.ValueVector;
 import org.junit.Test;
@@ -62,7 +62,7 @@ public class TestImplicitCastFunctions extends ExecTest {
  }
 
   public void runTest(@Injectable final DrillbitContext bitContext,
-                      @Injectable UserServer.UserClientConnection connection, Object[] expectedResults, String planPath) throws Throwable {
+                      @Injectable UserClientConnection connection, Object[] expectedResults, String planPath) throws Throwable {
 
     mockDrillbitContext(bitContext);
 
@@ -99,7 +99,7 @@ public class TestImplicitCastFunctions extends ExecTest {
 
   @Test
   public void testImplicitCastWithConstant(@Injectable final DrillbitContext bitContext,
-                           @Injectable UserServer.UserClientConnection connection) throws Throwable{
+                           @Injectable UserClientConnection connection) throws Throwable{
     final Object [] expected = new Object[21];
     expected [0] = new Double (30.1);
     expected [1] = new Double (30.1);
@@ -131,7 +131,7 @@ public class TestImplicitCastFunctions extends ExecTest {
 
   @Test
   public void testImplicitCastWithMockColumn(@Injectable final DrillbitContext bitContext,
-                           @Injectable UserServer.UserClientConnection connection) throws Throwable{
+                           @Injectable UserClientConnection connection) throws Throwable{
     final Object [] expected = new Object[5];
     expected [0] = new Integer (0);
     expected [1] = new Integer (0);
@@ -144,7 +144,7 @@ public class TestImplicitCastFunctions extends ExecTest {
 
   @Test
   public void testImplicitCastWithNullExpression(@Injectable final DrillbitContext bitContext,
-                           @Injectable UserServer.UserClientConnection connection) throws Throwable{
+                           @Injectable UserClientConnection connection) throws Throwable{
     final Object [] expected = new Object[10];
 
     expected [0] = Boolean.TRUE;

http://git-wip-us.apache.org/repos/asf/drill/blob/874bf629/exec/java-exec/src/test/java/org/apache/drill/exec/physical/impl/TestOptiqPlans.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/test/java/org/apache/drill/exec/physical/impl/TestOptiqPlans.java b/exec/java-exec/src/test/java/org/apache/drill/exec/physical/impl/TestOptiqPlans.java
index 74aff18..e016b04 100644
--- a/exec/java-exec/src/test/java/org/apache/drill/exec/physical/impl/TestOptiqPlans.java
+++ b/exec/java-exec/src/test/java/org/apache/drill/exec/physical/impl/TestOptiqPlans.java
@@ -47,7 +47,7 @@ import org.apache.drill.exec.rpc.control.Controller;
 import org.apache.drill.exec.rpc.control.WorkEventBus;
 import org.apache.drill.exec.rpc.data.DataConnectionCreator;
 import org.apache.drill.exec.rpc.user.QueryDataBatch;
-import org.apache.drill.exec.rpc.user.UserServer.UserClientConnection;
+import org.apache.drill.exec.rpc.UserClientConnection;
 import org.apache.drill.exec.rpc.user.UserSession;
 import org.apache.drill.exec.server.BootStrapContext;
 import org.apache.drill.exec.server.Drillbit;

http://git-wip-us.apache.org/repos/asf/drill/blob/874bf629/exec/java-exec/src/test/java/org/apache/drill/exec/physical/impl/TestReverseImplicitCast.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/test/java/org/apache/drill/exec/physical/impl/TestReverseImplicitCast.java b/exec/java-exec/src/test/java/org/apache/drill/exec/physical/impl/TestReverseImplicitCast.java
index 76c4718..ae8302d 100644
--- a/exec/java-exec/src/test/java/org/apache/drill/exec/physical/impl/TestReverseImplicitCast.java
+++ b/exec/java-exec/src/test/java/org/apache/drill/exec/physical/impl/TestReverseImplicitCast.java
@@ -31,7 +31,7 @@ import org.apache.drill.exec.pop.PopUnitTestBase;
 import org.apache.drill.exec.record.RecordBatchLoader;
 import org.apache.drill.exec.record.VectorWrapper;
 import org.apache.drill.exec.rpc.user.QueryDataBatch;
-import org.apache.drill.exec.rpc.user.UserServer;
+import org.apache.drill.exec.rpc.UserClientConnection;
 import org.apache.drill.exec.server.Drillbit;
 import org.apache.drill.exec.server.DrillbitContext;
 import org.apache.drill.exec.server.RemoteServiceSet;
@@ -45,7 +45,7 @@ public class TestReverseImplicitCast extends PopUnitTestBase {
 
   @Test
   public void twoWayCast(@Injectable final DrillbitContext bitContext,
-                         @Injectable UserServer.UserClientConnection connection) throws Throwable {
+                         @Injectable UserClientConnection connection) throws Throwable {
 
     // Function checks for casting from Float, Double to Decimal data types
     try (RemoteServiceSet serviceSet = RemoteServiceSet.getLocalServiceSet();

http://git-wip-us.apache.org/repos/asf/drill/blob/874bf629/exec/java-exec/src/test/java/org/apache/drill/exec/physical/impl/TestSimpleFunctions.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/test/java/org/apache/drill/exec/physical/impl/TestSimpleFunctions.java b/exec/java-exec/src/test/java/org/apache/drill/exec/physical/impl/TestSimpleFunctions.java
index 6c48651..c21facb 100644
--- a/exec/java-exec/src/test/java/org/apache/drill/exec/physical/impl/TestSimpleFunctions.java
+++ b/exec/java-exec/src/test/java/org/apache/drill/exec/physical/impl/TestSimpleFunctions.java
@@ -48,7 +48,7 @@ import org.apache.drill.exec.planner.PhysicalPlanReaderTestFactory;
 import org.apache.drill.exec.proto.BitControl.PlanFragment;
 import org.apache.drill.exec.resolver.FunctionResolver;
 import org.apache.drill.exec.resolver.FunctionResolverFactory;
-import org.apache.drill.exec.rpc.user.UserServer;
+import org.apache.drill.exec.rpc.UserClientConnection;
 import org.apache.drill.exec.server.DrillbitContext;
 import org.apache.drill.exec.vector.NullableVarBinaryVector;
 import org.apache.drill.exec.vector.NullableVarCharVector;
@@ -144,7 +144,7 @@ public class TestSimpleFunctions extends ExecTest {
 
   @Test
   public void testSubstring(@Injectable final DrillbitContext bitContext,
-                            @Injectable UserServer.UserClientConnection connection) throws Throwable {
+                            @Injectable UserClientConnection connection) throws Throwable {
     mockDrillbitContext(bitContext);
 
     final PhysicalPlanReader reader = PhysicalPlanReaderTestFactory.defaultPhysicalPlanReader(c);
@@ -177,7 +177,7 @@ public class TestSimpleFunctions extends ExecTest {
 
   @Test
   public void testSubstringNegative(@Injectable final DrillbitContext bitContext,
-                                    @Injectable UserServer.UserClientConnection connection) throws Throwable {
+                                    @Injectable UserClientConnection connection) throws Throwable {
     mockDrillbitContext(bitContext);
 
     final PhysicalPlanReader reader = PhysicalPlanReaderTestFactory.defaultPhysicalPlanReader(c);
@@ -211,7 +211,7 @@ public class TestSimpleFunctions extends ExecTest {
 
   @Test
   public void testByteSubstring(@Injectable final DrillbitContext bitContext,
-                                  @Injectable UserServer.UserClientConnection connection) throws Throwable {
+                                  @Injectable UserClientConnection connection) throws Throwable {
     mockDrillbitContext(bitContext);
 
     final PhysicalPlanReader reader = PhysicalPlanReaderTestFactory.defaultPhysicalPlanReader(c);

http://git-wip-us.apache.org/repos/asf/drill/blob/874bf629/exec/java-exec/src/test/java/org/apache/drill/exec/physical/impl/TestStringFunctions.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/test/java/org/apache/drill/exec/physical/impl/TestStringFunctions.java b/exec/java-exec/src/test/java/org/apache/drill/exec/physical/impl/TestStringFunctions.java
index e2091c9..b87a085 100644
--- a/exec/java-exec/src/test/java/org/apache/drill/exec/physical/impl/TestStringFunctions.java
+++ b/exec/java-exec/src/test/java/org/apache/drill/exec/physical/impl/TestStringFunctions.java
@@ -29,7 +29,7 @@ import org.apache.drill.exec.physical.base.FragmentRoot;
 import org.apache.drill.exec.planner.PhysicalPlanReader;
 import org.apache.drill.exec.planner.PhysicalPlanReaderTestFactory;
 import org.apache.drill.exec.proto.BitControl.PlanFragment;
-import org.apache.drill.exec.rpc.user.UserServer;
+import org.apache.drill.exec.rpc.UserClientConnection;
 import org.apache.drill.exec.server.DrillbitContext;
 import org.apache.drill.exec.vector.ValueVector;
 import org.apache.drill.exec.vector.VarCharVector;
@@ -67,7 +67,7 @@ public class TestStringFunctions extends ExecTest {
  }
 
   public void runTest(@Injectable final DrillbitContext bitContext,
-                      @Injectable UserServer.UserClientConnection connection, Object[] expectedResults, String planPath) throws Throwable {
+                      @Injectable UserClientConnection connection, Object[] expectedResults, String planPath) throws Throwable {
 
     mockDrillbitContext(bitContext);
 
@@ -102,7 +102,7 @@ public class TestStringFunctions extends ExecTest {
 
   @Test
   public void testCharLength(@Injectable final DrillbitContext bitContext,
-                           @Injectable UserServer.UserClientConnection connection) throws Throwable {
+                           @Injectable UserClientConnection connection) throws Throwable {
     Object [] expected = new Object[] {new Long(8), new Long(0), new Long(5), new Long(5),
                                        new Long(8), new Long(0), new Long(5), new Long(5),
                                        new Long(8), new Long(0), new Long(5), new Long(5),};
@@ -111,63 +111,63 @@ public class TestStringFunctions extends ExecTest {
 
   @Test
   public void testLike(@Injectable final DrillbitContext bitContext,
-                           @Injectable UserServer.UserClientConnection connection) throws Throwable {
+                           @Injectable UserClientConnection connection) throws Throwable {
     final Object [] expected = new Object[] {Boolean.TRUE, Boolean.TRUE, Boolean.TRUE, Boolean.FALSE};
     runTest(bitContext, connection, expected, "functions/string/testLike.json");
   }
 
   @Test
   public void testSimilar(@Injectable final DrillbitContext bitContext,
-                           @Injectable UserServer.UserClientConnection connection) throws Throwable {
+                           @Injectable UserClientConnection connection) throws Throwable {
     final Object [] expected = new Object[] {Boolean.TRUE, Boolean.FALSE, Boolean.TRUE, Boolean.FALSE};
     runTest(bitContext, connection, expected, "functions/string/testSimilar.json");
   }
 
   @Test
   public void testLtrim(@Injectable final DrillbitContext bitContext,
-                           @Injectable UserServer.UserClientConnection connection) throws Throwable {
+                           @Injectable UserClientConnection connection) throws Throwable {
     final Object [] expected = new Object[] {"def", "abcdef", "dabc", "", "", ""};
     runTest(bitContext, connection, expected, "functions/string/testLtrim.json");
   }
 
   @Test
   public void testTrim(@Injectable final DrillbitContext bitContext,
-                           @Injectable UserServer.UserClientConnection connection) throws Throwable {
+                           @Injectable UserClientConnection connection) throws Throwable {
     final Object [] expected = new Object[] {"fghI", "", "", "!", " aaa "};
     runTest(bitContext, connection, expected, "functions/string/testTrim.json");
   }
 
   @Test
   public void testReplace(@Injectable final DrillbitContext bitContext,
-                           @Injectable UserServer.UserClientConnection connection) throws Throwable {
+                           @Injectable UserClientConnection connection) throws Throwable {
     final Object [] expected = new Object[] {"aABABcdf", "ABABbABbcdf", "aababcdf", "acdf", "ABCD", "abc"};
     runTest(bitContext, connection, expected, "functions/string/testReplace.json");
   }
 
   @Test
   public void testRtrim(@Injectable final DrillbitContext bitContext,
-                           @Injectable UserServer.UserClientConnection connection) throws Throwable {
+                           @Injectable UserClientConnection connection) throws Throwable {
     final Object [] expected = new Object[] {"abc", "abcdef", "ABd", "", "", ""};
     runTest(bitContext, connection, expected, "functions/string/testRtrim.json");
   }
 
   @Test
   public void testConcat(@Injectable final DrillbitContext bitContext,
-                           @Injectable UserServer.UserClientConnection connection) throws Throwable {
+                           @Injectable UserClientConnection connection) throws Throwable {
     final Object [] expected = new Object[] {"abcABC", "abc", "ABC", ""};
     runTest(bitContext, connection, expected, "functions/string/testConcat.json");
   }
 
   @Test
   public void testLower(@Injectable final DrillbitContext bitContext,
-                           @Injectable UserServer.UserClientConnection connection) throws Throwable {
+                           @Injectable UserClientConnection connection) throws Throwable {
     final Object [] expected = new Object[] {"abcefgh", "abc", ""};
     runTest(bitContext, connection, expected, "functions/string/testLower.json");
   }
 
   @Test
   public void testPosition(@Injectable final DrillbitContext bitContext,
-                           @Injectable UserServer.UserClientConnection connection) throws Throwable {
+                           @Injectable UserClientConnection connection) throws Throwable {
     final Object [] expected = new Object[] {new Long(2), new Long(0), new Long(0), new Long(0),
                                        new Long(2), new Long(0), new Long(0), new Long(0)};
     runTest(bitContext, connection, expected, "functions/string/testPosition.json");
@@ -175,7 +175,7 @@ public class TestStringFunctions extends ExecTest {
 
   @Test
   public void testRight(@Injectable final DrillbitContext bitContext,
-                           @Injectable UserServer.UserClientConnection connection) throws Throwable {
+                           @Injectable UserClientConnection connection) throws Throwable {
     final Object [] expected = new Object[] {"ef", "abcdef", "abcdef", "cdef", "f", "", ""};
     runTest(bitContext, connection, expected, "functions/string/testRight.json");
   }
@@ -183,48 +183,48 @@ public class TestStringFunctions extends ExecTest {
 
   @Test
   public void testSubstr(@Injectable final DrillbitContext bitContext,
-                           @Injectable UserServer.UserClientConnection connection) throws Throwable {
+                           @Injectable UserClientConnection connection) throws Throwable {
     final Object [] expected = new Object[] {"abc", "bcd", "bcdef", "bcdef", "", "", "", "", "भारत", "वर्ष", "वर्ष", "cdef", "", "", "", "ड्रिल"};
     runTest(bitContext, connection, expected, "functions/string/testSubstr.json");
   }
 
   @Test
   public void testLeft(@Injectable final DrillbitContext bitContext,
-                           @Injectable UserServer.UserClientConnection connection) throws Throwable {
+                           @Injectable UserClientConnection connection) throws Throwable {
     final Object [] expected = new Object[] {"ab", "abcdef", "abcdef", "abcd", "a", "", ""};
     runTest(bitContext, connection, expected, "functions/string/testLeft.json");
   }
 
   @Test
   public void testLpad(@Injectable final DrillbitContext bitContext,
-                           @Injectable UserServer.UserClientConnection connection) throws Throwable {
+                           @Injectable UserClientConnection connection) throws Throwable {
     final Object [] expected = new Object[] {"", "", "abcdef", "ab", "ab", "abcdef", "AAAAabcdef", "ABABabcdef", "ABCAabcdef", "ABCDabcdef"};
     runTest(bitContext, connection, expected, "functions/string/testLpad.json");
   }
 
   @Test
   public void testRegexpReplace(@Injectable final DrillbitContext bitContext,
-                           @Injectable UserServer.UserClientConnection connection) throws Throwable {
+                           @Injectable UserClientConnection connection) throws Throwable {
     final Object [] expected = new Object[] {"ThM", "Th", "Thomas"};
     runTest(bitContext, connection, expected, "functions/string/testRegexpReplace.json");
   }
 
   @Test
   public void testRpad(@Injectable final DrillbitContext bitContext,
-                           @Injectable UserServer.UserClientConnection connection) throws Throwable {
+                           @Injectable UserClientConnection connection) throws Throwable {
     final Object [] expected = new Object[] {"", "", "abcdef", "ab", "ab", "abcdef", "abcdefAAAA", "abcdefABAB", "abcdefABCA", "abcdefABCD"};
     runTest(bitContext, connection, expected, "functions/string/testRpad.json");
   }
 
   @Test
   public void testUpper(@Injectable final DrillbitContext bitContext,
-                           @Injectable UserServer.UserClientConnection connection) throws Throwable {
+                           @Injectable UserClientConnection connection) throws Throwable {
     final Object [] expected = new Object[] {"ABCEFGH", "ABC", ""};
     runTest(bitContext, connection, expected, "functions/string/testUpper.json");
   }
 
   @Test
-  public void testNewStringFuncs(@Injectable final DrillbitContext bitContext, @Injectable UserServer.UserClientConnection connection) throws Throwable {
+  public void testNewStringFuncs(@Injectable final DrillbitContext bitContext, @Injectable UserClientConnection connection) throws Throwable {
     final Object [] expected = new Object[] {97, 65, -32, "A", "btrim", "Peace Peace Peace ", "हकुना मताता हकुना मताता ", "katcit", "\u00C3\u00A2pple", "नदम"};
     runTest(bitContext, connection, expected, "functions/string/testStringFuncs.json");
   }

http://git-wip-us.apache.org/repos/asf/drill/blob/874bf629/exec/java-exec/src/test/java/org/apache/drill/exec/physical/impl/agg/TestAgg.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/test/java/org/apache/drill/exec/physical/impl/agg/TestAgg.java b/exec/java-exec/src/test/java/org/apache/drill/exec/physical/impl/agg/TestAgg.java
index efb8f6a..c6e394a 100644
--- a/exec/java-exec/src/test/java/org/apache/drill/exec/physical/impl/agg/TestAgg.java
+++ b/exec/java-exec/src/test/java/org/apache/drill/exec/physical/impl/agg/TestAgg.java
@@ -33,7 +33,7 @@ import org.apache.drill.exec.physical.impl.SimpleRootExec;
 import org.apache.drill.exec.planner.PhysicalPlanReader;
 import org.apache.drill.exec.planner.PhysicalPlanReaderTestFactory;
 import org.apache.drill.exec.proto.BitControl.PlanFragment;
-import org.apache.drill.exec.rpc.user.UserServer.UserClientConnection;
+import org.apache.drill.exec.rpc.UserClientConnection;
 import org.apache.drill.exec.server.DrillbitContext;
 import org.apache.drill.exec.vector.BigIntVector;
 import org.apache.drill.exec.vector.IntVector;

http://git-wip-us.apache.org/repos/asf/drill/blob/874bf629/exec/java-exec/src/test/java/org/apache/drill/exec/physical/impl/filter/TestSimpleFilter.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/test/java/org/apache/drill/exec/physical/impl/filter/TestSimpleFilter.java b/exec/java-exec/src/test/java/org/apache/drill/exec/physical/impl/filter/TestSimpleFilter.java
index c84cb20..163667a 100644
--- a/exec/java-exec/src/test/java/org/apache/drill/exec/physical/impl/filter/TestSimpleFilter.java
+++ b/exec/java-exec/src/test/java/org/apache/drill/exec/physical/impl/filter/TestSimpleFilter.java
@@ -32,7 +32,7 @@ import org.apache.drill.exec.physical.impl.SimpleRootExec;
 import org.apache.drill.exec.planner.PhysicalPlanReader;
 import org.apache.drill.exec.planner.PhysicalPlanReaderTestFactory;
 import org.apache.drill.exec.proto.BitControl.PlanFragment;
-import org.apache.drill.exec.rpc.user.UserServer.UserClientConnection;
+import org.apache.drill.exec.rpc.UserClientConnection;
 import org.apache.drill.exec.server.DrillbitContext;
 import org.junit.Ignore;
 import org.junit.Test;

http://git-wip-us.apache.org/repos/asf/drill/blob/874bf629/exec/java-exec/src/test/java/org/apache/drill/exec/physical/impl/join/TestHashJoin.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/test/java/org/apache/drill/exec/physical/impl/join/TestHashJoin.java b/exec/java-exec/src/test/java/org/apache/drill/exec/physical/impl/join/TestHashJoin.java
index c76b39c..7813675 100644
--- a/exec/java-exec/src/test/java/org/apache/drill/exec/physical/impl/join/TestHashJoin.java
+++ b/exec/java-exec/src/test/java/org/apache/drill/exec/physical/impl/join/TestHashJoin.java
@@ -40,7 +40,7 @@ import org.apache.drill.exec.proto.BitControl.PlanFragment;
 import org.apache.drill.exec.record.RecordBatchLoader;
 import org.apache.drill.exec.record.VectorWrapper;
 import org.apache.drill.exec.rpc.user.QueryDataBatch;
-import org.apache.drill.exec.rpc.user.UserServer;
+import org.apache.drill.exec.rpc.UserClientConnection;
 import org.apache.drill.exec.server.Drillbit;
 import org.apache.drill.exec.server.DrillbitContext;
 import org.apache.drill.exec.server.RemoteServiceSet;
@@ -62,7 +62,7 @@ public class TestHashJoin extends PopUnitTestBase {
 
   private final DrillConfig c = DrillConfig.create();
 
-  private void testHJMockScanCommon(final DrillbitContext bitContext, UserServer.UserClientConnection connection, String physicalPlan, int expectedRows) throws Throwable {
+  private void testHJMockScanCommon(final DrillbitContext bitContext, UserClientConnection connection, String physicalPlan, int expectedRows) throws Throwable {
 
     mockDrillbitContext(bitContext);
 
@@ -87,21 +87,21 @@ public class TestHashJoin extends PopUnitTestBase {
 
   @Test
   public void multiBatchEqualityJoin(@Injectable final DrillbitContext bitContext,
-                                 @Injectable UserServer.UserClientConnection connection) throws Throwable {
+                                 @Injectable UserClientConnection connection) throws Throwable {
 
     testHJMockScanCommon(bitContext, connection, "/join/hash_join_multi_batch.json", 200000);
   }
 
   @Test
   public void multiBatchRightOuterJoin(@Injectable final DrillbitContext bitContext,
-                                       @Injectable UserServer.UserClientConnection connection) throws Throwable {
+                                       @Injectable UserClientConnection connection) throws Throwable {
 
     testHJMockScanCommon(bitContext, connection, "/join/hj_right_outer_multi_batch.json", 100000);
   }
 
   @Test
   public void multiBatchLeftOuterJoin(@Injectable final DrillbitContext bitContext,
-                                      @Injectable UserServer.UserClientConnection connection) throws Throwable {
+                                      @Injectable UserClientConnection connection) throws Throwable {
 
     testHJMockScanCommon(bitContext, connection, "/join/hj_left_outer_multi_batch.json", 100000);
   }
@@ -149,7 +149,7 @@ public class TestHashJoin extends PopUnitTestBase {
 
   @Test
   public void hjWithExchange(@Injectable final DrillbitContext bitContext,
-                             @Injectable UserServer.UserClientConnection connection) throws Throwable {
+                             @Injectable UserClientConnection connection) throws Throwable {
 
     // Function tests with hash join with exchanges
     try (final RemoteServiceSet serviceSet = RemoteServiceSet.getLocalServiceSet();
@@ -177,7 +177,7 @@ public class TestHashJoin extends PopUnitTestBase {
 
   @Test
   public void multipleConditionJoin(@Injectable final DrillbitContext bitContext,
-                                    @Injectable UserServer.UserClientConnection connection) throws Throwable {
+                                    @Injectable UserClientConnection connection) throws Throwable {
 
     // Function tests hash join with multiple join conditions
     try (final RemoteServiceSet serviceSet = RemoteServiceSet.getLocalServiceSet();
@@ -223,7 +223,7 @@ public class TestHashJoin extends PopUnitTestBase {
 
   @Test
   public void hjWithExchange1(@Injectable final DrillbitContext bitContext,
-                              @Injectable UserServer.UserClientConnection connection) throws Throwable {
+                              @Injectable UserClientConnection connection) throws Throwable {
 
     // Another test for hash join with exchanges
     try (final RemoteServiceSet serviceSet = RemoteServiceSet.getLocalServiceSet();

http://git-wip-us.apache.org/repos/asf/drill/blob/874bf629/exec/java-exec/src/test/java/org/apache/drill/exec/physical/impl/join/TestMergeJoin.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/test/java/org/apache/drill/exec/physical/impl/join/TestMergeJoin.java b/exec/java-exec/src/test/java/org/apache/drill/exec/physical/impl/join/TestMergeJoin.java
index bb9c2bd..53c0a67 100644
--- a/exec/java-exec/src/test/java/org/apache/drill/exec/physical/impl/join/TestMergeJoin.java
+++ b/exec/java-exec/src/test/java/org/apache/drill/exec/physical/impl/join/TestMergeJoin.java
@@ -37,8 +37,7 @@ import org.apache.drill.exec.planner.PhysicalPlanReaderTestFactory;
 import org.apache.drill.exec.pop.PopUnitTestBase;
 import org.apache.drill.exec.proto.BitControl.PlanFragment;
 import org.apache.drill.exec.rpc.user.QueryDataBatch;
-import org.apache.drill.exec.rpc.user.UserServer;
-import org.apache.drill.exec.rpc.user.UserServer.UserClientConnection;
+import org.apache.drill.exec.rpc.UserClientConnection;
 import org.apache.drill.exec.server.Drillbit;
 import org.apache.drill.exec.server.DrillbitContext;
 import org.apache.drill.exec.server.RemoteServiceSet;
@@ -61,7 +60,7 @@ public class TestMergeJoin extends PopUnitTestBase {
   @Test
   @Ignore // this doesn't have a sort.  it also causes an infinite loop.  these may or may not be related.
   public void simpleEqualityJoin(@Injectable final DrillbitContext bitContext,
-                                 @Injectable UserServer.UserClientConnection connection) throws Throwable {
+                                 @Injectable UserClientConnection connection) throws Throwable {
 
     mockDrillbitContext(bitContext);
 
@@ -109,7 +108,7 @@ public class TestMergeJoin extends PopUnitTestBase {
   @Test
   @Ignore
   public void orderedEqualityLeftJoin(@Injectable final DrillbitContext bitContext,
-                                      @Injectable UserServer.UserClientConnection connection) throws Throwable {
+                                      @Injectable UserClientConnection connection) throws Throwable {
     mockDrillbitContext(bitContext);
 
     final PhysicalPlanReader reader = PhysicalPlanReaderTestFactory.defaultPhysicalPlanReader(c,
@@ -160,7 +159,7 @@ public class TestMergeJoin extends PopUnitTestBase {
   @Test
   @Ignore
   public void orderedEqualityInnerJoin(@Injectable final DrillbitContext bitContext,
-                                       @Injectable UserServer.UserClientConnection connection) throws Throwable {
+                                       @Injectable UserClientConnection connection) throws Throwable {
     mockDrillbitContext(bitContext);
 
     final PhysicalPlanReader reader = PhysicalPlanReaderTestFactory.defaultPhysicalPlanReader(c,
@@ -211,7 +210,7 @@ public class TestMergeJoin extends PopUnitTestBase {
   @Test
   @Ignore
   public void orderedEqualityMultiBatchJoin(@Injectable final DrillbitContext bitContext,
-                                            @Injectable UserServer.UserClientConnection connection) throws Throwable {
+                                            @Injectable UserClientConnection connection) throws Throwable {
     mockDrillbitContext(bitContext);
 
     final PhysicalPlanReader reader = PhysicalPlanReaderTestFactory.defaultPhysicalPlanReader(c,

http://git-wip-us.apache.org/repos/asf/drill/blob/874bf629/exec/java-exec/src/test/java/org/apache/drill/exec/physical/impl/limit/TestSimpleLimit.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/test/java/org/apache/drill/exec/physical/impl/limit/TestSimpleLimit.java b/exec/java-exec/src/test/java/org/apache/drill/exec/physical/impl/limit/TestSimpleLimit.java
index f2e1d56..962b9ce 100644
--- a/exec/java-exec/src/test/java/org/apache/drill/exec/physical/impl/limit/TestSimpleLimit.java
+++ b/exec/java-exec/src/test/java/org/apache/drill/exec/physical/impl/limit/TestSimpleLimit.java
@@ -32,7 +32,7 @@ import org.apache.drill.exec.physical.impl.SimpleRootExec;
 import org.apache.drill.exec.planner.PhysicalPlanReader;
 import org.apache.drill.exec.planner.PhysicalPlanReaderTestFactory;
 import org.apache.drill.exec.proto.BitControl.PlanFragment;
-import org.apache.drill.exec.rpc.user.UserServer;
+import org.apache.drill.exec.rpc.UserClientConnection;
 import org.apache.drill.exec.server.DrillbitContext;
 import org.apache.drill.exec.vector.BigIntVector;
 import org.junit.Ignore;
@@ -47,14 +47,14 @@ public class TestSimpleLimit extends ExecTest {
   private final DrillConfig c = DrillConfig.create();
 
   @Test
-  public void testLimit(@Injectable final DrillbitContext bitContext, @Injectable UserServer.UserClientConnection connection) throws Throwable {
+  public void testLimit(@Injectable final DrillbitContext bitContext, @Injectable UserClientConnection connection) throws Throwable {
 
     mockDrillbitContext(bitContext);
     verifyLimitCount(bitContext, connection, "test1.json", 5);
   }
 
   @Test
-  public void testLimitNoEnd(@Injectable final DrillbitContext bitContext, @Injectable UserServer.UserClientConnection connection) throws Throwable {
+  public void testLimitNoEnd(@Injectable final DrillbitContext bitContext, @Injectable UserClientConnection connection) throws Throwable {
     mockDrillbitContext(bitContext);
     verifyLimitCount(bitContext, connection, "test3.json", 95);
   }
@@ -65,7 +65,7 @@ public class TestSimpleLimit extends ExecTest {
   // However, when evaluate the increasingBitInt(0), if the outgoing batch could not hold the new value, doEval() return false, and start the
   // next batch. But the value has already been increased by 1 in the prior failed try. Therefore, the sum of the generated number could be different,
   // depending on the size of each outgoing batch, and when the batch could not hold any more values.
-  public void testLimitAcrossBatches(@Injectable final DrillbitContext bitContext, @Injectable UserServer.UserClientConnection connection) throws Throwable {
+  public void testLimitAcrossBatches(@Injectable final DrillbitContext bitContext, @Injectable UserClientConnection connection) throws Throwable {
 
     mockDrillbitContext(bitContext);
     verifyLimitCount(bitContext, connection, "test2.json", 69999);
@@ -78,7 +78,7 @@ public class TestSimpleLimit extends ExecTest {
 
   }
 
-  private void verifyLimitCount(DrillbitContext bitContext, UserServer.UserClientConnection connection, String testPlan, int expectedCount) throws Throwable {
+  private void verifyLimitCount(DrillbitContext bitContext, UserClientConnection connection, String testPlan, int expectedCount) throws Throwable {
     final PhysicalPlanReader reader = PhysicalPlanReaderTestFactory.defaultPhysicalPlanReader(c);
     final PhysicalPlan plan = reader.readPhysicalPlan(Files.toString(FileUtils.getResourceAsFile("/limit/" + testPlan), Charsets.UTF_8));
     final FunctionImplementationRegistry registry = new FunctionImplementationRegistry(c);
@@ -98,7 +98,7 @@ public class TestSimpleLimit extends ExecTest {
     assertTrue(!context.isFailed());
   }
 
-  private void verifySum(DrillbitContext bitContext, UserServer.UserClientConnection connection, String testPlan, int expectedCount, long expectedSum) throws Throwable {
+  private void verifySum(DrillbitContext bitContext, UserClientConnection connection, String testPlan, int expectedCount, long expectedSum) throws Throwable {
     final PhysicalPlanReader reader = PhysicalPlanReaderTestFactory.defaultPhysicalPlanReader(c);
     final PhysicalPlan plan = reader.readPhysicalPlan(Files.toString(FileUtils.getResourceAsFile("/limit/" + testPlan), Charsets.UTF_8));
     final FunctionImplementationRegistry registry = new FunctionImplementationRegistry(c);

http://git-wip-us.apache.org/repos/asf/drill/blob/874bf629/exec/java-exec/src/test/java/org/apache/drill/exec/physical/impl/project/TestSimpleProjection.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/test/java/org/apache/drill/exec/physical/impl/project/TestSimpleProjection.java b/exec/java-exec/src/test/java/org/apache/drill/exec/physical/impl/project/TestSimpleProjection.java
index 02b798f..f4a718a 100644
--- a/exec/java-exec/src/test/java/org/apache/drill/exec/physical/impl/project/TestSimpleProjection.java
+++ b/exec/java-exec/src/test/java/org/apache/drill/exec/physical/impl/project/TestSimpleProjection.java
@@ -34,7 +34,7 @@ import org.apache.drill.exec.physical.impl.SimpleRootExec;
 import org.apache.drill.exec.planner.PhysicalPlanReader;
 import org.apache.drill.exec.planner.PhysicalPlanReaderTestFactory;
 import org.apache.drill.exec.proto.BitControl.PlanFragment;
-import org.apache.drill.exec.rpc.user.UserServer.UserClientConnection;
+import org.apache.drill.exec.rpc.UserClientConnection;
 import org.apache.drill.exec.server.DrillbitContext;
 import org.apache.drill.exec.util.VectorUtil;
 import org.apache.drill.exec.vector.NullableBigIntVector;

http://git-wip-us.apache.org/repos/asf/drill/blob/874bf629/exec/java-exec/src/test/java/org/apache/drill/exec/physical/impl/sort/TestSimpleSort.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/test/java/org/apache/drill/exec/physical/impl/sort/TestSimpleSort.java b/exec/java-exec/src/test/java/org/apache/drill/exec/physical/impl/sort/TestSimpleSort.java
index 1b10c73..8ba5609 100644
--- a/exec/java-exec/src/test/java/org/apache/drill/exec/physical/impl/sort/TestSimpleSort.java
+++ b/exec/java-exec/src/test/java/org/apache/drill/exec/physical/impl/sort/TestSimpleSort.java
@@ -34,7 +34,7 @@ import org.apache.drill.exec.physical.impl.SimpleRootExec;
 import org.apache.drill.exec.planner.PhysicalPlanReader;
 import org.apache.drill.exec.planner.PhysicalPlanReaderTestFactory;
 import org.apache.drill.exec.proto.BitControl.PlanFragment;
-import org.apache.drill.exec.rpc.user.UserServer.UserClientConnection;
+import org.apache.drill.exec.rpc.UserClientConnection;
 import org.apache.drill.exec.server.DrillbitContext;
 import org.apache.drill.exec.vector.BigIntVector;
 import org.apache.drill.exec.vector.IntVector;

http://git-wip-us.apache.org/repos/asf/drill/blob/874bf629/exec/java-exec/src/test/java/org/apache/drill/exec/physical/impl/trace/TestTraceMultiRecordBatch.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/test/java/org/apache/drill/exec/physical/impl/trace/TestTraceMultiRecordBatch.java b/exec/java-exec/src/test/java/org/apache/drill/exec/physical/impl/trace/TestTraceMultiRecordBatch.java
index 2364798..6351d06 100644
--- a/exec/java-exec/src/test/java/org/apache/drill/exec/physical/impl/trace/TestTraceMultiRecordBatch.java
+++ b/exec/java-exec/src/test/java/org/apache/drill/exec/physical/impl/trace/TestTraceMultiRecordBatch.java
@@ -31,7 +31,7 @@ import org.apache.drill.exec.physical.impl.SimpleRootExec;
 import org.apache.drill.exec.planner.PhysicalPlanReader;
 import org.apache.drill.exec.planner.PhysicalPlanReaderTestFactory;
 import org.apache.drill.exec.proto.BitControl.PlanFragment;
-import org.apache.drill.exec.rpc.user.UserServer.UserClientConnection;
+import org.apache.drill.exec.rpc.UserClientConnection;
 import org.apache.drill.exec.server.DrillbitContext;
 import org.apache.drill.exec.vector.ValueVector;
 import org.junit.Test;

http://git-wip-us.apache.org/repos/asf/drill/blob/874bf629/exec/java-exec/src/test/java/org/apache/drill/exec/physical/impl/trace/TestTraceOutputDump.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/test/java/org/apache/drill/exec/physical/impl/trace/TestTraceOutputDump.java b/exec/java-exec/src/test/java/org/apache/drill/exec/physical/impl/trace/TestTraceOutputDump.java
index 014ec02..a65393d 100644
--- a/exec/java-exec/src/test/java/org/apache/drill/exec/physical/impl/trace/TestTraceOutputDump.java
+++ b/exec/java-exec/src/test/java/org/apache/drill/exec/physical/impl/trace/TestTraceOutputDump.java
@@ -36,7 +36,7 @@ import org.apache.drill.exec.proto.BitControl.PlanFragment;
 import org.apache.drill.exec.proto.ExecProtos.FragmentHandle;
 import org.apache.drill.exec.proto.helper.QueryIdHelper;
 import org.apache.drill.exec.record.VectorAccessible;
-import org.apache.drill.exec.rpc.user.UserServer.UserClientConnection;
+import org.apache.drill.exec.rpc.UserClientConnection;
 import org.apache.drill.exec.server.DrillbitContext;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataInputStream;

http://git-wip-us.apache.org/repos/asf/drill/blob/874bf629/exec/java-exec/src/test/java/org/apache/drill/exec/physical/impl/union/TestSimpleUnion.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/test/java/org/apache/drill/exec/physical/impl/union/TestSimpleUnion.java b/exec/java-exec/src/test/java/org/apache/drill/exec/physical/impl/union/TestSimpleUnion.java
index e6f3a7e..2a392d7 100644
--- a/exec/java-exec/src/test/java/org/apache/drill/exec/physical/impl/union/TestSimpleUnion.java
+++ b/exec/java-exec/src/test/java/org/apache/drill/exec/physical/impl/union/TestSimpleUnion.java
@@ -32,7 +32,7 @@ import org.apache.drill.exec.physical.impl.SimpleRootExec;
 import org.apache.drill.exec.planner.PhysicalPlanReader;
 import org.apache.drill.exec.planner.PhysicalPlanReaderTestFactory;
 import org.apache.drill.exec.proto.BitControl.PlanFragment;
-import org.apache.drill.exec.rpc.user.UserServer;
+import org.apache.drill.exec.rpc.UserClientConnection;
 import org.apache.drill.exec.server.DrillbitContext;
 import org.junit.Test;
 
@@ -48,7 +48,7 @@ public class TestSimpleUnion extends ExecTest {
   private final DrillConfig c = DrillConfig.create();
 
   @Test
-  public void testUnion(@Injectable final DrillbitContext bitContext, @Injectable UserServer.UserClientConnection connection) throws Throwable {
+  public void testUnion(@Injectable final DrillbitContext bitContext, @Injectable UserClientConnection connection) throws Throwable {
     mockDrillbitContext(bitContext);
 
     final PhysicalPlanReader reader = PhysicalPlanReaderTestFactory.defaultPhysicalPlanReader(c);

http://git-wip-us.apache.org/repos/asf/drill/blob/874bf629/exec/java-exec/src/test/java/org/apache/drill/exec/record/TestRecordIterator.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/test/java/org/apache/drill/exec/record/TestRecordIterator.java b/exec/java-exec/src/test/java/org/apache/drill/exec/record/TestRecordIterator.java
index 847caa5..fb71b3d 100644
--- a/exec/java-exec/src/test/java/org/apache/drill/exec/record/TestRecordIterator.java
+++ b/exec/java-exec/src/test/java/org/apache/drill/exec/record/TestRecordIterator.java
@@ -42,7 +42,7 @@ import org.apache.drill.exec.pop.PopUnitTestBase;
 import org.apache.drill.exec.planner.PhysicalPlanReader;
 import org.apache.drill.exec.proto.BitControl;
 import org.apache.drill.exec.proto.UserBitShared;
-import org.apache.drill.exec.rpc.user.UserServer;
+import org.apache.drill.exec.rpc.UserClientConnection;
 import org.apache.drill.exec.server.DrillbitContext;
 import org.apache.drill.exec.vector.ValueVector;
 import org.junit.Test;
@@ -58,7 +58,7 @@ public class TestRecordIterator extends PopUnitTestBase {
 
   @Test
   public void testSimpleIterator(@Injectable final DrillbitContext bitContext,
-                                  @Injectable UserServer.UserClientConnection connection) throws Throwable{
+                                  @Injectable UserClientConnection connection) throws Throwable{
     mockDrillbitContext(bitContext);
 
     final PhysicalPlanReader reader = PhysicalPlanReaderTestFactory.defaultPhysicalPlanReader(c);
@@ -115,7 +115,7 @@ public class TestRecordIterator extends PopUnitTestBase {
 
   @Test
   public void testMarkResetIterator(@Injectable final DrillbitContext bitContext,
-                                 @Injectable UserServer.UserClientConnection connection) throws Throwable{
+                                 @Injectable UserClientConnection connection) throws Throwable{
     mockDrillbitContext(bitContext);
 
     final PhysicalPlanReader reader = PhysicalPlanReaderTestFactory.defaultPhysicalPlanReader(c);

http://git-wip-us.apache.org/repos/asf/drill/blob/874bf629/exec/java-exec/src/test/java/org/apache/drill/exec/store/parquet/ParquetRecordReaderTest.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/test/java/org/apache/drill/exec/store/parquet/ParquetRecordReaderTest.java b/exec/java-exec/src/test/java/org/apache/drill/exec/store/parquet/ParquetRecordReaderTest.java
index 6f3a19a..375ab75 100644
--- a/exec/java-exec/src/test/java/org/apache/drill/exec/store/parquet/ParquetRecordReaderTest.java
+++ b/exec/java-exec/src/test/java/org/apache/drill/exec/store/parquet/ParquetRecordReaderTest.java
@@ -50,7 +50,7 @@ import org.apache.drill.exec.record.MaterializedField;
 import org.apache.drill.exec.record.RecordBatchLoader;
 import org.apache.drill.exec.record.VectorWrapper;
 import org.apache.drill.exec.rpc.user.QueryDataBatch;
-import org.apache.drill.exec.rpc.user.UserServer;
+import org.apache.drill.exec.rpc.UserClientConnection;
 import org.apache.drill.exec.server.DrillbitContext;
 import org.apache.drill.exec.store.CachedSingleFileSystem;
 import org.apache.drill.exec.store.TestOutputMutator;
@@ -603,7 +603,7 @@ public class ParquetRecordReaderTest extends BaseTestQuery {
   @Test
   @Ignore
   public void testPerformance(@Injectable final DrillbitContext bitContext,
-                              @Injectable UserServer.UserClientConnection connection) throws Exception {
+                              @Injectable UserClientConnection connection) throws Exception {
     final DrillConfig c = DrillConfig.create();
     final FunctionImplementationRegistry registry = new FunctionImplementationRegistry(c);
     final FragmentContext context = new FragmentContext(bitContext, BitControl.PlanFragment.getDefaultInstance(), connection, registry);


[11/12] drill git commit: DRILL-5537: Display columns alias for queries with sum() when RDBMS storage plugin is enabled

Posted by jn...@apache.org.
DRILL-5537: Display columns alias for queries with sum() when RDBMS storage plugin is enabled

close #845


Project: http://git-wip-us.apache.org/repos/asf/drill/repo
Commit: http://git-wip-us.apache.org/repos/asf/drill/commit/d38917be
Tree: http://git-wip-us.apache.org/repos/asf/drill/tree/d38917be
Diff: http://git-wip-us.apache.org/repos/asf/drill/diff/d38917be

Branch: refs/heads/master
Commit: d38917be44c7d98f1b37984e324c781f2a4e25ad
Parents: b14e30b
Author: Arina Ielchiieva <ar...@gmail.com>
Authored: Thu May 25 13:23:43 2017 +0000
Committer: Jinfeng Ni <jn...@apache.org>
Committed: Fri Jun 2 21:43:14 2017 -0700

----------------------------------------------------------------------
 .../logical/DrillReduceAggregatesRule.java      | 63 ++++++--------------
 1 file changed, 17 insertions(+), 46 deletions(-)
----------------------------------------------------------------------
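The change below swaps Calcite's deprecated AggregateCall constructor for the
AggregateCall.create(...) factory. Two details in the diff carry the fix: the
factory takes a filter-argument ordinal (-1 meaning "no filter"), and in the
sum-rewrite branch the old call's name is passed through instead of null, so
the column alias survives the rewrite. A condensed before/after, taken from
the hunks below:

    // Before: the alias is dropped because the name argument is null.
    AggregateCall sumZeroCall =
        new AggregateCall(sumZeroAgg, oldAggregateCall.isDistinct(),
            oldAggregateCall.getArgList(), sumType, null);

    // After: create(...) adds the filter ordinal, and the name is preserved.
    AggregateCall sumZeroCall =
        AggregateCall.create(sumZeroAgg, oldAggregateCall.isDistinct(),
            oldAggregateCall.getArgList(), -1, sumType, oldAggregateCall.getName());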


http://git-wip-us.apache.org/repos/asf/drill/blob/d38917be/exec/java-exec/src/main/java/org/apache/drill/exec/planner/logical/DrillReduceAggregatesRule.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/planner/logical/DrillReduceAggregatesRule.java b/exec/java-exec/src/main/java/org/apache/drill/exec/planner/logical/DrillReduceAggregatesRule.java
index 62d679d..9f8d062 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/planner/logical/DrillReduceAggregatesRule.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/planner/logical/DrillReduceAggregatesRule.java
@@ -1,4 +1,4 @@
-/**
+/*
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
  * distributed with this work for additional information
@@ -161,11 +161,11 @@ public class DrillReduceAggregatesRule extends RelOptRule {
     List<AggregateCall> oldCalls = oldAggRel.getAggCallList();
     final int nGroups = oldAggRel.getGroupCount();
 
-    List<AggregateCall> newCalls = new ArrayList<AggregateCall>();
+    List<AggregateCall> newCalls = new ArrayList<>();
     Map<AggregateCall, RexNode> aggCallMapping =
-        new HashMap<AggregateCall, RexNode>();
+        new HashMap<>();
 
-    List<RexNode> projList = new ArrayList<RexNode>();
+    List<RexNode> projList = new ArrayList<>();
 
     // pass through group key
     for (int i = 0; i < nGroups; ++i) {
@@ -179,7 +179,7 @@ public class DrillReduceAggregatesRule extends RelOptRule {
     // will add an expression to the end, and we will create an extra
     // project.
     RelNode input = oldAggRel.getInput();
-    List<RexNode> inputExprs = new ArrayList<RexNode>();
+    List<RexNode> inputExprs = new ArrayList<>();
     for (RelDataTypeField field : input.getRowType().getFieldList()) {
       inputExprs.add(
           rexBuilder.makeInputRef(
@@ -315,24 +315,11 @@ public class DrillReduceAggregatesRule extends RelOptRule {
         typeFactory.createTypeWithNullability(
             avgInputType,
             avgInputType.isNullable() || nGroups == 0);
-    // SqlAggFunction sumAgg = new SqlSumAggFunction(sumType);
     SqlAggFunction sumAgg = new SqlSumEmptyIsZeroAggFunction();
-    AggregateCall sumCall =
-        new AggregateCall(
-            sumAgg,
-            oldCall.isDistinct(),
-            oldCall.getArgList(),
-            sumType,
-            null);
+    AggregateCall sumCall = AggregateCall.create(sumAgg, oldCall.isDistinct(), oldCall.getArgList(), -1, sumType, null);
     final SqlCountAggFunction countAgg = (SqlCountAggFunction) SqlStdOperatorTable.COUNT;
     final RelDataType countType = countAgg.getReturnType(typeFactory);
-    AggregateCall countCall =
-        new AggregateCall(
-            countAgg,
-            oldCall.isDistinct(),
-            oldCall.getArgList(),
-            countType,
-            null);
+    AggregateCall countCall = AggregateCall.create(countAgg, oldCall.isDistinct(), oldCall.getArgList(), -1, countType, null);
 
     RexNode tmpsumRef =
         rexBuilder.addAggCall(
@@ -429,23 +416,10 @@ public class DrillReduceAggregatesRule extends RelOptRule {
               argType, argType.isNullable());
       sumZeroAgg = new SqlSumEmptyIsZeroAggFunction();
     }
-    AggregateCall sumZeroCall =
-        new AggregateCall(
-            sumZeroAgg,
-            oldCall.isDistinct(),
-            oldCall.getArgList(),
-            sumType,
-            null);
+    AggregateCall sumZeroCall = AggregateCall.create(sumZeroAgg, oldCall.isDistinct(), oldCall.getArgList(), -1, sumType, null);
     final SqlCountAggFunction countAgg = (SqlCountAggFunction) SqlStdOperatorTable.COUNT;
     final RelDataType countType = countAgg.getReturnType(typeFactory);
-    AggregateCall countCall =
-        new AggregateCall(
-            countAgg,
-            oldCall.isDistinct(),
-            oldCall.getArgList(),
-            countType,
-            null);
-
+    AggregateCall countCall = AggregateCall.create(countAgg, oldCall.isDistinct(), oldCall.getArgList(), -1, countType, null);
     // NOTE:  these references are with respect to the output
     // of newAggRel
     RexNode sumZeroRef =
@@ -524,10 +498,11 @@ public class DrillReduceAggregatesRule extends RelOptRule {
             argType,
             true);
     final AggregateCall sumArgSquaredAggCall =
-        new AggregateCall(
+        AggregateCall.create(
             new SqlSumAggFunction(sumType),
             oldCall.isDistinct(),
             ImmutableIntList.of(argSquaredOrdinal),
+            -1,
             sumType,
             null);
     final RexNode sumArgSquared =
@@ -540,10 +515,11 @@ public class DrillReduceAggregatesRule extends RelOptRule {
             ImmutableList.of(argType));
 
     final AggregateCall sumArgAggCall =
-        new AggregateCall(
+        AggregateCall.create(
             new SqlSumAggFunction(sumType),
             oldCall.isDistinct(),
             ImmutableIntList.of(argOrdinal),
+            -1,
             sumType,
             null);
     final RexNode sumArg =
@@ -561,13 +537,7 @@ public class DrillReduceAggregatesRule extends RelOptRule {
 
     final SqlCountAggFunction countAgg = (SqlCountAggFunction) SqlStdOperatorTable.COUNT;
     final RelDataType countType = countAgg.getReturnType(typeFactory);
-    final AggregateCall countArgAggCall =
-        new AggregateCall(
-            countAgg,
-            oldCall.isDistinct(),
-            oldCall.getArgList(),
-            countType,
-            null);
+    final AggregateCall countArgAggCall = AggregateCall.create(countAgg, oldCall.isDistinct(), oldCall.getArgList(), -1, countType, null);
     final RexNode countArg =
         rexBuilder.addAggCall(
             countArgAggCall,
@@ -719,12 +689,13 @@ public class DrillReduceAggregatesRule extends RelOptRule {
           final SqlAggFunction sumZeroAgg = new DrillCalciteSqlAggFunctionWrapper(
               new SqlSumEmptyIsZeroAggFunction(), sumType);
           AggregateCall sumZeroCall =
-              new AggregateCall(
+              AggregateCall.create(
                   sumZeroAgg,
                   oldAggregateCall.isDistinct(),
                   oldAggregateCall.getArgList(),
+                  -1,
                   sumType,
-                  null);
+                  oldAggregateCall.getName());
           oldAggRel.getCluster().getRexBuilder()
               .addAggCall(sumZeroCall,
                   oldAggRel.getGroupCount(),


[03/12] drill git commit: DRILL-5140: Fix CompileException in run-time generated code when record batch has large number of fields.

Posted by jn...@apache.org.
DRILL-5140: Fix CompileException in run-time generated code when record batch has large number of fields.

- Changed estimation of max index value and added comments.

close #818


Project: http://git-wip-us.apache.org/repos/asf/drill/repo
Commit: http://git-wip-us.apache.org/repos/asf/drill/commit/b14e30b3
Tree: http://git-wip-us.apache.org/repos/asf/drill/tree/b14e30b3
Diff: http://git-wip-us.apache.org/repos/asf/drill/diff/b14e30b3

Branch: refs/heads/master
Commit: b14e30b3df9803fef418d083837c91d57d7a5fe3
Parents: dd2692e
Author: Volodymyr Vysotskyi <vv...@gmail.com>
Authored: Wed Apr 12 16:07:39 2017 +0000
Committer: Jinfeng Ni <jn...@apache.org>
Committed: Fri Jun 2 21:43:14 2017 -0700

----------------------------------------------------------------------
 .../drill/exec/compile/ClassTransformer.java    |  18 +-
 .../apache/drill/exec/expr/ClassGenerator.java  | 170 ++++++++++++++++++-
 .../exec/compile/TestLargeFileCompilation.java  |   4 +-
 3 files changed, 181 insertions(+), 11 deletions(-)
----------------------------------------------------------------------
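The key change in the ClassGenerator diff below is the formula bounding how
many constant-pool indexes the generated class may consume before fields and
expressions spill into a nested inner class. A worked instance of that
formula, using illustrative numbers (A = 10 template methods and the default
of 50 expressions per method; neither value comes from this patch):

    // maxIndex = round((0xFFFF / (1 + 3 / (3 * A + maxExprsNumber)) - 1000) / 3)
    //   3 * 10 + 50      = 80
    //   1 + 3.0 / 80     = 1.0375
    //   0xFFFF / 1.0375  = 63166.27   (0xFFFF = 65535, the constant-pool limit)
    //   - 1000           = 62166.27   (reserve for template refs)
    //   / 3, rounded     = 20722      = maxIndex
    long maxIndex = Math.round((0xFFFF / (1 + 3. / (3 * 10 + 50)) - 1000) / 3);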


http://git-wip-us.apache.org/repos/asf/drill/blob/b14e30b3/exec/java-exec/src/main/java/org/apache/drill/exec/compile/ClassTransformer.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/compile/ClassTransformer.java b/exec/java-exec/src/main/java/org/apache/drill/exec/compile/ClassTransformer.java
index 5bd28b7..3f01a5a 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/compile/ClassTransformer.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/compile/ClassTransformer.java
@@ -22,6 +22,7 @@ import java.util.LinkedList;
 import java.util.Map;
 import java.util.Set;
 
+import org.apache.commons.lang3.tuple.Pair;
 import org.apache.drill.common.config.DrillConfig;
 import org.apache.drill.common.util.DrillStringUtils;
 import org.apache.drill.common.util.FileUtils;
@@ -242,14 +243,14 @@ public class ClassTransformer {
       final byte[][] implementationClasses = classLoader.getClassByteCode(set.generated, entireClass);
 
       long totalBytecodeSize = 0;
-      Map<String, ClassNode> classesToMerge = Maps.newHashMap();
+      Map<String, Pair<byte[], ClassNode>> classesToMerge = Maps.newHashMap();
       for (byte[] clazz : implementationClasses) {
         totalBytecodeSize += clazz.length;
         final ClassNode node = AsmUtil.classFromBytes(clazz, ClassReader.EXPAND_FRAMES);
         if (!AsmUtil.isClassOk(logger, "implementationClasses", node)) {
           throw new IllegalStateException("Problem found with implementationClasses");
         }
-        classesToMerge.put(node.name, node);
+        classesToMerge.put(node.name, Pair.of(clazz, node));
       }
 
       final LinkedList<ClassSet> names = Lists.newLinkedList();
@@ -264,7 +265,14 @@ public class ClassTransformer {
         final ClassNames nextPrecompiled = nextSet.precompiled;
         final byte[] precompiledBytes = byteCodeLoader.getClassByteCodeFromPath(nextPrecompiled.clazz);
         final ClassNames nextGenerated = nextSet.generated;
-        final ClassNode generatedNode = classesToMerge.get(nextGenerated.slash);
+        // keeps only the classes that have not been merged
+        Pair<byte[], ClassNode> classNodePair = classesToMerge.remove(nextGenerated.slash);
+        final ClassNode generatedNode;
+        if (classNodePair != null) {
+          generatedNode = classNodePair.getValue();
+        } else {
+          generatedNode = null;
+        }
 
         /*
          * TODO
@@ -309,6 +317,10 @@ public class ClassTransformer {
         namesCompleted.add(nextSet);
       }
 
+      // adds the byte code of the classes that have not been merged so they are accessible to the outer class
+      for (Map.Entry<String, Pair<byte[], ClassNode>> clazz : classesToMerge.entrySet()) {
+        classLoader.injectByteCode(clazz.getKey().replace(FileUtils.separatorChar, '.'), clazz.getValue().getKey());
+      }
       Class<?> c = classLoader.findClass(set.generated.dot);
       if (templateDefinition.getExternalInterface().isAssignableFrom(c)) {
         logger.debug("Compiled and merged {}: bytecode size = {}, time = {} ms.",

http://git-wip-us.apache.org/repos/asf/drill/blob/b14e30b3/exec/java-exec/src/main/java/org/apache/drill/exec/expr/ClassGenerator.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/expr/ClassGenerator.java b/exec/java-exec/src/main/java/org/apache/drill/exec/expr/ClassGenerator.java
index c94bed5..8547ed4 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/expr/ClassGenerator.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/expr/ClassGenerator.java
@@ -21,10 +21,12 @@ import static org.apache.drill.exec.compile.sig.GeneratorMapping.GM;
 
 import java.lang.reflect.Constructor;
 import java.lang.reflect.Modifier;
+import java.util.ArrayList;
 import java.util.LinkedList;
 import java.util.List;
 import java.util.Map;
 
+import org.apache.drill.common.exceptions.DrillRuntimeException;
 import org.apache.drill.common.expression.LogicalExpression;
 import org.apache.drill.common.types.TypeProtos;
 import org.apache.drill.common.types.TypeProtos.DataMode;
@@ -59,6 +61,7 @@ import com.sun.codemodel.JTryBlock;
 import com.sun.codemodel.JType;
 import com.sun.codemodel.JVar;
 import org.apache.drill.exec.server.options.OptionSet;
+import org.objectweb.asm.Label;
 
 public class ClassGenerator<T>{
 
@@ -66,7 +69,8 @@ public class ClassGenerator<T>{
   public static final GeneratorMapping DEFAULT_CONSTANT_MAP = GM("doSetup", "doSetup", null, null);
 
   static final org.slf4j.Logger logger = org.slf4j.LoggerFactory.getLogger(ClassGenerator.class);
-  public static enum BlockType {SETUP, EVAL, RESET, CLEANUP};
+
+  public enum BlockType {SETUP, EVAL, RESET, CLEANUP}
 
   private final SignatureHolder sig;
   private final EvaluationVisitor evaluationVisitor;
@@ -77,10 +81,43 @@ public class ClassGenerator<T>{
   private final CodeGenerator<T> codeGenerator;
 
   public final JDefinedClass clazz;
-  private final LinkedList<SizedJBlock>[] blocks;
+
   private final JCodeModel model;
   private final OptionSet optionManager;
 
+  private ClassGenerator<T> innerClassGenerator;
+  private LinkedList<SizedJBlock>[] blocks;
+  private LinkedList<SizedJBlock>[] oldBlocks;
+
+  /**
+   * A field is assumed to occupy 3 indexes within the constant pool: the index of the CONSTANT_Fieldref_info +
+   * CONSTANT_Fieldref_info.name_and_type_index + CONSTANT_NameAndType_info.name_index.
+   * CONSTANT_NameAndType_info.descriptor_index has a limited range of values and CONSTANT_Fieldref_info.class_index
+   * is the same for a single class; they are taken into account later.
+   * <p>
+   * A local variable occupies 1 index within the constant pool.
+   * {@link org.objectweb.asm.MethodWriter#visitLocalVariable(String, String, String, Label, Label, int)}
+   * <p>
+   * For an upper estimate of the max index value, suppose that each field and local variable uses distinct literal
+   * values that occupy two indexes; then the number of occupied indexes within the constant pool is
+   * fieldCount * 3 + fieldCount * 2 + (index - fieldCount) * 3 => fieldCount * 2 + index * 3
+   * <p>
+   * A method is assumed to occupy 3 indexes within the constant pool: the index of the CONSTANT_Methodref_info +
+   * CONSTANT_Methodref_info.name_and_type_index + CONSTANT_NameAndType_info.name_index.
+   * <p>
+   * For an upper estimate of the number of split methods, suppose that each expression in a method uses a single
+   * variable. If the max number of indexes occupied by fields and local variables is M, the number of split methods
+   * is N, and the number of abstract methods in the template is A, then the split method count is
+   * N = (M - A * N * 3) / 50 => N = M / (50 + A * 3)
+   * <p>
+   * Class references plus the fields and methods from the template must also be taken into account,
+   * so 1000 indexes are reserved for them.
+   * <p>
+   * The size of the occupied part of the constant pool is then
+   * (fieldCount * 2 + index * 3 + 1000) * (1 + 3 / (50 + A * 3))
+   */
+  private long maxIndex;
+
   private int index = 0;
   private int labelIndex = 0;
   private MappingSet mappings;
@@ -123,6 +160,8 @@ public class ClassGenerator<T>{
       JDefinedClass innerClazz = clazz._class(mods, innerClassName);
       innerClasses.put(innerClassName, new ClassGenerator<>(codeGenerator, mappingSet, child, eval, innerClazz, model, optionManager));
     }
+    long maxExprsNumber = optionManager != null ? optionManager.getOption(ExecConstants.CODE_GEN_EXP_IN_METHOD_SIZE_VALIDATOR) : 50;
+    maxIndex = Math.round((0xFFFF / (1 + 3. / (3 * sig.size() + maxExprsNumber)) - 1000) / 3);
   }
 
   public ClassGenerator<T> getInnerGenerator(String name) {
@@ -136,6 +175,9 @@ public class ClassGenerator<T>{
   }
 
   public void setMappingSet(MappingSet mappings) {
+    if (innerClassGenerator != null) {
+      innerClassGenerator.setMappingSet(mappings);
+    }
     this.mappings = mappings;
   }
 
@@ -210,7 +252,19 @@ public class ClassGenerator<T>{
     return declareVectorValueSetupAndMember(DirectExpression.direct(batchName), fieldId);
   }
 
+  /**
+   * Creates class variable for the value vector using metadata from {@code fieldId}
+   * and initializes it using setup blocks.
+   *
+   * @param batchName expression for invoking {@code getValueAccessorById} method
+   * @param fieldId   metadata of the field that should be declared
+   * @return a newly generated class field
+   */
   public JVar declareVectorValueSetupAndMember(DirectExpression batchName, TypedFieldId fieldId) {
+    // declares field in the inner class if innerClassGenerator has been created
+    if (innerClassGenerator != null) {
+      return innerClassGenerator.declareVectorValueSetupAndMember(batchName, fieldId);
+    }
     final ValueVectorSetup setup = new ValueVectorSetup(batchName, fieldId);
 //    JVar var = this.vvDeclaration.get(setup);
 //    if(var != null) return var;
@@ -287,6 +341,65 @@ public class ClassGenerator<T>{
   }
 
   /**
+   * Recursively assigns {@link #blocks} from the last nested {@link #innerClassGenerator}
+   * to this generator's {@link #blocks} if an {@link #innerClassGenerator} has been created.
+   */
+  private void setupValidBlocks() {
+    if (createNestedClass()) {
+      // blocks from the last inner class should be used
+      setupInnerClassBlocks();
+    }
+  }
+
+  /**
+   * Creates {@link #innerClassGenerator} with inner class
+   * if {@link #hasMaxIndexValue()} returns {@code true}.
+   *
+   * @return true if splitting happened.
+   */
+  private boolean createNestedClass() {
+    if (hasMaxIndexValue()) {
+      // all new fields will be declared in the class from innerClassGenerator
+      if (innerClassGenerator == null) {
+        try {
+          JDefinedClass innerClazz = clazz._class(JMod.PRIVATE, clazz.name() + "0");
+          innerClassGenerator = new ClassGenerator<>(codeGenerator, mappings, sig, evaluationVisitor, innerClazz, model, optionManager);
+        } catch (JClassAlreadyExistsException e) {
+          throw new DrillRuntimeException(e);
+        }
+        oldBlocks = blocks;
+        innerClassGenerator.index = index;
+        innerClassGenerator.maxIndex += index;
+        // blocks from the inner class should be used
+        setupInnerClassBlocks();
+        return true;
+      }
+      return innerClassGenerator.createNestedClass();
+    }
+    return false;
+  }
+
+  /**
+   * Checks that {@link #index} has reached its max value.
+   *
+   * @return true if {@code index + clazz.fields().size() * 2 / 3} is greater than {@code maxIndex}
+   */
+  private boolean hasMaxIndexValue() {
+    return index + clazz.fields().size() * 2 / 3 > maxIndex;
+  }
+
+  /**
+   * Recursively gets the blocks from the last inner {@link ClassGenerator innerClassGenerator}
+   * and assigns them to this generator's {@link #blocks}.
+   */
+  private void setupInnerClassBlocks() {
+    if (innerClassGenerator != null) {
+      innerClassGenerator.setupInnerClassBlocks();
+      blocks = innerClassGenerator.blocks;
+    }
+  }
+
+  /**
    * Create a new code block, closing the current block.
    *
    * @param mode the {@link BlkCreateMode block create mode}
@@ -306,17 +419,27 @@ public class ClassGenerator<T>{
     }
     if (blockRotated) {
       evaluationVisitor.previousExpressions.clear();
+      setupValidBlocks();
     }
   }
 
+  /**
+   * Creates methods from the signature {@code sig} with body from the appropriate {@code blocks}.
+   */
   void flushCode() {
+    JVar innerClassField = null;
+    if (innerClassGenerator != null) {
+      blocks = oldBlocks;
+      innerClassField = clazz.field(JMod.NONE, model.ref(innerClassGenerator.clazz.name()), "innerClassField");
+      innerClassGenerator.flushCode();
+    }
     int i = 0;
-    for(CodeGeneratorMethod method : sig) {
+    for (CodeGeneratorMethod method : sig) {
       JMethod outer = clazz.method(JMod.PUBLIC, model._ref(method.getReturnType()), method.getMethodName());
-      for(CodeGeneratorArgument arg : method) {
+      for (CodeGeneratorArgument arg : method) {
         outer.param(arg.getType(), arg.getName());
       }
-      for(Class<?> c : method.getThrowsIterable()) {
+      for (Class<?> c : method.getThrowsIterable()) {
         outer._throws(model.ref(c));
       }
       outer._throws(SchemaChangeException.class);
@@ -353,6 +476,38 @@ public class ClassGenerator<T>{
           exprsInMethod += sb.getCount();
         }
       }
+      if (innerClassField != null) {
+        // creates inner class instance and initializes innerClassField
+        if (method.getMethodName().equals("__DRILL_INIT__")) {
+          JInvocation rhs = JExpr._new(innerClassGenerator.clazz);
+          JBlock block = new JBlock().assign(innerClassField, rhs);
+          outer.body().add(block);
+        }
+
+        List<JType> argTypes = new ArrayList<>();
+        for (CodeGeneratorArgument arg : method) {
+          argTypes.add(model._ref(arg.getType()));
+        }
+        JMethod inner = innerClassGenerator.clazz.getMethod(method.getMethodName(), argTypes.toArray(new JType[0]));
+
+        if (inner != null) {
+          // removes empty method from the inner class
+          if (inner.body().isEmpty()) {
+            innerClassGenerator.clazz.methods().remove(inner);
+            continue;
+          }
+
+          JInvocation methodCall = innerClassField.invoke(inner);
+          for (CodeGeneratorArgument arg : method) {
+            methodCall.arg(JExpr.direct(arg.getName()));
+          }
+          if (isVoidMethod) {
+            outer.body().add(methodCall);
+          } else {
+            outer.body()._return(methodCall);
+          }
+        }
+      }
     }
 
     for(ClassGenerator<T> child : innerClasses.values()) {
@@ -373,10 +528,13 @@ public class ClassGenerator<T>{
   }
 
   public JVar declareClassField(String prefix, JType t) {
-    return clazz.field(JMod.NONE, t, prefix + index++);
+    return declareClassField(prefix, t, null);
   }
 
   public JVar declareClassField(String prefix, JType t, JExpression init) {
+    if (innerClassGenerator != null && hasMaxIndexValue()) {
+      return innerClassGenerator.clazz.field(JMod.NONE, t, prefix + index++, init);
+    }
     return clazz.field(JMod.NONE, t, prefix + index++, init);
   }
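To visualize what the flushCode() changes above produce once a split occurs,
here is a hypothetical shape of the generated outer/inner pair. The class and
method names are illustrative; only __DRILL_INIT__, the innerClassField name,
and the delegation pattern come from the diff:

    public class GenEval {
      private GenEval0 innerClassField;          // declared in flushCode()

      public void __DRILL_INIT__() {
        innerClassField = new GenEval0();        // instantiated during class init
      }

      public void doEval(int inIndex, int outIndex) {
        // expressions that fit in the outer constant pool run here, then the
        // call is delegated so the spilled fields and blocks execute inside
        innerClassField.doEval(inIndex, outIndex);
      }

      private class GenEval0 {                   // clazz.name() + "0" in the diff
        public void doEval(int inIndex, int outIndex) {
          // fields and expressions that exceeded the outer class's maxIndex
        }
      }
    }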
 

http://git-wip-us.apache.org/repos/asf/drill/blob/b14e30b3/exec/java-exec/src/test/java/org/apache/drill/exec/compile/TestLargeFileCompilation.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/test/java/org/apache/drill/exec/compile/TestLargeFileCompilation.java b/exec/java-exec/src/test/java/org/apache/drill/exec/compile/TestLargeFileCompilation.java
index 8416d73..1903f35 100644
--- a/exec/java-exec/src/test/java/org/apache/drill/exec/compile/TestLargeFileCompilation.java
+++ b/exec/java-exec/src/test/java/org/apache/drill/exec/compile/TestLargeFileCompilation.java
@@ -1,4 +1,4 @@
-/**
+/*
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
  * distributed with this work for additional information
@@ -45,7 +45,7 @@ public class TestLargeFileCompilation extends BaseTestQuery {
 
   private static final int ITERATION_COUNT = Integer.valueOf(System.getProperty("TestLargeFileCompilation.iteration", "1"));
 
-  private static final int NUM_PROJECT_COLUMNS = 2000;
+  private static final int NUM_PROJECT_COLUMNS = 5000;
 
   private static final int NUM_ORDERBY_COLUMNS = 500;
 


[10/12] drill git commit: DRILL-5356: Refactor Parquet Record Reader

Posted by jn...@apache.org.
DRILL-5356: Refactor Parquet Record Reader

The Parquet reader is Drill's premier data source and has worked very well
for many years. As with any piece of code, it has grown in complexity over
that time and has become hard to understand and maintain.

In work on another project, we found that Parquet is accidentally creating
"low density" batches: record batches with little actual data compared to
the amount of memory allocated. We'd like to fix that.

However, the current complexity of the reader code creates a barrier to
making improvements: the code is so complex that it is often better to
leave bugs unfixed, or risk spending large amounts of time struggling to
make small changes.

This commit offers to help revitalize the Parquet reader. Functionality is
identical to the code in master, but the code has been pulled apart into
various classes, each of which focuses on one part of the task: building
up a schema, keeping track of read state, a strategy for reading various
combinations of records, etc. The idea is that it is easier to understand
several small, focused classes than one huge, complex class. Indeed, the
idea of small, focused classes is common in the industry; it is nothing new.

Unit tests pass with the change. Since no logic has changed (we only moved
lines of code), that is a good indication that everything still works.

Also includes fixes based on review comments.

closes #789


Project: http://git-wip-us.apache.org/repos/asf/drill/repo
Commit: http://git-wip-us.apache.org/repos/asf/drill/commit/676ea889
Tree: http://git-wip-us.apache.org/repos/asf/drill/tree/676ea889
Diff: http://git-wip-us.apache.org/repos/asf/drill/diff/676ea889

Branch: refs/heads/master
Commit: 676ea889bb69e9e0a733cab29665236d066bd1ab
Parents: 9ab91ff
Author: Paul Rogers <pr...@maprtech.com>
Authored: Wed Mar 15 13:49:07 2017 -0700
Committer: Jinfeng Ni <jn...@apache.org>
Committed: Fri Jun 2 21:43:14 2017 -0700

----------------------------------------------------------------------
 .../exec/planner/physical/PlannerSettings.java  |   2 +-
 .../exec/store/parquet/ParquetReaderStats.java  |  70 ++-
 .../store/parquet/ParquetReaderUtility.java     |   2 +-
 .../parquet/columnreaders/BatchReader.java      | 169 +++++++
 .../parquet/columnreaders/ColumnReader.java     |   6 +-
 .../columnreaders/FixedWidthRepeatedReader.java |   2 +-
 .../columnreaders/ParquetColumnMetadata.java    | 154 ++++++
 .../columnreaders/ParquetRecordReader.java      | 494 +++----------------
 .../parquet/columnreaders/ParquetSchema.java    | 265 ++++++++++
 .../store/parquet/columnreaders/ReadState.java  | 192 +++++++
 .../parquet2/DrillParquetGroupConverter.java    |   4 +-
 .../store/parquet/ParquetInternalsTest.java     | 158 ++++++
 .../test/resources/parquet/expected/bogus.csv   |  20 +
 .../resources/parquet/expected/fixedWidth.csv   |  20 +
 .../resources/parquet/expected/mixedWidth.csv   |  20 +
 .../test/resources/parquet/expected/star.csv    |  20 +
 .../parquet/expected/variableWidth.csv          |  20 +
 17 files changed, 1168 insertions(+), 450 deletions(-)
----------------------------------------------------------------------
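The refactoring's central move is visible in the new BatchReader class below:
one abstract base holding the shared read loop, plus three small strategy
subclasses. A sketch of how a caller might choose among them; the selection
conditions are inferred from the class javadoc below and are an assumption,
since the instantiation site is not part of this excerpt:

    BatchReader batchReader;
    if (readState.getColumnReaders().isEmpty()) {
      // the query asks for columns the Parquet file does not contain
      batchReader = new BatchReader.MockBatchReader(readState);
    } else if (allColumnsFixedWidth) {
      batchReader = new BatchReader.FixedWidthReader(readState);
    } else {
      batchReader = new BatchReader.VariableWidthReader(readState);
    }
    int recordsRead = batchReader.readBatch();   // declared to throw Exception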


http://git-wip-us.apache.org/repos/asf/drill/blob/676ea889/exec/java-exec/src/main/java/org/apache/drill/exec/planner/physical/PlannerSettings.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/planner/physical/PlannerSettings.java b/exec/java-exec/src/main/java/org/apache/drill/exec/planner/physical/PlannerSettings.java
index 53d67c0..648adb7 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/planner/physical/PlannerSettings.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/planner/physical/PlannerSettings.java
@@ -76,7 +76,7 @@ public class PlannerSettings implements Context{
   public static final OptionValidator HASH_JOIN_SWAP = new BooleanValidator("planner.enable_hashjoin_swap", true);
   public static final OptionValidator HASH_JOIN_SWAP_MARGIN_FACTOR = new RangeDoubleValidator("planner.join.hash_join_swap_margin_factor", 0, 100, 10d);
   public static final String ENABLE_DECIMAL_DATA_TYPE_KEY = "planner.enable_decimal_data_type";
-  public static final OptionValidator ENABLE_DECIMAL_DATA_TYPE = new BooleanValidator(ENABLE_DECIMAL_DATA_TYPE_KEY, false);
+  public static final BooleanValidator ENABLE_DECIMAL_DATA_TYPE = new BooleanValidator(ENABLE_DECIMAL_DATA_TYPE_KEY, false);
   public static final OptionValidator HEP_OPT = new BooleanValidator("planner.enable_hep_opt", true);
   public static final OptionValidator HEP_PARTITION_PRUNING = new BooleanValidator("planner.enable_hep_partition_pruning", true);
   public static final OptionValidator PLANNER_MEMORY_LIMIT = new RangeLongValidator("planner.memory_limit",

http://git-wip-us.apache.org/repos/asf/drill/blob/676ea889/exec/java-exec/src/main/java/org/apache/drill/exec/store/parquet/ParquetReaderStats.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/store/parquet/ParquetReaderStats.java b/exec/java-exec/src/main/java/org/apache/drill/exec/store/parquet/ParquetReaderStats.java
index b1dc0be..6a7b967 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/store/parquet/ParquetReaderStats.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/store/parquet/ParquetReaderStats.java
@@ -1,4 +1,4 @@
-/**
+/*
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
  * distributed with this work for additional information
@@ -19,6 +19,10 @@ package org.apache.drill.exec.store.parquet;
 
 import java.util.concurrent.atomic.AtomicLong;
 
+import org.apache.drill.exec.ops.OperatorStats;
+import org.apache.drill.exec.store.parquet.columnreaders.ParquetRecordReader.Metric;
+import org.apache.hadoop.fs.Path;
+
 public class ParquetReaderStats {
 
   public AtomicLong numDictPageLoads = new AtomicLong();
@@ -48,6 +52,66 @@ public class ParquetReaderStats {
   public ParquetReaderStats() {
   }
 
-}
-
+  public void logStats(org.slf4j.Logger logger, Path hadoopPath) {
+    logger.trace(
+        "ParquetTrace,Summary,{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{}",
+        hadoopPath,
+        numDictPageLoads,
+        numDataPageLoads,
+        numDataPagesDecoded,
+        numDictPagesDecompressed,
+        numDataPagesDecompressed,
+        totalDictPageReadBytes,
+        totalDataPageReadBytes,
+        totalDictDecompressedBytes,
+        totalDataDecompressedBytes,
+        timeDictPageLoads,
+        timeDataPageLoads,
+        timeDataPageDecode,
+        timeDictPageDecode,
+        timeDictPagesDecompressed,
+        timeDataPagesDecompressed,
+        timeDiskScanWait,
+        timeDiskScan,
+        timeFixedColumnRead,
+        timeVarColumnRead
+    );
+  }
 
+  public void update(OperatorStats stats) {
+    stats.addLongStat(Metric.NUM_DICT_PAGE_LOADS,
+        numDictPageLoads.longValue());
+    stats.addLongStat(Metric.NUM_DATA_PAGE_lOADS, numDataPageLoads.longValue());
+    stats.addLongStat(Metric.NUM_DATA_PAGES_DECODED, numDataPagesDecoded.longValue());
+    stats.addLongStat(Metric.NUM_DICT_PAGES_DECOMPRESSED,
+        numDictPagesDecompressed.longValue());
+    stats.addLongStat(Metric.NUM_DATA_PAGES_DECOMPRESSED,
+        numDataPagesDecompressed.longValue());
+    stats.addLongStat(Metric.TOTAL_DICT_PAGE_READ_BYTES,
+        totalDictPageReadBytes.longValue());
+    stats.addLongStat(Metric.TOTAL_DATA_PAGE_READ_BYTES,
+        totalDataPageReadBytes.longValue());
+    stats.addLongStat(Metric.TOTAL_DICT_DECOMPRESSED_BYTES,
+        totalDictDecompressedBytes.longValue());
+    stats.addLongStat(Metric.TOTAL_DATA_DECOMPRESSED_BYTES,
+        totalDataDecompressedBytes.longValue());
+    stats.addLongStat(Metric.TIME_DICT_PAGE_LOADS,
+        timeDictPageLoads.longValue());
+    stats.addLongStat(Metric.TIME_DATA_PAGE_LOADS,
+        timeDataPageLoads.longValue());
+    stats.addLongStat(Metric.TIME_DATA_PAGE_DECODE,
+        timeDataPageDecode.longValue());
+    stats.addLongStat(Metric.TIME_DICT_PAGE_DECODE,
+        timeDictPageDecode.longValue());
+    stats.addLongStat(Metric.TIME_DICT_PAGES_DECOMPRESSED,
+        timeDictPagesDecompressed.longValue());
+    stats.addLongStat(Metric.TIME_DATA_PAGES_DECOMPRESSED,
+        timeDataPagesDecompressed.longValue());
+    stats.addLongStat(Metric.TIME_DISK_SCAN_WAIT,
+        timeDiskScanWait.longValue());
+    stats.addLongStat(Metric.TIME_DISK_SCAN, timeDiskScan.longValue());
+    stats.addLongStat(Metric.TIME_FIXEDCOLUMN_READ, timeFixedColumnRead.longValue());
+    stats.addLongStat(Metric.TIME_VARCOLUMN_READ, timeVarColumnRead.longValue());
+    stats.addLongStat(Metric.TIME_PROCESS, timeProcess.longValue());
+  }
+}

http://git-wip-us.apache.org/repos/asf/drill/blob/676ea889/exec/java-exec/src/main/java/org/apache/drill/exec/store/parquet/ParquetReaderUtility.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/store/parquet/ParquetReaderUtility.java b/exec/java-exec/src/main/java/org/apache/drill/exec/store/parquet/ParquetReaderUtility.java
index 4247d41..7d7c13b 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/store/parquet/ParquetReaderUtility.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/store/parquet/ParquetReaderUtility.java
@@ -108,7 +108,7 @@ public class ParquetReaderUtility {
   }
 
   public static void checkDecimalTypeEnabled(OptionManager options) {
-    if (options.getOption(PlannerSettings.ENABLE_DECIMAL_DATA_TYPE_KEY).bool_val == false) {
+    if (! options.getOption(PlannerSettings.ENABLE_DECIMAL_DATA_TYPE)) {
       throw UserException.unsupportedError()
         .message(ExecErrorConstants.DECIMAL_DISABLE_ERR_MSG)
         .build(logger);
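The one-line hunk above works together with the PlannerSettings hunk earlier in
this commit: once ENABLE_DECIMAL_DATA_TYPE is declared as a BooleanValidator
rather than a plain OptionValidator, the typed getOption overload can return a
primitive boolean instead of an OptionValue that must be unwrapped. A minimal
sketch of the two styles, assuming the OptionManager overloads used in the diff:

    // Declaration with the concrete validator type (PlannerSettings hunk):
    public static final BooleanValidator ENABLE_DECIMAL_DATA_TYPE =
        new BooleanValidator("planner.enable_decimal_data_type", false);

    // Pre-patch: string lookup returns an OptionValue wrapper.
    boolean oldStyle = options.getOption("planner.enable_decimal_data_type").bool_val;

    // Post-patch: the BooleanValidator overload returns boolean directly.
    boolean newStyle = options.getOption(ENABLE_DECIMAL_DATA_TYPE);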

http://git-wip-us.apache.org/repos/asf/drill/blob/676ea889/exec/java-exec/src/main/java/org/apache/drill/exec/store/parquet/columnreaders/BatchReader.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/store/parquet/columnreaders/BatchReader.java b/exec/java-exec/src/main/java/org/apache/drill/exec/store/parquet/columnreaders/BatchReader.java
new file mode 100644
index 0000000..651c813
--- /dev/null
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/store/parquet/columnreaders/BatchReader.java
@@ -0,0 +1,169 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.drill.exec.store.parquet.columnreaders;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.concurrent.Future;
+import java.util.concurrent.TimeUnit;
+
+import com.google.common.base.Stopwatch;
+import com.google.common.collect.Lists;
+
+/**
+ * Base strategy for reading a batch of Parquet records.
+ */
+public abstract class BatchReader {
+
+  protected final ReadState readState;
+
+  public BatchReader(ReadState readState) {
+    this.readState = readState;
+  }
+
+  public int readBatch() throws Exception {
+    ColumnReader<?> firstColumnStatus = readState.getFirstColumnReader();
+    long recordsToRead = Math.min(getReadCount(firstColumnStatus), readState.getRecordsToRead());
+    int readCount = readRecords(firstColumnStatus, recordsToRead);
+    readState.fillNullVectors(readCount);
+    return readCount;
+  }
+
+  protected abstract long getReadCount(ColumnReader<?> firstColumnStatus);
+
+  protected abstract int readRecords(ColumnReader<?> firstColumnStatus, long recordsToRead) throws Exception;
+
+  protected void readAllFixedFields(long recordsToRead) throws Exception {
+    Stopwatch timer = Stopwatch.createStarted();
+    if (readState.useAsyncColReader()) {
+      readAllFixedFieldsParallel(recordsToRead);
+    } else {
+      readAllFixedFieldsSerial(recordsToRead);
+    }
+    readState.parquetReaderStats().timeFixedColumnRead.addAndGet(timer.elapsed(TimeUnit.NANOSECONDS));
+  }
+
+  protected void readAllFixedFieldsSerial(long recordsToRead) throws IOException {
+    for (ColumnReader<?> crs : readState.getColumnReaders()) {
+      crs.processPages(recordsToRead);
+    }
+  }
+
+  protected void readAllFixedFieldsParallel(long recordsToRead) throws Exception {
+    ArrayList<Future<Long>> futures = Lists.newArrayList();
+    for (ColumnReader<?> crs : readState.getColumnReaders()) {
+      Future<Long> f = crs.processPagesAsync(recordsToRead);
+      futures.add(f);
+    }
+    Exception exception = null;
+    for (Future<Long> f : futures) {
+      if (exception != null) {
+        f.cancel(true);
+      } else {
+        try {
+          f.get();
+        } catch (Exception e) {
+          f.cancel(true);
+          exception = e;
+        }
+      }
+    }
+    if (exception != null) {
+      throw exception;
+    }
+  }
+
+  /**
+   * Strategy for reading mock records. Mock records appear to occur in the case
+   * in which the query has SELECT a, b, but the Parquet file has only c, d.
+   * A mock scan reads dummy columns for all records to ensure that the batch
+   * contains a record for each Parquet record, but with no data per record.
+   * (This explanation is reverse-engineered from the code and may be wrong.
+   * Caveat emptor!)
+   */
+
+  public static class MockBatchReader extends BatchReader {
+
+    public MockBatchReader(ReadState readState) {
+      super(readState);
+    }
+
+    @Override
+    protected long getReadCount(ColumnReader<?> firstColumnStatus) {
+      if (readState.recordsRead() == readState.schema().getGroupRecordCount()) {
+        return 0;
+      }
+      return Math.min(ParquetRecordReader.DEFAULT_RECORDS_TO_READ_IF_VARIABLE_WIDTH,
+                      readState.schema().getGroupRecordCount() - readState.recordsRead());
+    }
+
+    @Override
+    protected int readRecords(ColumnReader<?> firstColumnStatus, long recordsToRead) {
+      readState.updateCounts((int) recordsToRead);
+      return (int) recordsToRead;
+    }
+  }
+
+  /**
+   * Strategy for reading a record batch when all columns are
+   * fixed-width.
+   */
+
+  public static class FixedWidthReader extends BatchReader {
+
+    public FixedWidthReader(ReadState readState) {
+      super(readState);
+    }
+
+    @Override
+    protected long getReadCount(ColumnReader<?> firstColumnStatus) {
+      return Math.min(readState.schema().getRecordsPerBatch(),
+                      firstColumnStatus.columnChunkMetaData.getValueCount() - firstColumnStatus.totalValuesRead);
+    }
+
+    @Override
+    protected int readRecords(ColumnReader<?> firstColumnStatus, long recordsToRead) throws Exception {
+      readAllFixedFields(recordsToRead);
+      return firstColumnStatus.getRecordsReadInCurrentPass();
+    }
+  }
+
+  /**
+   * Strategy for reading a record batch when at least one column is
+   * variable width.
+   */
+
+  public static class VariableWidthReader extends BatchReader {
+
+    public VariableWidthReader(ReadState readState) {
+      super(readState);
+    }
+
+    @Override
+    protected long getReadCount(ColumnReader<?> firstColumnStatus) {
+      return ParquetRecordReader.DEFAULT_RECORDS_TO_READ_IF_VARIABLE_WIDTH;
+    }
+
+    @Override
+    protected int readRecords(ColumnReader<?> firstColumnStatus, long recordsToRead) throws Exception {
+      long fixedRecordsToRead = readState.varLengthReader().readFields(recordsToRead);
+      readAllFixedFields(fixedRecordsToRead);
+      return firstColumnStatus.getRecordsReadInCurrentPass();
+    }
+  }
+}

http://git-wip-us.apache.org/repos/asf/drill/blob/676ea889/exec/java-exec/src/main/java/org/apache/drill/exec/store/parquet/columnreaders/ColumnReader.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/store/parquet/columnreaders/ColumnReader.java b/exec/java-exec/src/main/java/org/apache/drill/exec/store/parquet/columnreaders/ColumnReader.java
index 5eaf286..98e1d78 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/store/parquet/columnreaders/ColumnReader.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/store/parquet/columnreaders/ColumnReader.java
@@ -1,4 +1,4 @@
-/**
+/*
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
  * distributed with this work for additional information
@@ -101,11 +101,9 @@ public abstract class ColumnReader<V extends ValueVector> {
     }
     if (columnDescriptor.getType() != PrimitiveType.PrimitiveTypeName.BINARY) {
       if (columnDescriptor.getType() == PrimitiveTypeName.FIXED_LEN_BYTE_ARRAY) {
-        // Here "bits" means "bytes"
         dataTypeLengthInBits = columnDescriptor.getTypeLength() * 8;
       } else {
-        // While here, "bits" means "bits"
-        dataTypeLengthInBits = ParquetRecordReader.getTypeLengthInBits(columnDescriptor.getType());
+        dataTypeLengthInBits = ParquetColumnMetadata.getTypeLengthInBits(columnDescriptor.getType());
       }
     }
     if(threadPool == null) {

http://git-wip-us.apache.org/repos/asf/drill/blob/676ea889/exec/java-exec/src/main/java/org/apache/drill/exec/store/parquet/columnreaders/FixedWidthRepeatedReader.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/store/parquet/columnreaders/FixedWidthRepeatedReader.java b/exec/java-exec/src/main/java/org/apache/drill/exec/store/parquet/columnreaders/FixedWidthRepeatedReader.java
index 6db7110..fa21dfa 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/store/parquet/columnreaders/FixedWidthRepeatedReader.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/store/parquet/columnreaders/FixedWidthRepeatedReader.java
@@ -1,4 +1,4 @@
-/*******************************************************************************
+/*
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
  * distributed with this work for additional information

http://git-wip-us.apache.org/repos/asf/drill/blob/676ea889/exec/java-exec/src/main/java/org/apache/drill/exec/store/parquet/columnreaders/ParquetColumnMetadata.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/store/parquet/columnreaders/ParquetColumnMetadata.java b/exec/java-exec/src/main/java/org/apache/drill/exec/store/parquet/columnreaders/ParquetColumnMetadata.java
new file mode 100644
index 0000000..bbdf246
--- /dev/null
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/store/parquet/columnreaders/ParquetColumnMetadata.java
@@ -0,0 +1,154 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.drill.exec.store.parquet.columnreaders;
+
+import java.util.Map;
+
+import org.apache.drill.common.exceptions.ExecutionSetupException;
+import org.apache.drill.common.expression.SchemaPath;
+import org.apache.drill.common.types.TypeProtos;
+import org.apache.drill.common.types.TypeProtos.DataMode;
+import org.apache.drill.common.types.TypeProtos.MajorType;
+import org.apache.drill.exec.exception.SchemaChangeException;
+import org.apache.drill.exec.expr.TypeHelper;
+import org.apache.drill.exec.physical.impl.OutputMutator;
+import org.apache.drill.exec.record.MaterializedField;
+import org.apache.drill.exec.server.options.OptionManager;
+import org.apache.drill.exec.vector.ValueVector;
+import org.apache.drill.exec.vector.complex.RepeatedValueVector;
+import org.apache.parquet.column.ColumnDescriptor;
+import org.apache.parquet.format.SchemaElement;
+import org.apache.parquet.hadoop.metadata.ColumnChunkMetaData;
+import org.apache.parquet.schema.PrimitiveType;
+import org.apache.parquet.schema.PrimitiveType.PrimitiveTypeName;
+
+/**
+ * Represents a single column read from the Parquet file by the record reader.
+ */
+
+public class ParquetColumnMetadata {
+
+  ColumnDescriptor column;
+  private SchemaElement se;
+  MaterializedField field;
+  int length;
+  private MajorType type;
+  ColumnChunkMetaData columnChunkMetaData;
+  private ValueVector vector;
+
+  public ParquetColumnMetadata(ColumnDescriptor column) {
+    this.column = column;
+  }
+
+  public void resolveDrillType(Map<String, SchemaElement> schemaElements, OptionManager options) {
+    se = schemaElements.get(column.getPath()[0]);
+    type = ParquetToDrillTypeConverter.toMajorType(column.getType(), se.getType_length(),
+        getDataMode(column), se, options);
+    field = MaterializedField.create(toFieldName(column.getPath()), type);
+    length = getDataTypeLength();
+  }
+
+  private String toFieldName(String[] paths) {
+    return SchemaPath.getCompoundPath(paths).getAsUnescapedPath();
+  }
+
+  private TypeProtos.DataMode getDataMode(ColumnDescriptor column) {
+    if (isRepeated()) {
+      return DataMode.REPEATED;
+    } else if (column.getMaxDefinitionLevel() == 0) {
+      return TypeProtos.DataMode.REQUIRED;
+    } else {
+      return TypeProtos.DataMode.OPTIONAL;
+    }
+  }
+
+  /**
+   * @param type a fixed length type from the parquet library enum
+   * @return the length in pageDataByteArray of the type
+   */
+  public static int getTypeLengthInBits(PrimitiveTypeName type) {
+    switch (type) {
+      case INT64:   return 64;
+      case INT32:   return 32;
+      case BOOLEAN: return 1;
+      case FLOAT:   return 32;
+      case DOUBLE:  return 64;
+      case INT96:   return 96;
+      // binary and fixed length byte array
+      default:
+        throw new IllegalStateException("Length cannot be determined for type " + type);
+    }
+  }
+
+  public static final int UNDEFINED_LENGTH = -1;
+
+  /**
+   * Returns the data type length for a given {@link ColumnDescriptor} and its corresponding
+   * {@link SchemaElement}. Neither is enough information alone, as the max
+   * repetition level (indicating if it is an array type) is in the ColumnDescriptor and
+   * the length of a fixed width field is stored at the schema level.
+   *
+   * @return the length if fixed width, else <tt>UNDEFINED_LENGTH</tt> (-1)
+   */
+  private int getDataTypeLength() {
+    if (! isFixedLength()) {
+      return UNDEFINED_LENGTH;
+    } else if (isRepeated()) {
+      return UNDEFINED_LENGTH;
+    } else if (column.getType() == PrimitiveType.PrimitiveTypeName.FIXED_LEN_BYTE_ARRAY) {
+      return se.getType_length() * 8;
+    } else {
+      return getTypeLengthInBits(column.getType());
+    }
+  }
+
+  public boolean isFixedLength( ) {
+    return column.getType() != PrimitiveType.PrimitiveTypeName.BINARY;
+  }
+
+  public boolean isRepeated() {
+    return column.getMaxRepetitionLevel() > 0;
+  }
+
+  ValueVector buildVector(OutputMutator output) throws SchemaChangeException {
+    Class<? extends ValueVector> vectorClass = TypeHelper.getValueVectorClass(type.getMinorType(), type.getMode());
+    vector = output.addField(field, vectorClass);
+    return vector;
+  }
+
+  ColumnReader<?> makeFixedWidthReader(ParquetRecordReader reader, int recordsPerBatch) throws Exception {
+    return ColumnReaderFactory.createFixedColumnReader(reader, true,
+        column, columnChunkMetaData, recordsPerBatch, vector, se);
+  }
+
+  @SuppressWarnings("resource")
+  FixedWidthRepeatedReader makeRepeatedFixedWidthReader(ParquetRecordReader reader, int recordsPerBatch) throws Exception {
+    final RepeatedValueVector repeatedVector = RepeatedValueVector.class.cast(vector);
+    ColumnReader<?> dataReader = ColumnReaderFactory.createFixedColumnReader(reader, true,
+        column, columnChunkMetaData, recordsPerBatch,
+        repeatedVector.getDataVector(), se);
+    return new FixedWidthRepeatedReader(reader, dataReader,
+        getTypeLengthInBits(column.getType()), UNDEFINED_LENGTH, column, columnChunkMetaData, false, repeatedVector, se);
+  }
+
+  VarLengthValuesColumn<?> makeVariableWidthReader(ParquetRecordReader reader) throws ExecutionSetupException {
+    return ColumnReaderFactory.getReader(reader, UNDEFINED_LENGTH, column, columnChunkMetaData, false, vector, se);
+  }
+
+}
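ParquetColumnMetadata above bundles the per-column setup steps the old reader
interleaved. An assumed calling sequence, assembled from the methods above
(the surrounding loop and collection names are illustrative, not from the patch):

    ParquetColumnMetadata md = new ParquetColumnMetadata(columnDescriptor);
    md.resolveDrillType(schemaElements, options);      // Parquet type -> Drill MajorType + field
    md.columnChunkMetaData = chunkMetaData;            // set from the row group being read
    ValueVector v = md.buildVector(output);            // registers the field with the OutputMutator
    if (!md.isFixedLength()) {
      varLengthReaders.add(md.makeVariableWidthReader(reader));
    } else if (md.isRepeated()) {
      columnReaders.add(md.makeRepeatedFixedWidthReader(reader, recordsPerBatch));
    } else {
      columnReaders.add(md.makeFixedWidthReader(reader, recordsPerBatch));
    }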

http://git-wip-us.apache.org/repos/asf/drill/blob/676ea889/exec/java-exec/src/main/java/org/apache/drill/exec/store/parquet/columnreaders/ParquetRecordReader.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/store/parquet/columnreaders/ParquetRecordReader.java b/exec/java-exec/src/main/java/org/apache/drill/exec/store/parquet/columnreaders/ParquetRecordReader.java
index 93c1214..cb75cfc 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/store/parquet/columnreaders/ParquetRecordReader.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/store/parquet/columnreaders/ParquetRecordReader.java
@@ -1,4 +1,4 @@
-/**
+/*
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
  * distributed with this work for additional information
@@ -17,50 +17,31 @@
  */
 package org.apache.drill.exec.store.parquet.columnreaders;
 
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
-import java.util.concurrent.Future;
 import java.util.concurrent.TimeUnit;
 
 import org.apache.drill.common.exceptions.DrillRuntimeException;
 import org.apache.drill.common.exceptions.ExecutionSetupException;
 import org.apache.drill.common.expression.SchemaPath;
-import org.apache.drill.common.types.TypeProtos;
-import org.apache.drill.common.types.TypeProtos.DataMode;
-import org.apache.drill.common.types.TypeProtos.MajorType;
-import org.apache.drill.common.types.Types;
 import org.apache.drill.exec.ExecConstants;
 import org.apache.drill.exec.exception.OutOfMemoryException;
-import org.apache.drill.exec.expr.TypeHelper;
 import org.apache.drill.exec.ops.FragmentContext;
 import org.apache.drill.exec.ops.MetricDef;
 import org.apache.drill.exec.ops.OperatorContext;
 import org.apache.drill.exec.physical.impl.OutputMutator;
-import org.apache.drill.exec.record.MaterializedField;
 import org.apache.drill.exec.store.AbstractRecordReader;
 import org.apache.drill.exec.store.parquet.ParquetReaderStats;
 import org.apache.drill.exec.store.parquet.ParquetReaderUtility;
 import org.apache.drill.exec.vector.AllocationHelper;
-import org.apache.drill.exec.vector.NullableIntVector;
 import org.apache.drill.exec.vector.ValueVector;
-import org.apache.drill.exec.vector.complex.RepeatedValueVector;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
-import org.apache.parquet.column.ColumnDescriptor;
-import org.apache.parquet.format.SchemaElement;
 import org.apache.parquet.hadoop.CodecFactory;
-import org.apache.parquet.hadoop.metadata.BlockMetaData;
-import org.apache.parquet.hadoop.metadata.ColumnChunkMetaData;
 import org.apache.parquet.hadoop.metadata.ParquetMetadata;
-import org.apache.parquet.schema.PrimitiveType;
 
 import com.google.common.base.Stopwatch;
 import com.google.common.collect.ImmutableList;
-import com.google.common.collect.Lists;
 
 public class ParquetRecordReader extends AbstractRecordReader {
   private static final org.slf4j.Logger logger = org.slf4j.LoggerFactory.getLogger(ParquetRecordReader.class);
@@ -69,11 +50,11 @@ public class ParquetRecordReader extends AbstractRecordReader {
   private static final int NUMBER_OF_VECTORS = 1;
   private static final long DEFAULT_BATCH_LENGTH = 256 * 1024 * NUMBER_OF_VECTORS; // 256kb
   private static final long DEFAULT_BATCH_LENGTH_IN_BITS = DEFAULT_BATCH_LENGTH * 8; // 256kb
-  private static final char DEFAULT_RECORDS_TO_READ_IF_VARIABLE_WIDTH = 32*1024; // 32K
-  private static final int DEFAULT_RECORDS_TO_READ_IF_FIXED_WIDTH = 64*1024 - 1; // 64K - 1, max SV2 can address
-  private static final int NUM_RECORDS_TO_READ_NOT_SPECIFIED = -1;
+  static final char DEFAULT_RECORDS_TO_READ_IF_VARIABLE_WIDTH = 32*1024; // 32K
+  static final int DEFAULT_RECORDS_TO_READ_IF_FIXED_WIDTH = 64*1024 - 1; // 64K - 1, max SV2 can address
+  static final int NUM_RECORDS_TO_READ_NOT_SPECIFIED = -1;
 
-  // When no column is required by the downstrea operator, ask SCAN to return a DEFAULT column. If such column does not exist,
+  // When no column is required by the downstream operator, ask SCAN to return a DEFAULT column. If such column does not exist,
   // it will return as a nullable-int column. If that column happens to exist, return that column.
   protected static final List<SchemaPath> DEFAULT_COLS_TO_READ = ImmutableList.of(SchemaPath.getSimplePath("_DEFAULT_COL_TO_READ_"));
 
@@ -85,37 +66,23 @@ public class ParquetRecordReader extends AbstractRecordReader {
   // used for clearing the first n bits of a byte
   public static final byte[] startBitMasks = {127, 63, 31, 15, 7, 3, 1};
 
-  private int bitWidthAllFixedFields;
-  private boolean allFieldsFixedLength;
-  private int recordsPerBatch;
   private OperatorContext operatorContext;
 
-  private List<ColumnReader<?>> columnStatuses;
   private FileSystem fileSystem;
   private final long batchSize;
   private long numRecordsToRead; // number of records to read
 
   Path hadoopPath;
-  private VarLenBinaryReader varLengthReader;
   private ParquetMetadata footer;
-  // This is a parallel list to the columns list above, it is used to determine the subset of the project
-  // pushdown columns that do not appear in this file
-  private boolean[] columnsFound;
-  // For columns not found in the file, we need to return a schema element with the correct number of values
-  // at that position in the schema. Currently this requires a vector be present. Here is a list of all of these vectors
-  // that need only have their value count set at the end of each call to next(), as the values default to null.
-  private List<NullableIntVector> nullFilledVectors;
-  // Keeps track of the number of records returned in the case where only columns outside of the file were selected.
-  // No actual data needs to be read out of the file, we only need to return batches until we have 'read' the number of
-  // records specified in the row group metadata
-  long mockRecordsRead;
 
   private final CodecFactory codecFactory;
   int rowGroupIndex;
-  long totalRecordsRead;
   private final FragmentContext fragmentContext;
   ParquetReaderUtility.DateCorruptionStatus dateCorruptionStatus;
 
+  ParquetSchema schema;
+  ReadState readState;
+
   public boolean useAsyncColReader;
   public boolean useAsyncPageReader;
   public boolean useBufferedReader;
@@ -127,8 +94,8 @@ public class ParquetRecordReader extends AbstractRecordReader {
   @SuppressWarnings("unused")
   private String name;
 
-
   public ParquetReaderStats parquetReaderStats = new ParquetReaderStats();
+  private BatchReader batchReader;
 
   public enum Metric implements MetricDef {
     NUM_DICT_PAGE_LOADS,         // Number of dictionary pages read
@@ -203,13 +170,7 @@ public class ParquetRecordReader extends AbstractRecordReader {
     this.footer = footer;
     this.dateCorruptionStatus = dateCorruptionStatus;
     this.fragmentContext = fragmentContext;
-    // Callers can pass -1 if they want to read all rows.
-    if (numRecordsToRead == NUM_RECORDS_TO_READ_NOT_SPECIFIED) {
-      this.numRecordsToRead = footer.getBlocks().get(rowGroupIndex).getRowCount();
-    } else {
-      assert (numRecordsToRead >= 0);
-      this.numRecordsToRead = Math.min(numRecordsToRead, footer.getBlocks().get(rowGroupIndex).getRowCount());
-    }
+    this.numRecordsToRead = numRecordsToRead;
     useAsyncColReader =
         fragmentContext.getOptions().getOption(ExecConstants.PARQUET_COLUMNREADER_ASYNC).bool_val;
     useAsyncPageReader =
@@ -255,50 +216,13 @@ public class ParquetRecordReader extends AbstractRecordReader {
   }
 
   public int getBitWidthAllFixedFields() {
-    return bitWidthAllFixedFields;
+    return schema.getBitWidthAllFixedFields();
   }
 
   public long getBatchSize() {
     return batchSize;
   }
 
-  /**
-   * @param type a fixed length type from the parquet library enum
-   * @return the length in pageDataByteArray of the type
-   */
-  public static int getTypeLengthInBits(PrimitiveType.PrimitiveTypeName type) {
-    switch (type) {
-      case INT64:   return 64;
-      case INT32:   return 32;
-      case BOOLEAN: return 1;
-      case FLOAT:   return 32;
-      case DOUBLE:  return 64;
-      case INT96:   return 96;
-      // binary and fixed length byte array
-      default:
-        throw new IllegalStateException("Length cannot be determined for type " + type);
-    }
-  }
-
-  private boolean fieldSelected(MaterializedField field) {
-    // TODO - not sure if this is how we want to represent this
-    // for now it makes the existing tests pass, simply selecting
-    // all available data if no columns are provided
-    if (isStarQuery()) {
-      return true;
-    }
-
-    int i = 0;
-    for (SchemaPath expr : getColumns()) {
-      if ( field.getPath().equalsIgnoreCase(expr.getAsUnescapedPath())) {
-        columnsFound[i] = true;
-        return true;
-      }
-      i++;
-    }
-    return false;
-  }
-
   public OperatorContext getOperatorContext() {
     return operatorContext;
   }
@@ -308,163 +232,50 @@ public class ParquetRecordReader extends AbstractRecordReader {
   }
 
   /**
-   * Returns data type length for a given {@see ColumnDescriptor} and it's corresponding
-   * {@see SchemaElement}. Neither is enough information alone as the max
-   * repetition level (indicating if it is an array type) is in the ColumnDescriptor and
-   * the length of a fixed width field is stored at the schema level.
-   *
-   * @return the length if fixed width, else -1
+   * Prepare the Parquet reader. First determine the set of columns to read (the schema
+   * for this read). Then, create a state object to track the read across calls to
+   * the reader's <tt>next()</tt> method. Finally, create one of three readers to
+   * read batches depending on whether this scan is for only fixed-width fields,
+   * contains at least one variable-width field, or is a "mock" scan consisting
+   * only of null fields (fields in the SELECT clause but not in the Parquet file).
    */
-  private int getDataTypeLength(ColumnDescriptor column, SchemaElement se) {
-    if (column.getType() != PrimitiveType.PrimitiveTypeName.BINARY) {
-      if (column.getMaxRepetitionLevel() > 0) {
-        return -1;
-      }
-      if (column.getType() == PrimitiveType.PrimitiveTypeName.FIXED_LEN_BYTE_ARRAY) {
-        return se.getType_length() * 8;
-      } else {
-        return getTypeLengthInBits(column.getType());
-      }
-    } else {
-      return -1;
-    }
-  }
 
-  @SuppressWarnings({ "resource", "unchecked" })
   @Override
   public void setup(OperatorContext operatorContext, OutputMutator output) throws ExecutionSetupException {
     this.operatorContext = operatorContext;
-    if (!isStarQuery()) {
-      columnsFound = new boolean[getColumns().size()];
-      nullFilledVectors = new ArrayList<>();
-    }
-    columnStatuses = new ArrayList<>();
-    List<ColumnDescriptor> columns = footer.getFileMetaData().getSchema().getColumns();
-    allFieldsFixedLength = true;
-    ColumnDescriptor column;
-    ColumnChunkMetaData columnChunkMetaData;
-    int columnsToScan = 0;
-    mockRecordsRead = 0;
-
-    MaterializedField field;
+    schema = new ParquetSchema(fragmentContext.getOptions(), rowGroupIndex, footer, isStarQuery() ? null : getColumns());
 
     logger.debug("Reading row group({}) with {} records in file {}.", rowGroupIndex, footer.getBlocks().get(rowGroupIndex).getRowCount(),
         hadoopPath.toUri().getPath());
-    totalRecordsRead = 0;
-
-    // TODO - figure out how to deal with this better once we add nested reading, note also look where this map is used below
-    // store a map from column name to converted types if they are non-null
-    Map<String, SchemaElement> schemaElements = ParquetReaderUtility.getColNameToSchemaElementMapping(footer);
-
-    // loop to add up the length of the fixed width columns and build the schema
-    for (int i = 0; i < columns.size(); ++i) {
-      column = columns.get(i);
-      SchemaElement se = schemaElements.get(column.getPath()[0]);
-      MajorType mt = ParquetToDrillTypeConverter.toMajorType(column.getType(), se.getType_length(),
-          getDataMode(column), se, fragmentContext.getOptions());
-      field = MaterializedField.create(toFieldName(column.getPath()), mt);
-      if ( ! fieldSelected(field)) {
-        continue;
-      }
-      columnsToScan++;
-      int dataTypeLength = getDataTypeLength(column, se);
-      if (dataTypeLength == -1) {
-        allFieldsFixedLength = false;
-      } else {
-        bitWidthAllFixedFields += dataTypeLength;
-      }
-    }
-
-    if (columnsToScan != 0  && allFieldsFixedLength) {
-      recordsPerBatch = (int) Math.min(Math.min(batchSize / bitWidthAllFixedFields,
-          footer.getBlocks().get(0).getColumns().get(0).getValueCount()), DEFAULT_RECORDS_TO_READ_IF_FIXED_WIDTH);
-    }
-    else {
-      recordsPerBatch = DEFAULT_RECORDS_TO_READ_IF_VARIABLE_WIDTH;
-    }
 
     try {
-      ValueVector vector;
-      SchemaElement schemaElement;
-      final ArrayList<VarLengthColumn<? extends ValueVector>> varLengthColumns = new ArrayList<>();
-      // initialize all of the column read status objects
-      boolean fieldFixedLength;
-      // the column chunk meta-data is not guaranteed to be in the same order as the columns in the schema
-      // a map is constructed for fast access to the correct columnChunkMetadata to correspond
-      // to an element in the schema
-      Map<String, Integer> columnChunkMetadataPositionsInList = new HashMap<>();
-      BlockMetaData rowGroupMetadata = footer.getBlocks().get(rowGroupIndex);
-
-      int colChunkIndex = 0;
-      for (ColumnChunkMetaData colChunk : rowGroupMetadata.getColumns()) {
-        columnChunkMetadataPositionsInList.put(Arrays.toString(colChunk.getPath().toArray()), colChunkIndex);
-        colChunkIndex++;
-      }
-      for (int i = 0; i < columns.size(); ++i) {
-        column = columns.get(i);
-        columnChunkMetaData = rowGroupMetadata.getColumns().get(columnChunkMetadataPositionsInList.get(Arrays.toString(column.getPath())));
-        schemaElement = schemaElements.get(column.getPath()[0]);
-        MajorType type = ParquetToDrillTypeConverter.toMajorType(column.getType(), schemaElement.getType_length(),
-            getDataMode(column), schemaElement, fragmentContext.getOptions());
-        field = MaterializedField.create(toFieldName(column.getPath()), type);
-        // the field was not requested to be read
-        if ( ! fieldSelected(field)) {
-          continue;
-        }
-
-        fieldFixedLength = column.getType() != PrimitiveType.PrimitiveTypeName.BINARY;
-        vector = output.addField(field, (Class<? extends ValueVector>) TypeHelper.getValueVectorClass(type.getMinorType(), type.getMode()));
-        if (column.getType() != PrimitiveType.PrimitiveTypeName.BINARY) {
-          if (column.getMaxRepetitionLevel() > 0) {
-            final RepeatedValueVector repeatedVector = RepeatedValueVector.class.cast(vector);
-            ColumnReader<?> dataReader = ColumnReaderFactory.createFixedColumnReader(this, fieldFixedLength,
-                column, columnChunkMetaData, recordsPerBatch,
-                repeatedVector.getDataVector(), schemaElement);
-            varLengthColumns.add(new FixedWidthRepeatedReader(this, dataReader,
-                getTypeLengthInBits(column.getType()), -1, column, columnChunkMetaData, false, repeatedVector, schemaElement));
-          }
-          else {
-
-           ColumnReader<?> cr = ColumnReaderFactory.createFixedColumnReader(this, fieldFixedLength,
-                column, columnChunkMetaData, recordsPerBatch, vector,
-                schemaElement) ;
-            columnStatuses.add(cr);
-          }
-        } else {
-          // create a reader and add it to the appropriate list
-          varLengthColumns.add(ColumnReaderFactory.getReader(this, -1, column, columnChunkMetaData, false, vector, schemaElement));
-        }
-      }
-      varLengthReader = new VarLenBinaryReader(this, varLengthColumns);
-
-      if (!isStarQuery()) {
-        List<SchemaPath> projectedColumns = Lists.newArrayList(getColumns());
-        SchemaPath col;
-        for (int i = 0; i < columnsFound.length; i++) {
-          col = projectedColumns.get(i);
-          assert col!=null;
-          if ( ! columnsFound[i] && !col.equals(STAR_COLUMN)) {
-            nullFilledVectors.add((NullableIntVector)output.addField(MaterializedField.create(col.getAsUnescapedPath(),
-                    Types.optional(TypeProtos.MinorType.INT)),
-                (Class<? extends ValueVector>) TypeHelper.getValueVectorClass(TypeProtos.MinorType.INT, DataMode.OPTIONAL)));
-
-          }
-        }
-      }
+      schema.buildSchema(batchSize);
+      readState = new ReadState(schema, parquetReaderStats, numRecordsToRead, useAsyncColReader);
+      readState.buildReader(this, output);
     } catch (Exception e) {
-      handleAndRaise("Failure in setting up reader", e);
+      throw handleException("Failure in setting up reader", e);
+    }
+
+    ColumnReader<?> firstColumnStatus = readState.getFirstColumnReader();
+    if (firstColumnStatus == null) {
+      batchReader = new BatchReader.MockBatchReader(readState);
+    } else if (schema.allFieldsFixedLength()) {
+      batchReader = new BatchReader.FixedWidthReader(readState);
+    } else {
+      batchReader = new BatchReader.VariableWidthReader(readState);
     }
   }
 
-  protected void handleAndRaise(String s, Exception e) {
+  protected DrillRuntimeException handleException(String s, Exception e) {
     String message = "Error in parquet record reader.\nMessage: " + s +
       "\nParquet Metadata: " + footer;
-    throw new DrillRuntimeException(message, e);
+    return new DrillRuntimeException(message, e);
   }
 
   @Override
   public void allocate(Map<String, ValueVector> vectorMap) throws OutOfMemoryException {
     try {
+      int recordsPerBatch = schema.getRecordsPerBatch();
       for (final ValueVector v : vectorMap.values()) {
         AllocationHelper.allocate(v, recordsPerBatch, 50, 10);
       }
@@ -473,251 +284,56 @@ public class ParquetRecordReader extends AbstractRecordReader {
     }
   }
 
-
-  private String toFieldName(String[] paths) {
-    return SchemaPath.getCompoundPath(paths).getAsUnescapedPath();
-  }
-
-  private TypeProtos.DataMode getDataMode(ColumnDescriptor column) {
-    if (column.getMaxRepetitionLevel() > 0 ) {
-      return DataMode.REPEATED;
-    } else if (column.getMaxDefinitionLevel() == 0) {
-      return TypeProtos.DataMode.REQUIRED;
-    } else {
-      return TypeProtos.DataMode.OPTIONAL;
-    }
-  }
-
-  private void resetBatch() {
-    for (final ColumnReader<?> column : columnStatuses) {
-      column.valuesReadInCurrentPass = 0;
-    }
-    for (final VarLengthColumn<?> r : varLengthReader.columns) {
-      r.valuesReadInCurrentPass = 0;
-    }
-  }
-
- public void readAllFixedFields(long recordsToRead) throws IOException {
-   Stopwatch timer = Stopwatch.createStarted();
-   if(useAsyncColReader){
-     readAllFixedFieldsParallel(recordsToRead) ;
-   } else {
-     readAllFixedFieldsSerial(recordsToRead);
-   }
-   parquetReaderStats.timeFixedColumnRead.addAndGet(timer.elapsed(TimeUnit.NANOSECONDS));
- }
-
-  public void readAllFixedFieldsSerial(long recordsToRead) throws IOException {
-    for (ColumnReader<?> crs : columnStatuses) {
-      crs.processPages(recordsToRead);
-    }
-  }
-
-  public void readAllFixedFieldsParallel(long recordsToRead) throws IOException {
-    ArrayList<Future<Long>> futures = Lists.newArrayList();
-    for (ColumnReader<?> crs : columnStatuses) {
-      Future<Long> f = crs.processPagesAsync(recordsToRead);
-      futures.add(f);
-    }
-    Exception exception = null;
-    for(Future<Long> f: futures){
-      if(exception != null) {
-        f.cancel(true);
-      } else {
-        try {
-          f.get();
-        } catch (Exception e) {
-          f.cancel(true);
-          exception = e;
-        }
-      }
-    }
-    if(exception != null){
-      handleAndRaise(null, exception);
-    }
-  }
+  /**
+   * Read the next record batch from the file using the reader and read state
+   * created previously.
+   */
 
   @Override
   public int next() {
-    resetBatch();
-    long recordsToRead = 0;
+    readState.resetBatch();
     Stopwatch timer = Stopwatch.createStarted();
     try {
-      ColumnReader<?> firstColumnStatus;
-      if (columnStatuses.size() > 0) {
-        firstColumnStatus = columnStatuses.iterator().next();
-      }
-      else{
-        if (varLengthReader.columns.size() > 0) {
-          firstColumnStatus = varLengthReader.columns.iterator().next();
-        }
-        else{
-          firstColumnStatus = null;
-        }
-      }
-      // No columns found in the file were selected, simply return a full batch of null records for each column requested
-      if (firstColumnStatus == null) {
-        if (mockRecordsRead == footer.getBlocks().get(rowGroupIndex).getRowCount()) {
-          parquetReaderStats.timeProcess.addAndGet(timer.elapsed(TimeUnit.NANOSECONDS));
-          return 0;
-        }
-        recordsToRead = Math.min(DEFAULT_RECORDS_TO_READ_IF_VARIABLE_WIDTH, footer.getBlocks().get(rowGroupIndex).getRowCount() - mockRecordsRead);
-
-        // Pick the minimum of recordsToRead calculated above and numRecordsToRead (based on rowCount and limit).
-        recordsToRead = Math.min(recordsToRead, numRecordsToRead);
-
-        for (final ValueVector vv : nullFilledVectors ) {
-          vv.getMutator().setValueCount( (int) recordsToRead);
-        }
-        mockRecordsRead += recordsToRead;
-        totalRecordsRead += recordsToRead;
-        numRecordsToRead -= recordsToRead;
-        parquetReaderStats.timeProcess.addAndGet(timer.elapsed(TimeUnit.NANOSECONDS));
-        return (int) recordsToRead;
-      }
-
-      if (allFieldsFixedLength) {
-        recordsToRead = Math.min(recordsPerBatch, firstColumnStatus.columnChunkMetaData.getValueCount() - firstColumnStatus.totalValuesRead);
-      } else {
-        recordsToRead = DEFAULT_RECORDS_TO_READ_IF_VARIABLE_WIDTH;
-
-      }
-
-      // Pick the minimum of recordsToRead calculated above and numRecordsToRead (based on rowCount and limit)
-      recordsToRead = Math.min(recordsToRead, numRecordsToRead);
-
-      if (allFieldsFixedLength) {
-        readAllFixedFields(recordsToRead);
-      } else { // variable length columns
-        long fixedRecordsToRead = varLengthReader.readFields(recordsToRead);
-        readAllFixedFields(fixedRecordsToRead);
-      }
-
-      // if we have requested columns that were not found in the file fill their vectors with null
-      // (by simply setting the value counts inside of them, as they start null filled)
-      if (nullFilledVectors != null) {
-        for (final ValueVector vv : nullFilledVectors ) {
-          vv.getMutator().setValueCount(firstColumnStatus.getRecordsReadInCurrentPass());
-        }
-      }
-
-//      logger.debug("So far read {} records out of row group({}) in file '{}'", totalRecordsRead, rowGroupIndex, hadoopPath.toUri().getPath());
-      totalRecordsRead += firstColumnStatus.getRecordsReadInCurrentPass();
-      numRecordsToRead -= firstColumnStatus.getRecordsReadInCurrentPass();
-      parquetReaderStats.timeProcess.addAndGet(timer.elapsed(TimeUnit.NANOSECONDS));
-
-      return firstColumnStatus.getRecordsReadInCurrentPass();
+      return batchReader.readBatch();
     } catch (Exception e) {
-      handleAndRaise("\nHadoop path: " + hadoopPath.toUri().getPath() +
-        "\nTotal records read: " + totalRecordsRead +
-        "\nMock records read: " + mockRecordsRead +
-        "\nRecords to read: " + recordsToRead +
+      throw handleException("\nHadoop path: " + hadoopPath.toUri().getPath() +
+        "\nTotal records read: " + readState.recordsRead() +
         "\nRow group index: " + rowGroupIndex +
         "\nRecords in row group: " + footer.getBlocks().get(rowGroupIndex).getRowCount(), e);
+    } finally {
+      parquetReaderStats.timeProcess.addAndGet(timer.elapsed(TimeUnit.NANOSECONDS));
     }
-
-    // this is never reached
-    return 0;
   }
 
   @Override
   public void close() {
-    logger.debug("Read {} records out of row group({}) in file '{}'", totalRecordsRead, rowGroupIndex,
+    long recordsRead = (readState == null) ? 0 : readState.recordsRead();
+    logger.debug("Read {} records out of row group({}) in file '{}'",
+        recordsRead, rowGroupIndex,
         hadoopPath.toUri().getPath());
     // enable this for debugging when it is know that a whole file will be read
     // limit kills upstream operators once it has enough records, so this assert will fail
 //    assert totalRecordsRead == footer.getBlocks().get(rowGroupIndex).getRowCount();
-    if (columnStatuses != null) {
-      for (final ColumnReader<?> column : columnStatuses) {
-        column.clear();
-      }
-      columnStatuses.clear();
-      columnStatuses = null;
+    if (readState != null) {
+      readState.close();
+      readState = null;
     }
 
     codecFactory.release();
 
-    if (varLengthReader != null) {
-      for (final VarLengthColumn<?> r : varLengthReader.columns) {
-        r.clear();
-      }
-      varLengthReader.columns.clear();
-      varLengthReader = null;
-    }
-
-
-    if(parquetReaderStats != null) {
+    if (parquetReaderStats != null) {
       updateStats();
-      logger.trace(
-          "ParquetTrace,Summary,{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{}",
-          hadoopPath,
-          parquetReaderStats.numDictPageLoads,
-          parquetReaderStats.numDataPageLoads,
-          parquetReaderStats.numDataPagesDecoded,
-          parquetReaderStats.numDictPagesDecompressed,
-          parquetReaderStats.numDataPagesDecompressed,
-          parquetReaderStats.totalDictPageReadBytes,
-          parquetReaderStats.totalDataPageReadBytes,
-          parquetReaderStats.totalDictDecompressedBytes,
-          parquetReaderStats.totalDataDecompressedBytes,
-          parquetReaderStats.timeDictPageLoads,
-          parquetReaderStats.timeDataPageLoads,
-          parquetReaderStats.timeDataPageDecode,
-          parquetReaderStats.timeDictPageDecode,
-          parquetReaderStats.timeDictPagesDecompressed,
-          parquetReaderStats.timeDataPagesDecompressed,
-          parquetReaderStats.timeDiskScanWait,
-          parquetReaderStats.timeDiskScan,
-          parquetReaderStats.timeFixedColumnRead,
-          parquetReaderStats.timeVarColumnRead
-      );
-      parquetReaderStats=null;
+      parquetReaderStats.logStats(logger, hadoopPath);
+      parquetReaderStats = null;
     }
-
   }
 
-  private void updateStats(){
-
-    operatorContext.getStats().addLongStat(Metric.NUM_DICT_PAGE_LOADS,
-        parquetReaderStats.numDictPageLoads.longValue());
-    operatorContext.getStats().addLongStat(Metric.NUM_DATA_PAGE_lOADS, parquetReaderStats.numDataPageLoads.longValue());
-    operatorContext.getStats().addLongStat(Metric.NUM_DATA_PAGES_DECODED, parquetReaderStats.numDataPagesDecoded.longValue());
-    operatorContext.getStats().addLongStat(Metric.NUM_DICT_PAGES_DECOMPRESSED,
-        parquetReaderStats.numDictPagesDecompressed.longValue());
-    operatorContext.getStats().addLongStat(Metric.NUM_DATA_PAGES_DECOMPRESSED,
-        parquetReaderStats.numDataPagesDecompressed.longValue());
-    operatorContext.getStats().addLongStat(Metric.TOTAL_DICT_PAGE_READ_BYTES,
-        parquetReaderStats.totalDictPageReadBytes.longValue());
-    operatorContext.getStats().addLongStat(Metric.TOTAL_DATA_PAGE_READ_BYTES,
-        parquetReaderStats.totalDataPageReadBytes.longValue());
-    operatorContext.getStats().addLongStat(Metric.TOTAL_DICT_DECOMPRESSED_BYTES,
-        parquetReaderStats.totalDictDecompressedBytes.longValue());
-    operatorContext.getStats().addLongStat(Metric.TOTAL_DATA_DECOMPRESSED_BYTES,
-        parquetReaderStats.totalDataDecompressedBytes.longValue());
-    operatorContext.getStats().addLongStat(Metric.TIME_DICT_PAGE_LOADS,
-        parquetReaderStats.timeDictPageLoads.longValue());
-    operatorContext.getStats().addLongStat(Metric.TIME_DATA_PAGE_LOADS,
-        parquetReaderStats.timeDataPageLoads.longValue());
-    operatorContext.getStats().addLongStat(Metric.TIME_DATA_PAGE_DECODE,
-        parquetReaderStats.timeDataPageDecode.longValue());
-    operatorContext.getStats().addLongStat(Metric.TIME_DICT_PAGE_DECODE,
-        parquetReaderStats.timeDictPageDecode.longValue());
-    operatorContext.getStats().addLongStat(Metric.TIME_DICT_PAGES_DECOMPRESSED,
-        parquetReaderStats.timeDictPagesDecompressed.longValue());
-    operatorContext.getStats().addLongStat(Metric.TIME_DATA_PAGES_DECOMPRESSED,
-        parquetReaderStats.timeDataPagesDecompressed.longValue());
-    operatorContext.getStats().addLongStat(Metric.TIME_DISK_SCAN_WAIT,
-        parquetReaderStats.timeDiskScanWait.longValue());
-    operatorContext.getStats().addLongStat(Metric.TIME_DISK_SCAN, parquetReaderStats.timeDiskScan.longValue());
-    operatorContext.getStats().addLongStat(Metric.TIME_FIXEDCOLUMN_READ, parquetReaderStats.timeFixedColumnRead.longValue());
-    operatorContext.getStats().addLongStat(Metric.TIME_VARCOLUMN_READ, parquetReaderStats.timeVarColumnRead.longValue());
-    operatorContext.getStats().addLongStat(Metric.TIME_PROCESS, parquetReaderStats.timeProcess.longValue());
-
+  private void updateStats() {
+    parquetReaderStats.update(operatorContext.getStats());
   }
 
   @Override
   protected List<SchemaPath> getDefaultColumnsToRead() {
     return DEFAULT_COLS_TO_READ;
   }
-
 }
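
One small but useful idiom in the diff above: handleAndRaise(), which threw,
becomes handleException(), which returns the exception for the caller to throw.
A minimal, hypothetical sketch (names invented here) of why that matters: the
"throw" at the call site lets the compiler prove the catch block never falls
through, so the old unreachable "return 0" disappears.

    class ThrowHelperSketch {
      static RuntimeException fail(String message, Exception cause) {
        return new RuntimeException(message, cause); // constructed here, thrown by the caller
      }

      int next() {
        try {
          return doRead();
        } catch (Exception e) {
          throw fail("read failed", e); // no dead code needed after this statement
        }
      }

      private int doRead() throws Exception { return 42; }
    }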

http://git-wip-us.apache.org/repos/asf/drill/blob/676ea889/exec/java-exec/src/main/java/org/apache/drill/exec/store/parquet/columnreaders/ParquetSchema.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/store/parquet/columnreaders/ParquetSchema.java b/exec/java-exec/src/main/java/org/apache/drill/exec/store/parquet/columnreaders/ParquetSchema.java
new file mode 100644
index 0000000..ab4b1b8
--- /dev/null
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/store/parquet/columnreaders/ParquetSchema.java
@@ -0,0 +1,265 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.drill.exec.store.parquet.columnreaders;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+import org.apache.drill.common.expression.SchemaPath;
+import org.apache.drill.common.types.TypeProtos;
+import org.apache.drill.common.types.Types;
+import org.apache.drill.common.types.TypeProtos.DataMode;
+import org.apache.drill.exec.exception.SchemaChangeException;
+import org.apache.drill.exec.expr.TypeHelper;
+import org.apache.drill.exec.physical.impl.OutputMutator;
+import org.apache.drill.exec.record.MaterializedField;
+import org.apache.drill.exec.server.options.OptionManager;
+import org.apache.drill.exec.store.parquet.ParquetReaderUtility;
+import org.apache.drill.exec.vector.NullableIntVector;
+import org.apache.parquet.column.ColumnDescriptor;
+import org.apache.parquet.format.SchemaElement;
+import org.apache.parquet.hadoop.metadata.BlockMetaData;
+import org.apache.parquet.hadoop.metadata.ColumnChunkMetaData;
+import org.apache.parquet.hadoop.metadata.ParquetMetadata;
+
+import com.google.common.collect.Lists;
+
+/**
+ * Mapping from the schema of the Parquet file to the schema that Drill
+ * and the Parquet record reader use.
+ */
+
+public class ParquetSchema {
+  /**
+   * Set of columns specified in the SELECT clause. Will be null for
+   * a SELECT * query.
+   */
+  private final Collection<SchemaPath> selectedCols;
+
+  /**
+   * Parallel list to the columns list above, it is used to determine the subset of the project
+   * pushdown columns that do not appear in this file.
+   */
+  private final boolean[] columnsFound;
+  private final OptionManager options;
+  private final int rowGroupIndex;
+  private final ParquetMetadata footer;
+
+  /**
+   * List of metadata for selected columns. This list does two things.
+   * First, it identifies the Parquet columns we wish to select. Second, it
+   * provides metadata for those columns. Note that null columns (columns
+   * in the SELECT clause but not in the file) appear elsewhere.
+   */
+  private List<ParquetColumnMetadata> selectedColumnMetadata = new ArrayList<>();
+  private int bitWidthAllFixedFields;
+  private boolean allFieldsFixedLength;
+  private long groupRecordCount;
+  private int recordsPerBatch;
+
+  /**
+   * Build the Parquet schema. The schema can be based on a "SELECT *",
+   * meaning we want all columns defined in the Parquet file. In this case,
+   * the list of selected columns is null. Or, the query can be based on
+   * an explicit list of selected columns. In this case, the
+   * columns need not exist in the Parquet file. If a column does not exist,
+   * the reader returns null for that column. If no selected column exists
+   * in the file, then we return "mock" records: records with only null
+   * values, but repeated for the number of rows in the Parquet file.
+   *
+   * @param options session options
+   * @param rowGroupIndex row group to read
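+   * @param footer the Parquet metadata (footer) for the file being read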
+   * @param selectedCols columns specified in the SELECT clause, or null if
+   * this is a SELECT * query
+   */
+
+  public ParquetSchema(OptionManager options, int rowGroupIndex, ParquetMetadata footer, Collection<SchemaPath> selectedCols) {
+    this.options = options;
+    this.rowGroupIndex = rowGroupIndex;
+    this.selectedCols = selectedCols;
+    this.footer = footer;
+    if (selectedCols == null) {
+      columnsFound = null;
+    } else {
+      columnsFound = new boolean[selectedCols.size()];
+    }
+  }
+
+  /**
+   * Build the schema for this read as a combination of the schema specified in
+   * the Parquet footer and the list of columns selected in the query.
+   *
+   * @param batchSize target size of the batch, in bits
+   * @throws Exception if anything goes wrong
+   */
+
+  public void buildSchema(long batchSize) throws Exception {
+    groupRecordCount = footer.getBlocks().get(rowGroupIndex).getRowCount();
+    loadParquetSchema();
+    computeFixedPart();
+
+    if (! selectedColumnMetadata.isEmpty() && allFieldsFixedLength) {
+      recordsPerBatch = (int) Math.min(Math.min(batchSize / bitWidthAllFixedFields,
+          footer.getBlocks().get(0).getColumns().get(0).getValueCount()), ParquetRecordReader.DEFAULT_RECORDS_TO_READ_IF_FIXED_WIDTH);
+    } else {
+      recordsPerBatch = ParquetRecordReader.DEFAULT_RECORDS_TO_READ_IF_VARIABLE_WIDTH;
+    }
+  }
+
+  /**
+   * Scan the Parquet footer, then map each Parquet column to the list of columns
+   * we want to read. Track those to be read.
+   */
+
+  private void loadParquetSchema() {
+    // TODO - figure out how to deal with this better once we add nested reading, note also look where this map is used below
+    // store a map from column name to converted types if they are non-null
+    Map<String, SchemaElement> schemaElements = ParquetReaderUtility.getColNameToSchemaElementMapping(footer);
+
+    // loop to add up the length of the fixed width columns and build the schema
+    for (ColumnDescriptor column : footer.getFileMetaData().getSchema().getColumns()) {
+      ParquetColumnMetadata columnMetadata = new ParquetColumnMetadata(column);
+      columnMetadata.resolveDrillType(schemaElements, options);
+      if (! fieldSelected(columnMetadata.field)) {
+        continue;
+      }
+      selectedColumnMetadata.add(columnMetadata);
+    }
+  }
+
+  /**
+   * Fixed-width fields are the easiest to plan. We know the size of each column,
+   * making it easy to determine the total length of each vector, once we know
+   * the target record count. A special reader is used in the fortunate case
+   * that all fields are fixed width.
+   */
+
+  private void computeFixedPart() {
+    allFieldsFixedLength = true;
+    for (ParquetColumnMetadata colMd : selectedColumnMetadata) {
+      if (colMd.isFixedLength()) {
+        bitWidthAllFixedFields += colMd.length;
+      } else {
+        allFieldsFixedLength = false;
+      }
+    }
+  }
+
+  public boolean isStarQuery() { return selectedCols == null; }
+  public ParquetMetadata footer() { return footer; }
+  public int getBitWidthAllFixedFields() { return bitWidthAllFixedFields; }
+  public int getRecordsPerBatch() { return recordsPerBatch; }
+  public boolean allFieldsFixedLength() { return allFieldsFixedLength; }
+  public List<ParquetColumnMetadata> getColumnMetadata() { return selectedColumnMetadata; }
+
+  /**
+   * Return the row count for the row group being read.
+   *
+   * @return number of records in the Parquet row group
+   */
+
+  public long getGroupRecordCount() { return groupRecordCount; }
+
+  public BlockMetaData getRowGroupMetadata() {
+    return footer.getBlocks().get(rowGroupIndex);
+  }
+
+  /**
+   * Determine if a Parquet field is selected for the query. It is selected
+   * either if this is a star query (we want all columns), or the column
+   * appears in the select list.
+   *
+   * @param field the Parquet column expressed as a Drill field.
+   * @return true if the column is to be included in the scan, false
+   * if not
+   */
+
+  private boolean fieldSelected(MaterializedField field) {
+    // TODO - not sure if this is how we want to represent this
+    // for now it makes the existing tests pass, simply selecting
+    // all available data if no columns are provided
+    if (isStarQuery()) {
+      return true;
+    }
+
+    int i = 0;
+    for (SchemaPath expr : selectedCols) {
+      if ( field.getPath().equalsIgnoreCase(expr.getAsUnescapedPath())) {
+        columnsFound[i] = true;
+        return true;
+      }
+      i++;
+    }
+    return false;
+  }
+
+  /**
+   * Create "dummy" fields for columns which are selected in the SELECT clause, but not
+   * present in the Parquet schema.
+   * @param output the output container
+   * @throws SchemaChangeException should not occur
+   */
+
+  public void createNonExistentColumns(OutputMutator output, List<NullableIntVector> nullFilledVectors) throws SchemaChangeException {
+    List<SchemaPath> projectedColumns = Lists.newArrayList(selectedCols);
+    for (int i = 0; i < columnsFound.length; i++) {
+      SchemaPath col = projectedColumns.get(i);
+      assert col != null;
+      if ( ! columnsFound[i] && ! col.equals(ParquetRecordReader.STAR_COLUMN)) {
+        nullFilledVectors.add(createMissingColumn(col, output));
+      }
+    }
+  }
+
+  /**
+   * Create a "dummy" column for a missing field. The column is of type optional
+   * int, but will always be null.
+   *
+   * @param col the selected, but non-existent, schema path
+   * @param output the output container
+   * @return the value vector for the field
+   * @throws SchemaChangeException should not occur
+   */
+
+  private NullableIntVector createMissingColumn(SchemaPath col, OutputMutator output) throws SchemaChangeException {
+    MaterializedField field = MaterializedField.create(col.getAsUnescapedPath(),
+                          Types.optional(TypeProtos.MinorType.INT));
+    return (NullableIntVector) output.addField(field,
+              TypeHelper.getValueVectorClass(TypeProtos.MinorType.INT, DataMode.OPTIONAL));
+  }
+
+  Map<String, Integer> buildChunkMap(BlockMetaData rowGroupMetadata) {
+    // the column chunk meta-data is not guaranteed to be in the same order as the columns in the schema
+    // a map is constructed for fast access to the correct columnChunkMetadata to correspond
+    // to an element in the schema
+    Map<String, Integer> columnChunkMetadataPositionsInList = new HashMap<>();
+
+    int colChunkIndex = 0;
+    for (ColumnChunkMetaData colChunk : rowGroupMetadata.getColumns()) {
+      columnChunkMetadataPositionsInList.put(Arrays.toString(colChunk.getPath().toArray()), colChunkIndex);
+      colChunkIndex++;
+    }
+    return columnChunkMetadataPositionsInList;
+  }
+}
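
The buildChunkMap() helper above copes with column chunk metadata whose order
need not match the schema's column order: it keys a map on the column path
flattened with Arrays.toString(). A self-contained sketch of the same trick
(paths invented for illustration):

    import java.util.Arrays;
    import java.util.HashMap;
    import java.util.Map;

    class ChunkMapSketch {
      public static void main(String[] args) {
        String[][] chunkPaths = { {"b"}, {"a", "x"} };      // order as stored in the file
        Map<String, Integer> positions = new HashMap<>();
        for (int i = 0; i < chunkPaths.length; i++) {
          positions.put(Arrays.toString(chunkPaths[i]), i); // "[a, x]" -> 1, etc.
        }
        String[] schemaPath = {"a", "x"};                   // lookup in schema order
        System.out.println(positions.get(Arrays.toString(schemaPath))); // prints 1
      }
    }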

http://git-wip-us.apache.org/repos/asf/drill/blob/676ea889/exec/java-exec/src/main/java/org/apache/drill/exec/store/parquet/columnreaders/ReadState.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/store/parquet/columnreaders/ReadState.java b/exec/java-exec/src/main/java/org/apache/drill/exec/store/parquet/columnreaders/ReadState.java
new file mode 100644
index 0000000..f94edf1
--- /dev/null
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/store/parquet/columnreaders/ReadState.java
@@ -0,0 +1,192 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.drill.exec.store.parquet.columnreaders;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+import java.util.Map;
+
+import org.apache.drill.exec.physical.impl.OutputMutator;
+import org.apache.drill.exec.store.parquet.ParquetReaderStats;
+import org.apache.drill.exec.vector.NullableIntVector;
+import org.apache.drill.exec.vector.ValueVector;
+import org.apache.parquet.column.ColumnDescriptor;
+import org.apache.parquet.hadoop.metadata.BlockMetaData;
+
+/**
+ * Internal state for reading from a Parquet file. Tracks information
+ * required from one call of <tt>next()</tt> to the next.
+ * <p>
+ * At present, this is a bit of a muddle as it holds all read state.
+ * As such, this is a snapshot of a refactoring effort. Subsequent passes
+ * will move state into specific readers where possible.
+ */
+
+public class ReadState {
+  private final ParquetSchema schema;
+  private final ParquetReaderStats parquetReaderStats;
+  private VarLenBinaryReader varLengthReader;
+  /**
+   * For columns not found in the file, we need to return a schema element with the correct number of values
+   * at that position in the schema. Currently this requires a vector be present. Here is a list of all of these vectors
+   * that need only have their value count set at the end of each call to next(), as the values default to null.
+   */
+  private List<NullableIntVector> nullFilledVectors;
+  private List<ColumnReader<?>> columnReaders = new ArrayList<>();
+  private long numRecordsToRead; // number of records to read
+  /**
+   * Keeps track of the number of records read thus far.
+   * <p>
+   * Also keeps track of the number of records returned in the case where only columns outside of the file were selected.
+   * No actual data needs to be read out of the file, we only need to return batches until we have 'read' the number of
+   * records specified in the row group metadata.
+   */
+  private long totalRecordsRead;
+  private boolean useAsyncColReader;
+
+  public ReadState(ParquetSchema schema, ParquetReaderStats parquetReaderStats, long numRecordsToRead, boolean useAsyncColReader) {
+    this.schema = schema;
+    this.parquetReaderStats = parquetReaderStats;
+    this.useAsyncColReader = useAsyncColReader;
+    if (! schema.isStarQuery()) {
+      nullFilledVectors = new ArrayList<>();
+    }
+    // Callers can pass -1 if they want to read all rows.
+    if (numRecordsToRead == ParquetRecordReader.NUM_RECORDS_TO_READ_NOT_SPECIFIED) {
+      this.numRecordsToRead = schema.getGroupRecordCount();
+    } else {
+      assert (numRecordsToRead >= 0);
+      this.numRecordsToRead = Math.min(numRecordsToRead, schema.getGroupRecordCount());
+    }
+  }
+
+  /**
+   * Create the readers needed to read columns: fixed-length or variable length.
+   *
+   * @param reader the Parquet record reader that owns this read state
+   * @param output the output mutator in which to create the value vectors
+   * @throws Exception if a column reader cannot be created
+   */
+
+  @SuppressWarnings("unchecked")
+  public void buildReader(ParquetRecordReader reader, OutputMutator output) throws Exception {
+    final ArrayList<VarLengthColumn<? extends ValueVector>> varLengthColumns = new ArrayList<>();
+    // initialize all of the column read status objects
+    BlockMetaData rowGroupMetadata = schema.getRowGroupMetadata();
+    Map<String, Integer> columnChunkMetadataPositionsInList = schema.buildChunkMap(rowGroupMetadata);
+    for (ParquetColumnMetadata columnMetadata : schema.getColumnMetadata()) {
+      ColumnDescriptor column = columnMetadata.column;
+      columnMetadata.columnChunkMetaData = rowGroupMetadata.getColumns().get(
+                      columnChunkMetadataPositionsInList.get(Arrays.toString(column.getPath())));
+      columnMetadata.buildVector(output);
+      if (! columnMetadata.isFixedLength()) {
+        // create a reader and add it to the appropriate list
+        varLengthColumns.add(columnMetadata.makeVariableWidthReader(reader));
+      } else if (columnMetadata.isRepeated()) {
+        varLengthColumns.add(columnMetadata.makeRepeatedFixedWidthReader(reader, schema.getRecordsPerBatch()));
+      } else {
+        columnReaders.add(columnMetadata.makeFixedWidthReader(reader, schema.getRecordsPerBatch()));
+      }
+    }
+    varLengthReader = new VarLenBinaryReader(reader, varLengthColumns);
+    if (! schema.isStarQuery()) {
+      schema.createNonExistentColumns(output, nullFilledVectors);
+    }
+  }
+
+  /**
+   * Several readers use the first column reader to get information about the whole
+   * record or group (such as row count.)
+   *
+   * @return the reader for the first column
+   */
+
+  public ColumnReader<?> getFirstColumnReader() {
+    if (columnReaders.size() > 0) {
+      return columnReaders.get(0);
+    }
+    else if (varLengthReader.columns.size() > 0) {
+      return varLengthReader.columns.get(0);
+    } else {
+      return null;
+    }
+  }
+
+  public void resetBatch() {
+    for (final ColumnReader<?> column : columnReaders) {
+      column.valuesReadInCurrentPass = 0;
+    }
+    for (final VarLengthColumn<?> r : varLengthReader.columns) {
+      r.valuesReadInCurrentPass = 0;
+    }
+  }
+
+  public ParquetSchema schema() { return schema; }
+  public List<ColumnReader<?>> getColumnReaders() { return columnReaders; }
+  public long recordsRead() { return totalRecordsRead; }
+  public VarLenBinaryReader varLengthReader() { return varLengthReader; }
+  public long getRecordsToRead() { return numRecordsToRead; }
+  public boolean useAsyncColReader() { return useAsyncColReader; }
+  public ParquetReaderStats parquetReaderStats() { return parquetReaderStats; }
+
+  /**
+   * When the SELECT clause references columns that do not exist in the Parquet
+   * file, we don't issue an error; instead we simply make up a column and
+   * fill it with nulls. This method does the work of null-filling the made-up
+   * vectors.
+   *
+   * @param readCount the number of rows read in the present record batch,
+   * which is the number of null column values to create
+   */
+
+  public void fillNullVectors(int readCount) {
+
+    // if we have requested columns that were not found in the file fill their vectors with null
+    // (by simply setting the value counts inside of them, as they start null filled)
+
+    if (nullFilledVectors != null) {
+      for (final ValueVector vv : nullFilledVectors ) {
+        vv.getMutator().setValueCount(readCount);
+      }
+    }
+  }
+
+  public void updateCounts(int readCount) {
+    totalRecordsRead += readCount;
+    numRecordsToRead -= readCount;
+  }
+
+  public void close() {
+    if (columnReaders != null) {
+      for (final ColumnReader<?> column : columnReaders) {
+        column.clear();
+      }
+      columnReaders.clear();
+      columnReaders = null;
+    }
+    if (varLengthReader != null) {
+      for (final VarLengthColumn<? extends ValueVector> r : varLengthReader.columns) {
+        r.clear();
+      }
+      varLengthReader.columns.clear();
+      varLengthReader = null;
+    }
+  }
+}
\ No newline at end of file
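
To show how a batch reader might drive this state, here is a hypothetical
outline of the "mock" case (only missing columns selected, so no file data is
read). The fields and class below are invented for illustration; the patch's
actual BatchReader classes are referenced earlier but not shown in this hunk:

    class MockReadLoopSketch {
      long rowsInGroup = 100_000;    // row count from the row group metadata
      long rowsReturned = 0;         // analogue of ReadState.totalRecordsRead
      long rowsStillWanted = 60_000; // analogue of numRecordsToRead (e.g. LIMIT pushdown)

      int readBatch(int batchCap) {
        long n = Math.min(batchCap, rowsInGroup - rowsReturned);
        n = Math.min(n, rowsStillWanted);
        // a real reader would call readState.fillNullVectors((int) n) here so
        // the null-filled vectors report the right value count
        rowsReturned += n;           // readState.updateCounts((int) n) does both updates
        rowsStillWanted -= n;
        return (int) n;
      }
    }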

http://git-wip-us.apache.org/repos/asf/drill/blob/676ea889/exec/java-exec/src/main/java/org/apache/drill/exec/store/parquet2/DrillParquetGroupConverter.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/store/parquet2/DrillParquetGroupConverter.java b/exec/java-exec/src/main/java/org/apache/drill/exec/store/parquet2/DrillParquetGroupConverter.java
index 79dc740..5c8db91 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/store/parquet2/DrillParquetGroupConverter.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/store/parquet2/DrillParquetGroupConverter.java
@@ -47,6 +47,7 @@ import org.apache.drill.exec.expr.holders.VarCharHolder;
 import org.apache.drill.exec.physical.impl.OutputMutator;
 import org.apache.drill.exec.server.options.OptionManager;
 import org.apache.drill.exec.store.parquet.ParquetReaderUtility;
+import org.apache.drill.exec.store.parquet.columnreaders.ParquetColumnMetadata;
 import org.apache.drill.exec.store.parquet.columnreaders.ParquetRecordReader;
 import org.apache.drill.exec.util.DecimalUtility;
 import org.apache.drill.exec.vector.complex.impl.ComplexWriterImpl;
@@ -169,6 +170,7 @@ public class DrillParquetGroupConverter extends GroupConverter {
     }
   }
 
+  @SuppressWarnings("resource")
   private PrimitiveConverter getConverterForType(String name, PrimitiveType type) {
 
     switch(type.getPrimitiveTypeName()) {
@@ -236,7 +238,7 @@ public class DrillParquetGroupConverter extends GroupConverter {
             return new DrillFixedBinaryToTimeStampConverter(writer);
           } else {
             VarBinaryWriter writer = type.getRepetition() == Repetition.REPEATED ? mapWriter.list(name).varBinary() : mapWriter.varBinary(name);
-            return new DrillFixedBinaryToVarbinaryConverter(writer, ParquetRecordReader.getTypeLengthInBits(type.getPrimitiveTypeName()) / 8, mutator.getManagedBuffer());
+            return new DrillFixedBinaryToVarbinaryConverter(writer, ParquetColumnMetadata.getTypeLengthInBits(type.getPrimitiveTypeName()) / 8, mutator.getManagedBuffer());
           }
         }
 

http://git-wip-us.apache.org/repos/asf/drill/blob/676ea889/exec/java-exec/src/test/java/org/apache/drill/exec/store/parquet/ParquetInternalsTest.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/test/java/org/apache/drill/exec/store/parquet/ParquetInternalsTest.java b/exec/java-exec/src/test/java/org/apache/drill/exec/store/parquet/ParquetInternalsTest.java
new file mode 100644
index 0000000..60e466d
--- /dev/null
+++ b/exec/java-exec/src/test/java/org/apache/drill/exec/store/parquet/ParquetInternalsTest.java
@@ -0,0 +1,158 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.drill.exec.store.parquet;
+
+import org.apache.drill.TestBuilder;
+import org.apache.drill.common.expression.SchemaPath;
+import org.apache.drill.common.types.TypeProtos;
+import org.apache.drill.common.types.Types;
+import org.apache.drill.test.ClusterFixture;
+import org.apache.drill.test.ClusterTest;
+import org.apache.drill.test.FixtureBuilder;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import java.util.HashMap;
+import java.util.Map;
+
+public class ParquetInternalsTest extends ClusterTest {
+
+  @BeforeClass
+  public static void setup() throws Exception {
+    FixtureBuilder builder = ClusterFixture.builder()
+      // Set options, etc.
+      ;
+    startCluster(builder);
+  }
+
+  @Test
+  public void testFixedWidth() throws Exception {
+    String sql = "SELECT l_orderkey, l_partkey, l_suppkey, l_linenumber, l_quantity\n" +
+                 "FROM `cp`.`tpch/lineitem.parquet` LIMIT 20";
+//    client.queryBuilder().sql(sql).printCsv();
+
+    Map<SchemaPath, TypeProtos.MajorType> typeMap = new HashMap<>();
+    typeMap.put(TestBuilder.parsePath("l_orderkey"), Types.required(TypeProtos.MinorType.INT));
+    typeMap.put(TestBuilder.parsePath("l_partkey"), Types.required(TypeProtos.MinorType.INT));
+    typeMap.put(TestBuilder.parsePath("l_suppkey"), Types.required(TypeProtos.MinorType.INT));
+    typeMap.put(TestBuilder.parsePath("l_linenumber"), Types.required(TypeProtos.MinorType.INT));
+    typeMap.put(TestBuilder.parsePath("l_quantity"), Types.required(TypeProtos.MinorType.FLOAT8));
+    client.testBuilder()
+      .sqlQuery(sql)
+      .unOrdered()
+      .csvBaselineFile("parquet/expected/fixedWidth.csv")
+      .baselineColumns("l_orderkey", "l_partkey", "l_suppkey", "l_linenumber", "l_quantity")
+      .baselineTypes(typeMap)
+      .build()
+      .run();
+  }
+
+  @Test
+  public void testVariableWidth() throws Exception {
+    String sql = "SELECT s_name, s_address, s_phone, s_comment\n" +
+                 "FROM `cp`.`tpch/supplier.parquet` LIMIT 20";
+//    client.queryBuilder().sql(sql).printCsv();
+
+    Map<SchemaPath, TypeProtos.MajorType> typeMap = new HashMap<>();
+    typeMap.put(TestBuilder.parsePath("s_name"), Types.required(TypeProtos.MinorType.VARCHAR));
+    typeMap.put(TestBuilder.parsePath("s_address"), Types.required(TypeProtos.MinorType.VARCHAR));
+    typeMap.put(TestBuilder.parsePath("s_phone"), Types.required(TypeProtos.MinorType.VARCHAR));
+    typeMap.put(TestBuilder.parsePath("s_comment"), Types.required(TypeProtos.MinorType.VARCHAR));
+    client.testBuilder()
+      .sqlQuery(sql)
+      .unOrdered()
+      .csvBaselineFile("parquet/expected/variableWidth.csv")
+      .baselineColumns("s_name", "s_address", "s_phone", "s_comment")
+      .baselineTypes(typeMap)
+      .build()
+      .run();
+  }
+
+  @Test
+  public void testMixedWidth() throws Exception {
+    String sql = "SELECT s_suppkey, s_name, s_address, s_phone, s_acctbal\n" +
+                 "FROM `cp`.`tpch/supplier.parquet` LIMIT 20";
+//    client.queryBuilder().sql(sql).printCsv();
+
+    Map<SchemaPath, TypeProtos.MajorType> typeMap = new HashMap<>();
+    typeMap.put(TestBuilder.parsePath("s_suppkey"), Types.required(TypeProtos.MinorType.INT));
+    typeMap.put(TestBuilder.parsePath("s_name"), Types.required(TypeProtos.MinorType.VARCHAR));
+    typeMap.put(TestBuilder.parsePath("s_address"), Types.required(TypeProtos.MinorType.VARCHAR));
+    typeMap.put(TestBuilder.parsePath("s_phone"), Types.required(TypeProtos.MinorType.VARCHAR));
+    typeMap.put(TestBuilder.parsePath("s_acctbal"), Types.required(TypeProtos.MinorType.FLOAT8));
+    client.testBuilder()
+      .sqlQuery(sql)
+      .unOrdered()
+      .csvBaselineFile("parquet/expected/mixedWidth.csv")
+      .baselineColumns("s_suppkey", "s_name", "s_address", "s_phone", "s_acctbal")
+      .baselineTypes(typeMap)
+      .build()
+      .run();
+  }
+
+  @Test
+  public void testStar() throws Exception {
+    String sql = "SELECT *\n" +
+                 "FROM `cp`.`tpch/supplier.parquet` LIMIT 20";
+//    client.queryBuilder().sql(sql).printCsv();
+
+    Map<SchemaPath, TypeProtos.MajorType> typeMap = new HashMap<>();
+    typeMap.put(TestBuilder.parsePath("s_suppkey"), Types.required(TypeProtos.MinorType.INT));
+    typeMap.put(TestBuilder.parsePath("s_name"), Types.required(TypeProtos.MinorType.VARCHAR));
+    typeMap.put(TestBuilder.parsePath("s_address"), Types.required(TypeProtos.MinorType.VARCHAR));
+    typeMap.put(TestBuilder.parsePath("s_nationkey"), Types.required(TypeProtos.MinorType.INT));
+    typeMap.put(TestBuilder.parsePath("s_phone"), Types.required(TypeProtos.MinorType.VARCHAR));
+    typeMap.put(TestBuilder.parsePath("s_acctbal"), Types.required(TypeProtos.MinorType.FLOAT8));
+    typeMap.put(TestBuilder.parsePath("s_comment"), Types.required(TypeProtos.MinorType.VARCHAR));
+    client.testBuilder()
+      .sqlQuery(sql)
+      .unOrdered()
+      .csvBaselineFile("parquet/expected/star.csv")
+      .baselineColumns("s_suppkey", "s_name", "s_address", "s_nationkey", "s_phone", "s_acctbal", "s_comment")
+      .baselineTypes(typeMap)
+      .build()
+      .run();
+  }
+
+  @Test
+  public void testMissing() throws Exception {
+    String sql = "SELECT s_suppkey, bogus\n" +
+                 "FROM `cp`.`tpch/supplier.parquet` LIMIT 20";
+
+    // This test should return nothing but nulls. At present, the test
+    // framework can't check this case, so we temporarily dump the query
+    // results, in CSV form, to the console.
+    // TODO: Once the "row set" fixture is available, use that to verify
+    // that all rows are null.
+
+//    client.queryBuilder().sql(sql).printCsv();
+
+    // Can't handle nulls this way...
+//    Map<SchemaPath, TypeProtos.MajorType> typeMap = new HashMap<>();
+//    typeMap.put(TestBuilder.parsePath("s_suppkey"), Types.required(TypeProtos.MinorType.INT));
+//    typeMap.put(TestBuilder.parsePath("bogus"), Types.optional(TypeProtos.MinorType.INT));
+//    client.testBuilder()
+//      .sqlQuery(sql)
+//      .unOrdered()
+//      .csvBaselineFile("parquet/expected/bogus.csv")
+//      .baselineColumns("s_suppkey", "bogus")
+//      .baselineTypes(typeMap)
+//      .build()
+//      .run();
+  }
+}

http://git-wip-us.apache.org/repos/asf/drill/blob/676ea889/exec/java-exec/src/test/resources/parquet/expected/bogus.csv
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/test/resources/parquet/expected/bogus.csv b/exec/java-exec/src/test/resources/parquet/expected/bogus.csv
new file mode 100644
index 0000000..52af180
--- /dev/null
+++ b/exec/java-exec/src/test/resources/parquet/expected/bogus.csv
@@ -0,0 +1,20 @@
+1,
+2,
+3,
+4,
+5,
+6,
+7,
+8,
+9,
+10,
+11,
+12,
+13,
+14,
+15,
+16,
+17,
+18,
+19,
+20,

http://git-wip-us.apache.org/repos/asf/drill/blob/676ea889/exec/java-exec/src/test/resources/parquet/expected/fixedWidth.csv
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/test/resources/parquet/expected/fixedWidth.csv b/exec/java-exec/src/test/resources/parquet/expected/fixedWidth.csv
new file mode 100644
index 0000000..198c9b4
--- /dev/null
+++ b/exec/java-exec/src/test/resources/parquet/expected/fixedWidth.csv
@@ -0,0 +1,20 @@
+1,1552,93,1,17.0
+1,674,75,2,36.0
+1,637,38,3,8.0
+1,22,48,4,28.0
+1,241,23,5,24.0
+1,157,10,6,32.0
+2,1062,33,1,38.0
+3,43,19,1,45.0
+3,191,70,2,49.0
+3,1285,60,3,27.0
+3,294,22,4,2.0
+3,1831,61,5,28.0
+3,622,16,6,26.0
+4,881,81,1,30.0
+5,1086,87,1,15.0
+5,1240,41,2,26.0
+5,376,5,3,50.0
+6,1397,36,1,37.0
+7,1821,51,1,12.0
+7,1453,93,2,9.0

http://git-wip-us.apache.org/repos/asf/drill/blob/676ea889/exec/java-exec/src/test/resources/parquet/expected/mixedWidth.csv
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/test/resources/parquet/expected/mixedWidth.csv b/exec/java-exec/src/test/resources/parquet/expected/mixedWidth.csv
new file mode 100644
index 0000000..8956083
--- /dev/null
+++ b/exec/java-exec/src/test/resources/parquet/expected/mixedWidth.csv
@@ -0,0 +1,20 @@
+1,"Supplier#000000001"," N kD4on9OM Ipw3,gf0JBoQDd7tgrzrddZ","27-918-335-1736",5755.94
+2,"Supplier#000000002","89eJ5ksX3ImxJQBvxObC,","15-679-861-2259",4032.68
+3,"Supplier#000000003","q1,G3Pj6OjIuUYfUoH18BFTKP5aU9bEV3","11-383-516-1199",4192.4
+4,"Supplier#000000004","Bk7ah4CK8SYQTepEmvMkkgMwg","25-843-787-7479",4641.08
+5,"Supplier#000000005","Gcdm2rJRzl5qlTVzc","21-151-690-3663",-283.84
+6,"Supplier#000000006","tQxuVm7s7CnK","24-696-997-4969",1365.79
+7,"Supplier#000000007","s,4TicNGB4uO6PaSqNBUq","33-990-965-2201",6820.35
+8,"Supplier#000000008","9Sq4bBH2FQEmaFOocY45sRTxo6yuoG","27-498-742-3860",7627.85
+9,"Supplier#000000009","1KhUgZegwM3ua7dsYmekYBsK","20-403-398-8662",5302.37
+10,"Supplier#000000010","Saygah3gYWMp72i PY","34-852-489-8585",3891.91
+11,"Supplier#000000011","JfwTs,LZrV, M,9C","28-613-996-1505",3393.08
+12,"Supplier#000000012","aLIW  q0HYd","18-179-925-7181",1432.69
+13,"Supplier#000000013","HK71HQyWoqRWOX8GI FpgAifW,2PoH","13-727-620-7813",9107.22
+14,"Supplier#000000014","EXsnO5pTNj4iZRm","25-656-247-5058",9189.82
+15,"Supplier#000000015","olXVbNBfVzRqgokr1T,Ie","18-453-357-6394",308.56
+16,"Supplier#000000016","YjP5C55zHDXL7LalK27zfQnwejdpin4AMpvh","32-822-502-4215",2972.26
+17,"Supplier#000000017","c2d,ESHRSkK3WYnxpgw6aOqN0q","29-601-884-9219",1687.81
+18,"Supplier#000000018","PGGVE5PWAMwKDZw ","26-729-551-1115",7040.82
+19,"Supplier#000000019","edZT3es,nBFD8lBXTGeTl","34-278-310-2731",6150.38
+20,"Supplier#000000020","iybAE,RmTymrZVYaFZva2SH,j","13-715-945-6730",530.82


[08/12] drill git commit: DRILL-5504: Add vector validator to diagnose offset vector issues

Posted by jn...@apache.org.
DRILL-5504: Add vector validator to diagnose offset vector issues

Validates offset vectors in VarChar and repeated vectors. Validates the
special case of repeated VarChar vectors (two layers of offsets).

Provides two new session variables to turn on validation. One enables
the existing operator (iterator) validation, the other adds vector
validation. This allows validation to occur in a “production” Drill
(without restarting Drill with assertions, as previously required).

Unit tests validate the validator. Another test validates the
integration, but requires manual steps, so it is ignored by default.

This version is a first cut: all work is done within a single class,
which allows back-porting to an earlier version to solve specific
issues. A revision should move some of the work into generated code (or
refactor vectors to allow outside access), since offset vectors appear
in each subclass, not on a base class that would allow generic
operations.

* Added boot-time options to allow enabling vector validation in Maven
unit tests.
* Code cleanup per suggestions.
* Additional (manual) tests for boot-time options and default options.

closes #832


Project: http://git-wip-us.apache.org/repos/asf/drill/repo
Commit: http://git-wip-us.apache.org/repos/asf/drill/commit/d7bc213b
Tree: http://git-wip-us.apache.org/repos/asf/drill/tree/d7bc213b
Diff: http://git-wip-us.apache.org/repos/asf/drill/diff/d7bc213b

Branch: refs/heads/master
Commit: d7bc213ba7cf9a49657cb0699540ca375014a828
Parents: 7873988
Author: Paul Rogers <pr...@maprtech.com>
Authored: Thu May 11 12:46:15 2017 -0700
Committer: Jinfeng Ni <jn...@apache.org>
Committed: Fri Jun 2 21:43:14 2017 -0700

----------------------------------------------------------------------
 .../org/apache/drill/exec/ExecConstants.java    |  31 +-
 .../drill/exec/physical/impl/ImplCreator.java   |  12 +-
 .../physical/impl/validate/BatchValidator.java  | 208 ++++++++++++
 .../IteratorValidatorBatchIterator.java         |  20 +-
 .../impl/validate/IteratorValidatorCreator.java |  12 +-
 .../server/options/SystemOptionManager.java     |   4 +-
 .../compliant/CompliantTextRecordReader.java    |   1 +
 .../src/main/resources/drill-module.conf        |  10 +
 .../impl/validate/TestBatchValidator.java       | 323 +++++++++++++++++++
 .../impl/validate/TestValidationOptions.java    | 135 ++++++++
 10 files changed, 749 insertions(+), 7 deletions(-)
----------------------------------------------------------------------
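
For orientation, a minimal sketch of turning both validation layers on,
assuming the ClusterFixture/ClientFixture test framework used in
TestValidationOptions below; the mock query is borrowed from those tests,
and running the fixtures from a plain main() is an assumption of this
sketch, not something the commit itself shows:

    import org.apache.drill.exec.ExecConstants;
    import org.apache.drill.test.ClientFixture;
    import org.apache.drill.test.ClusterFixture;
    import org.apache.drill.test.FixtureBuilder;

    public class ValidationOptionsSketch {
      public static void main(String[] args) throws Exception {
        // Boot-time config properties force validation on for the whole
        // (embedded) Drillbit, even when assertions (-ea) are off.
        FixtureBuilder builder = ClusterFixture.builder()
            .configProperty(ExecConstants.ENABLE_ITERATOR_VALIDATION, true)
            .configProperty(ExecConstants.ENABLE_VECTOR_VALIDATION, true);
        try (ClusterFixture cluster = builder.build();
             ClientFixture client = cluster.clientFixture()) {
          // Session options do the same per connection; equivalent to
          // ALTER SESSION SET `debug.validate_iterators` = true, etc.
          client.alterSession(ExecConstants.ENABLE_ITERATOR_VALIDATION_OPTION, true);
          client.alterSession(ExecConstants.ENABLE_VECTOR_VALIDATION_OPTION, true);
          client.queryBuilder().sql("SELECT id_i, name_s10 FROM `mock`.`customers_10`").run();
        }
      }
    }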


http://git-wip-us.apache.org/repos/asf/drill/blob/d7bc213b/exec/java-exec/src/main/java/org/apache/drill/exec/ExecConstants.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/ExecConstants.java b/exec/java-exec/src/main/java/org/apache/drill/exec/ExecConstants.java
index 007e39a..83ffb20 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/ExecConstants.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/ExecConstants.java
@@ -447,11 +447,40 @@ public interface ExecConstants {
   String USE_DYNAMIC_UDFS_KEY = "exec.udf.use_dynamic";
   BooleanValidator USE_DYNAMIC_UDFS = new BooleanValidator(USE_DYNAMIC_UDFS_KEY, true);
 
-
   String QUERY_TRANSIENT_STATE_UPDATE_KEY = "exec.query.progress.update";
   BooleanValidator QUERY_TRANSIENT_STATE_UPDATE = new BooleanValidator(QUERY_TRANSIENT_STATE_UPDATE_KEY, true);
 
   String PERSISTENT_TABLE_UMASK = "exec.persistent_table.umask";
   StringValidator PERSISTENT_TABLE_UMASK_VALIDATOR = new StringValidator(PERSISTENT_TABLE_UMASK, "002");
 
+  /**
+   * Enables batch iterator (operator) validation. Validation is normally enabled
+   * only when assertions are enabled. This option enables iterator validation even
+   * if assertions are not enabled. That is, it allows iterator validation even on
+   * a "production" Drill instance.
+   */
+  String ENABLE_ITERATOR_VALIDATION_OPTION = "debug.validate_iterators";
+  BooleanValidator ENABLE_ITERATOR_VALIDATOR = new BooleanValidator(ENABLE_ITERATOR_VALIDATION_OPTION, false);
+
+  /**
+   * Boot-time config option to enable validation. Primarily used for tests.
+   * If true, overrides the above. (That is, validation is done if assertions
+   * are on, if the above session option is set to true, or if this config
+   * option is set to true.)
+   */
+
+  String ENABLE_ITERATOR_VALIDATION = "drill.exec.debug.validate_iterators";
+
+  /**
+   * When iterator validation is enabled, additionally validates the vectors in
+   * each batch passed to each iterator.
+   */
+  String ENABLE_VECTOR_VALIDATION_OPTION = "debug.validate_vectors";
+  BooleanValidator ENABLE_VECTOR_VALIDATOR = new BooleanValidator(ENABLE_VECTOR_VALIDATION_OPTION, false);
+
+  /**
+   * Boot-time config option to enable vector validation. Primarily used for
+   * tests. Add the following to the command line to enable:<br>
+   * <tt>-ea -Ddrill.exec.debug.validate_vectors=true</tt>
+   */
+  String ENABLE_VECTOR_VALIDATION = "drill.exec.debug.validate_vectors";
 }

http://git-wip-us.apache.org/repos/asf/drill/blob/d7bc213b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/ImplCreator.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/ImplCreator.java b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/ImplCreator.java
index 5872ef1..58bf383 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/ImplCreator.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/ImplCreator.java
@@ -1,4 +1,4 @@
-/**
+/*
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
  * distributed with this work for additional information
@@ -26,6 +26,7 @@ import java.util.concurrent.TimeUnit;
 import com.google.common.annotations.VisibleForTesting;
 import org.apache.drill.common.AutoCloseables;
 import org.apache.drill.common.exceptions.ExecutionSetupException;
+import org.apache.drill.exec.ExecConstants;
 import org.apache.drill.exec.ops.FragmentContext;
 import org.apache.drill.exec.physical.base.FragmentRoot;
 import org.apache.drill.exec.physical.base.PhysicalOperator;
@@ -69,9 +70,16 @@ public class ImplCreator {
     Preconditions.checkNotNull(root);
     Preconditions.checkNotNull(context);
 
-    if (AssertionUtil.isAssertionsEnabled()) {
+    // Enable iterator (operator) validation if assertions are enabled (debug mode)
+    // or if in production mode and the ENABLE_ITERATOR_VALIDATION option is set
+    // to true.
+
+    if (AssertionUtil.isAssertionsEnabled() ||
+        context.getOptionSet().getOption(ExecConstants.ENABLE_ITERATOR_VALIDATOR) ||
+        context.getConfig().getBoolean(ExecConstants.ENABLE_ITERATOR_VALIDATION)) {
       root = IteratorValidatorInjector.rewritePlanWithIteratorValidator(context, root);
     }
+
     final ImplCreator creator = new ImplCreator();
     Stopwatch watch = Stopwatch.createStarted();
 

http://git-wip-us.apache.org/repos/asf/drill/blob/d7bc213b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/validate/BatchValidator.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/validate/BatchValidator.java b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/validate/BatchValidator.java
new file mode 100644
index 0000000..e0f3ff2
--- /dev/null
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/validate/BatchValidator.java
@@ -0,0 +1,208 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ ******************************************************************************/
+package org.apache.drill.exec.physical.impl.validate;
+
+import java.util.ArrayList;
+import java.util.List;
+
+import org.apache.drill.exec.record.SimpleVectorWrapper;
+import org.apache.drill.exec.record.VectorAccessible;
+import org.apache.drill.exec.record.VectorWrapper;
+import org.apache.drill.exec.vector.BaseDataValueVector;
+import org.apache.drill.exec.vector.FixedWidthVector;
+import org.apache.drill.exec.vector.NullableVarCharVector;
+import org.apache.drill.exec.vector.NullableVector;
+import org.apache.drill.exec.vector.RepeatedVarCharVector;
+import org.apache.drill.exec.vector.UInt4Vector;
+import org.apache.drill.exec.vector.ValueVector;
+import org.apache.drill.exec.vector.VarCharVector;
+import org.apache.drill.exec.vector.VariableWidthVector;
+import org.apache.drill.exec.vector.complex.BaseRepeatedValueVector;
+import org.apache.drill.exec.vector.complex.RepeatedFixedWidthVectorLike;
+
+
+/**
+ * Validates a batch of value vectors. It is not possible to validate the
+ * data itself, but we can validate the structure, especially offset vectors.
+ * Handles only single (non-hyper) vectors at present. The current form is
+ * self-contained; better checks could be done by moving them inside the
+ * vectors or by exposing more vector metadata.
+ */
+
+public class BatchValidator {
+  private static final org.slf4j.Logger logger =
+      org.slf4j.LoggerFactory.getLogger(BatchValidator.class);
+
+  public static final int MAX_ERRORS = 100;
+
+  private final int rowCount;
+  private final VectorAccessible batch;
+  private final List<String> errorList;
+  private int errorCount;
+
+  public BatchValidator(VectorAccessible batch) {
+    rowCount = batch.getRecordCount();
+    this.batch = batch;
+    errorList = null;
+  }
+
+  public BatchValidator(VectorAccessible batch, boolean captureErrors) {
+    rowCount = batch.getRecordCount();
+    this.batch = batch;
+    if (captureErrors) {
+      errorList = new ArrayList<>();
+    } else {
+      errorList = null;
+    }
+  }
+
+  public void validate() {
+    if (batch.getRecordCount() == 0) {
+      return;
+    }
+    for (VectorWrapper<? extends ValueVector> w : batch) {
+      validateWrapper(w);
+    }
+  }
+
+  private void validateWrapper(VectorWrapper<? extends ValueVector> w) {
+    if (w instanceof SimpleVectorWrapper) {
+      validateVector(w.getValueVector());
+    }
+  }
+
+  private void validateVector(ValueVector vector) {
+    String name = vector.getField().getName();
+    if (vector instanceof NullableVector) {
+      validateNullableVector(name, (NullableVector) vector);
+    } else if (vector instanceof VariableWidthVector) {
+      validateVariableWidthVector(name, (VariableWidthVector) vector, rowCount);
+    } else if (vector instanceof FixedWidthVector) {
+      validateFixedWidthVector(name, (FixedWidthVector) vector);
+    } else if (vector instanceof BaseRepeatedValueVector) {
+      validateRepeatedVector(name, (BaseRepeatedValueVector) vector);
+    } else {
+      logger.debug("Don't know how to validate vector: " + name + " of class " + vector.getClass().getSimpleName());
+    }
+  }
+
+  private void validateVariableWidthVector(String name, VariableWidthVector vector, int entryCount) {
+
+    // Offsets are in the derived classes. Handle only VarChar for now.
+
+    if (vector instanceof VarCharVector) {
+      validateVarCharVector(name, (VarCharVector) vector, entryCount);
+    } else {
+      logger.debug("Don't know how to validate vector: " + name + " of class " + vector.getClass().getSimpleName());
+    }
+  }
+
+  private void validateVarCharVector(String name, VarCharVector vector, int entryCount) {
+//    int dataLength = vector.getAllocatedByteCount(); // Includes offsets and data.
+    int dataLength = vector.getBuffer().capacity();
+    validateOffsetVector(name + "-offsets", vector.getOffsetVector(), entryCount, dataLength);
+  }
+
+  private void validateRepeatedVector(String name, BaseRepeatedValueVector vector) {
+
+    int dataLength = Integer.MAX_VALUE;
+    if (vector instanceof RepeatedVarCharVector) {
+      dataLength = ((RepeatedVarCharVector) vector).getOffsetVector().getValueCapacity();
+    } else if (vector instanceof RepeatedFixedWidthVectorLike) {
+      dataLength = ((BaseDataValueVector) ((BaseRepeatedValueVector) vector).getDataVector()).getBuffer().capacity();
+    }
+    int itemCount = validateOffsetVector(name + "-offsets", vector.getOffsetVector(), rowCount, dataLength);
+
+    // Special handling of repeated VarChar vectors
+    // The nested data vectors are not quite exactly like top-level vectors.
+
+    @SuppressWarnings("resource")
+    ValueVector dataVector = vector.getDataVector();
+    if (dataVector instanceof VariableWidthVector) {
+      validateVariableWidthVector(name + "-data", (VariableWidthVector) dataVector, itemCount);
+    }
+  }
+
+  private int validateOffsetVector(String name, UInt4Vector offsetVector, int valueCount, int maxOffset) {
+    if (valueCount == 0) {
+      return 0;
+    }
+    UInt4Vector.Accessor accessor = offsetVector.getAccessor();
+
+    // First value must be zero in current version.
+
+    int prevOffset = accessor.get(0);
+    if (prevOffset != 0) {
+      error(name, offsetVector, "Offset (0) must be 0 but was " + prevOffset);
+    }
+
+    // Note <= comparison: offset vectors have (n+1) entries.
+
+    for (int i = 1; i <= valueCount; i++) {
+      int offset = accessor.get(i);
+      if (offset < prevOffset) {
+        error(name, offsetVector, "Decreasing offsets at (" + (i-1) + ", " + i + ") = (" + prevOffset + ", " + offset + ")");
+      } else if (offset > maxOffset) {
+        error(name, offsetVector, "Invalid offset at index " + i + " = " + offset + " exceeds maximum of " + maxOffset);
+      }
+      prevOffset = offset;
+    }
+    return prevOffset;
+  }
+
+  private void error(String name, ValueVector vector, String msg) {
+    if (errorCount == 0) {
+      logger.error("Found one or more vector errors from " + batch.getClass().getSimpleName());
+    }
+    errorCount++;
+    if (errorCount >= MAX_ERRORS) {
+      return;
+    }
+    String fullMsg = "Column " + name + " of type " + vector.getClass().getSimpleName() + ": " + msg;
+    logger.error(fullMsg);
+    if (errorList != null) {
+      errorList.add(fullMsg);
+    }
+  }
+
+  private void validateNullableVector(String name, NullableVector vector) {
+    // Can't validate at this time because the bits vector is in each
+    // generated subtype.
+
+    // Validate a VarChar vector because it is common.
+
+    if (vector instanceof NullableVarCharVector) {
+      @SuppressWarnings("resource")
+      VarCharVector values = ((NullableVarCharVector) vector).getValuesVector();
+      validateVarCharVector(name + "-values", values, rowCount);
+    }
+  }
+
+  private void validateFixedWidthVector(String name, FixedWidthVector vector) {
+    // Fixed-width vectors have no offset vector, so there is no structure
+    // to validate at present.
+  }
+
+  /**
+   * Obtain the list of errors. For use in unit-testing this class.
+   * @return the list of errors found, or null if error capture was
+   * not enabled
+   */
+
+  public List<String> errors() { return errorList; }
+}
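
A minimal usage sketch, assuming `batch` is any populated VectorAccessible
(for example, the incoming RecordBatch that IteratorValidatorBatchIterator
passes in); with error capture enabled, errors() returns a non-null list:

    // Sketch only: `batch` is assumed to be a populated VectorAccessible.
    BatchValidator validator = new BatchValidator(batch, true); // true = capture errors
    validator.validate();
    for (String error : validator.errors()) {
      // Each entry names the column, vector class, and the offset problem found.
      System.err.println(error);
    }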

http://git-wip-us.apache.org/repos/asf/drill/blob/d7bc213b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/validate/IteratorValidatorBatchIterator.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/validate/IteratorValidatorBatchIterator.java b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/validate/IteratorValidatorBatchIterator.java
index 01c3c92..0d7fccc 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/validate/IteratorValidatorBatchIterator.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/validate/IteratorValidatorBatchIterator.java
@@ -1,4 +1,4 @@
-/**
+/*
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
  * distributed with this work for additional information
@@ -94,6 +94,11 @@ public class IteratorValidatorBatchIterator implements CloseableRecordBatch {
   /** High-level IterOutcome sequence state. */
   private ValidationState validationState = ValidationState.INITIAL_NO_SCHEMA;
 
+  /**
+   * Enable/disable per-batch vector validation. Enable only to debug vector
+   * corruption issues.
+   */
+  private boolean validateBatches;
 
   public IteratorValidatorBatchIterator(RecordBatch incoming) {
     this.incoming = incoming;
@@ -103,6 +108,11 @@ public class IteratorValidatorBatchIterator implements CloseableRecordBatch {
     logger.trace( "[#{}; on {}]: Being constructed.", instNum, batchTypeName);
   }
 
+
+  public void enableBatchValidation(boolean option) {
+    validateBatches = option;
+  }
+
   @Override
   public String toString() {
     return
@@ -224,6 +234,7 @@ public class IteratorValidatorBatchIterator implements CloseableRecordBatch {
           // above).
           // OK_NEW_SCHEMA moves to have-seen-schema state.
           validationState = ValidationState.HAVE_SCHEMA;
+          validateBatch();
           break;
         case OK:
           // OK is allowed as long as OK_NEW_SCHEMA was seen, except if terminated
@@ -234,6 +245,7 @@ public class IteratorValidatorBatchIterator implements CloseableRecordBatch {
                     "next() returned %s without first returning %s [#%d, %s]",
                     batchState, OK_NEW_SCHEMA, instNum, batchTypeName));
           }
+          validateBatch();
           // OK doesn't change high-level state.
           break;
         case NONE:
@@ -326,6 +338,12 @@ public class IteratorValidatorBatchIterator implements CloseableRecordBatch {
     }
   }
 
+  private void validateBatch() {
+    if (validateBatches) {
+      new BatchValidator(incoming).validate();
+    }
+  }
+
   @Override
   public WritableBatch getWritableBatch() {
     validateReadState("getWritableBatch()");

http://git-wip-us.apache.org/repos/asf/drill/blob/d7bc213b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/validate/IteratorValidatorCreator.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/validate/IteratorValidatorCreator.java b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/validate/IteratorValidatorCreator.java
index cc30326..2288419 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/validate/IteratorValidatorCreator.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/validate/IteratorValidatorCreator.java
@@ -1,4 +1,4 @@
-/**
+/*
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
  * distributed with this work for additional information
@@ -20,6 +20,7 @@ package org.apache.drill.exec.physical.impl.validate;
 import java.util.List;
 
 import org.apache.drill.common.exceptions.ExecutionSetupException;
+import org.apache.drill.exec.ExecConstants;
 import org.apache.drill.exec.ops.FragmentContext;
 import org.apache.drill.exec.physical.config.IteratorValidator;
 import org.apache.drill.exec.physical.impl.BatchCreator;
@@ -35,6 +36,13 @@ public class IteratorValidatorCreator implements BatchCreator<IteratorValidator>
       List<RecordBatch> children)
       throws ExecutionSetupException {
     Preconditions.checkArgument(children.size() == 1);
-    return new IteratorValidatorBatchIterator(children.iterator().next());
+    RecordBatch child = children.iterator().next();
+    IteratorValidatorBatchIterator iter = new IteratorValidatorBatchIterator(child);
+    boolean validateBatches = context.getOptionSet().getOption(ExecConstants.ENABLE_VECTOR_VALIDATOR) ||
+                              context.getConfig().getBoolean(ExecConstants.ENABLE_VECTOR_VALIDATION);
+    iter.enableBatchValidation(validateBatches);
+    logger.trace("Iterator validation enabled for " + child.getClass().getSimpleName() +
+                 (validateBatches ? " with vector validation" : ""));
+    return iter;
   }
 }

http://git-wip-us.apache.org/repos/asf/drill/blob/d7bc213b/exec/java-exec/src/main/java/org/apache/drill/exec/server/options/SystemOptionManager.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/server/options/SystemOptionManager.java b/exec/java-exec/src/main/java/org/apache/drill/exec/server/options/SystemOptionManager.java
index 8d0e96c..4f7ecc2 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/server/options/SystemOptionManager.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/server/options/SystemOptionManager.java
@@ -170,7 +170,9 @@ public class SystemOptionManager extends BaseOptionManager implements OptionMana
       ExecConstants.QUERY_PROFILE_DEBUG_VALIDATOR,
       ExecConstants.USE_DYNAMIC_UDFS,
       ExecConstants.QUERY_TRANSIENT_STATE_UPDATE,
-      ExecConstants.PERSISTENT_TABLE_UMASK_VALIDATOR
+      ExecConstants.PERSISTENT_TABLE_UMASK_VALIDATOR,
+      ExecConstants.ENABLE_ITERATOR_VALIDATOR,
+      ExecConstants.ENABLE_VECTOR_VALIDATOR
     };
     final Map<String, OptionValidator> tmp = new HashMap<>();
     for (final OptionValidator validator : validators) {

http://git-wip-us.apache.org/repos/asf/drill/blob/d7bc213b/exec/java-exec/src/main/java/org/apache/drill/exec/store/easy/text/compliant/CompliantTextRecordReader.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/store/easy/text/compliant/CompliantTextRecordReader.java b/exec/java-exec/src/main/java/org/apache/drill/exec/store/easy/text/compliant/CompliantTextRecordReader.java
index e253730..4a35c3b 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/store/easy/text/compliant/CompliantTextRecordReader.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/store/easy/text/compliant/CompliantTextRecordReader.java
@@ -151,6 +151,7 @@ public class CompliantTextRecordReader extends AbstractRecordReader {
       }
 
       // setup Input using InputStream
+      logger.trace("Opening file {}", split.getPath());
       stream = dfs.openPossiblyCompressedStream(split.getPath());
       input = new TextInput(settings, stream, readBuffer, split.getStart(), split.getStart() + split.getLength());
 

http://git-wip-us.apache.org/repos/asf/drill/blob/d7bc213b/exec/java-exec/src/main/resources/drill-module.conf
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/resources/drill-module.conf b/exec/java-exec/src/main/resources/drill-module.conf
index 19e1b1f..7c095ac 100644
--- a/exec/java-exec/src/main/resources/drill-module.conf
+++ b/exec/java-exec/src/main/resources/drill-module.conf
@@ -187,6 +187,16 @@ drill.exec: {
     // Use plain Java compilation where available
     prefer_plain_java: false
   },
+  debug: {
+    // If true, inserts the iterator validator atop each operator.
+    // Primarily used for testing.
+    validate_iterators: false,
+    // If iterator validation is enabled, also validates the vectors
+    // in each batch. Primarily used for testing. To enable from
+    // the command line:
+    // java ... -ea -Ddrill.exec.debug.validate_vectors=true ...
+    validate_vectors: false
+  },
   sort: {
     purge.threshold : 1000,
     external: {

http://git-wip-us.apache.org/repos/asf/drill/blob/d7bc213b/exec/java-exec/src/test/java/org/apache/drill/exec/physical/impl/validate/TestBatchValidator.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/test/java/org/apache/drill/exec/physical/impl/validate/TestBatchValidator.java b/exec/java-exec/src/test/java/org/apache/drill/exec/physical/impl/validate/TestBatchValidator.java
new file mode 100644
index 0000000..eafb4c8
--- /dev/null
+++ b/exec/java-exec/src/test/java/org/apache/drill/exec/physical/impl/validate/TestBatchValidator.java
@@ -0,0 +1,323 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ ******************************************************************************/
+package org.apache.drill.exec.physical.impl.validate;
+
+import static org.junit.Assert.*;
+
+import java.util.List;
+
+import org.apache.drill.common.types.TypeProtos.DataMode;
+import org.apache.drill.common.types.TypeProtos.MinorType;
+import org.apache.drill.exec.physical.impl.validate.BatchValidator;
+import org.apache.drill.exec.record.BatchSchema;
+import org.apache.drill.exec.record.VectorAccessible;
+import org.apache.drill.exec.vector.RepeatedVarCharVector;
+import org.apache.drill.exec.vector.UInt4Vector;
+import org.apache.drill.exec.vector.ValueVector;
+import org.apache.drill.exec.vector.VarCharVector;
+import org.apache.drill.test.LogFixture;
+import org.apache.drill.test.OperatorFixture;
+import org.apache.drill.test.rowSet.RowSet.SingleRowSet;
+import org.apache.drill.test.rowSet.SchemaBuilder;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import ch.qos.logback.classic.Level;
+
+public class TestBatchValidator /* TODO: extends SubOperatorTest */ {
+
+  protected static OperatorFixture fixture;
+  protected static LogFixture logFixture;
+
+  @BeforeClass
+  public static void setUpBeforeClass() throws Exception {
+    logFixture = LogFixture.builder()
+        .toConsole()
+        .logger(BatchValidator.class, Level.TRACE)
+        .build();
+    fixture = OperatorFixture.standardFixture();
+  }
+
+  @AfterClass
+  public static void tearDownAfterClass() throws Exception {
+    fixture.close();
+    logFixture.close();
+  }
+
+  @Test
+  public void testValidFixed() {
+    BatchSchema schema = new SchemaBuilder()
+        .add("a", MinorType.INT)
+        .addNullable("b", MinorType.INT)
+        .build();
+
+    SingleRowSet batch = fixture.rowSetBuilder(schema)
+        .add(10, 100)
+        .add(20, 120)
+        .add(30, null)
+        .add(40, 140)
+        .build();
+
+    BatchValidator validator = new BatchValidator(batch.vectorAccessible(), true);
+    validator.validate();
+    assertTrue(validator.errors().isEmpty());
+    batch.clear();
+  }
+
+  @Test
+  public void testValidVariable() {
+    BatchSchema schema = new SchemaBuilder()
+        .add("a", MinorType.VARCHAR)
+        .addNullable("b", MinorType.VARCHAR)
+        .build();
+
+    SingleRowSet batch = fixture.rowSetBuilder(schema)
+        .add("col1.1", "col1.2")
+        .add("col2.1", "col2.2")
+        .add("col3.1", null)
+        .add("col4.1", "col4.2")
+        .build();
+
+    BatchValidator validator = new BatchValidator(batch.vectorAccessible(), true);
+    validator.validate();
+    assertTrue(validator.errors().isEmpty());
+    batch.clear();
+  }
+
+  @Test
+  public void testValidRepeated() {
+    BatchSchema schema = new SchemaBuilder()
+        .add("a", MinorType.INT, DataMode.REPEATED)
+        .add("b", MinorType.VARCHAR, DataMode.REPEATED)
+        .build();
+
+    SingleRowSet batch = fixture.rowSetBuilder(schema)
+        .add(new int[] {}, new String[] {})
+        .add(new int[] {1, 2, 3}, new String[] {"fred", "barney", "wilma"})
+        .add(new int[] {4}, new String[] {"dino"})
+        .build();
+
+    BatchValidator validator = new BatchValidator(batch.vectorAccessible(), true);
+    validator.validate();
+    assertTrue(validator.errors().isEmpty());
+    batch.clear();
+  }
+
+  @Test
+  public void testVariableMissingLast() {
+    BatchSchema schema = new SchemaBuilder()
+        .add("a", MinorType.VARCHAR)
+        .build();
+
+    SingleRowSet batch = fixture.rowSetBuilder(schema)
+        .add("x")
+        .add("y")
+        .add("z")
+        .build();
+
+    // Here we are evil: stomp on the last offset to simulate corruption.
+    // Don't do this in real code!
+
+    VectorAccessible va = batch.vectorAccessible();
+    @SuppressWarnings("resource")
+    ValueVector v = va.iterator().next().getValueVector();
+    VarCharVector vc = (VarCharVector) v;
+    @SuppressWarnings("resource")
+    UInt4Vector ov = vc.getOffsetVector();
+    assertTrue(ov.getAccessor().get(3) > 0);
+    ov.getMutator().set(3, 0);
+
+    // Validator should catch the error.
+
+    BatchValidator validator = new BatchValidator(batch.vectorAccessible(), true);
+    validator.validate();
+    List<String> errors = validator.errors();
+    assertEquals(1, errors.size());
+    assertTrue(errors.get(0).contains("Decreasing offsets"));
+    batch.clear();
+  }
+
+  @Test
+  public void testVariableCorruptFirst() {
+    BatchSchema schema = new SchemaBuilder()
+        .add("a", MinorType.VARCHAR)
+        .build();
+
+    SingleRowSet batch = fixture.rowSetBuilder(schema)
+        .add("x")
+        .add("y")
+        .add("z")
+        .build();
+
+    zapOffset(batch, 0, 1);
+
+    // Validator should catch the error.
+
+    BatchValidator validator = new BatchValidator(batch.vectorAccessible(), true);
+    validator.validate();
+    List<String> errors = validator.errors();
+    assertEquals(1, errors.size());
+    assertTrue(errors.get(0).contains("Offset (0) must be 0"));
+    batch.clear();
+  }
+
+  public void zapOffset(SingleRowSet batch, int index, int bogusValue) {
+
+    // Here we are evil: stomp on an offset to simulate corruption.
+    // Don't do this in real code!
+
+    VectorAccessible va = batch.vectorAccessible();
+    @SuppressWarnings("resource")
+    ValueVector v = va.iterator().next().getValueVector();
+    VarCharVector vc = (VarCharVector) v;
+    @SuppressWarnings("resource")
+    UInt4Vector ov = vc.getOffsetVector();
+    ov.getMutator().set(index, bogusValue);
+  }
+
+  @Test
+  public void testVariableCorruptMiddleLow() {
+    BatchSchema schema = new SchemaBuilder()
+        .add("a", MinorType.VARCHAR)
+        .build();
+
+    SingleRowSet batch = fixture.rowSetBuilder(schema)
+        .add("xx")
+        .add("yy")
+        .add("zz")
+        .build();
+
+    zapOffset(batch, 2, 1);
+
+    // Validator should catch the error.
+
+    BatchValidator validator = new BatchValidator(batch.vectorAccessible(), true);
+    validator.validate();
+    List<String> errors = validator.errors();
+    assertEquals(1, errors.size());
+    assertTrue(errors.get(0).contains("Decreasing offsets"));
+    batch.clear();
+  }
+
+  @Test
+  public void testVariableCorruptMiddleHigh() {
+    BatchSchema schema = new SchemaBuilder()
+        .add("a", MinorType.VARCHAR)
+        .build();
+
+    SingleRowSet batch = fixture.rowSetBuilder(schema)
+        .add("xx")
+        .add("yy")
+        .add("zz")
+        .build();
+
+    zapOffset(batch, 1, 10);
+
+    // Validator should catch the error.
+
+    BatchValidator validator = new BatchValidator(batch.vectorAccessible(), true);
+    validator.validate();
+    List<String> errors = validator.errors();
+    assertEquals(1, errors.size());
+    assertTrue(errors.get(0).contains("Decreasing offsets"));
+    batch.clear();
+  }
+
+  @Test
+  public void testVariableCorruptLastOutOfRange() {
+    BatchSchema schema = new SchemaBuilder()
+        .add("a", MinorType.VARCHAR)
+        .build();
+
+    SingleRowSet batch = fixture.rowSetBuilder(schema)
+        .add("xx")
+        .add("yy")
+        .add("zz")
+        .build();
+
+    zapOffset(batch, 3, 100_000);
+
+    // Validator should catch the error.
+
+    BatchValidator validator = new BatchValidator(batch.vectorAccessible(), true);
+    validator.validate();
+    List<String> errors = validator.errors();
+    assertEquals(1, errors.size());
+    assertTrue(errors.get(0).contains("Invalid offset"));
+    batch.clear();
+  }
+
+  @Test
+  public void testRepeatedBadArrayOffset() {
+    BatchSchema schema = new SchemaBuilder()
+        .add("a", MinorType.VARCHAR, DataMode.REPEATED)
+        .build();
+
+    SingleRowSet batch = fixture.rowSetBuilder(schema)
+        .add((Object) new String[] {})
+        .add((Object) new String[] {"fred", "barney", "wilma"})
+        .add((Object) new String[] {"dino"})
+        .build();
+
+    VectorAccessible va = batch.vectorAccessible();
+    @SuppressWarnings("resource")
+    ValueVector v = va.iterator().next().getValueVector();
+    RepeatedVarCharVector vc = (RepeatedVarCharVector) v;
+    @SuppressWarnings("resource")
+    UInt4Vector ov = vc.getOffsetVector();
+    ov.getMutator().set(3, 1);
+
+    BatchValidator validator = new BatchValidator(batch.vectorAccessible(), true);
+    validator.validate();
+    List<String> errors = validator.errors();
+    assertEquals(1, errors.size());
+    assertTrue(errors.get(0).contains("Decreasing offsets"));
+    batch.clear();
+  }
+
+  @Test
+  public void testRepeatedBadValueOffset() {
+    BatchSchema schema = new SchemaBuilder()
+        .add("a", MinorType.VARCHAR, DataMode.REPEATED)
+        .build();
+
+    SingleRowSet batch = fixture.rowSetBuilder(schema)
+        .add((Object) new String[] {})
+        .add((Object) new String[] {"fred", "barney", "wilma"})
+        .add((Object) new String[] {"dino"})
+        .build();
+
+    VectorAccessible va = batch.vectorAccessible();
+    @SuppressWarnings("resource")
+    ValueVector v = va.iterator().next().getValueVector();
+    RepeatedVarCharVector rvc = (RepeatedVarCharVector) v;
+    @SuppressWarnings("resource")
+    VarCharVector vc = rvc.getDataVector();
+    @SuppressWarnings("resource")
+    UInt4Vector ov = vc.getOffsetVector();
+    ov.getMutator().set(4, 100_000);
+
+    BatchValidator validator = new BatchValidator(batch.vectorAccessible(), true);
+    validator.validate();
+    List<String> errors = validator.errors();
+    assertEquals(1, errors.size());
+    assertTrue(errors.get(0).contains("Invalid offset"));
+    batch.clear();
+  }
+}

http://git-wip-us.apache.org/repos/asf/drill/blob/d7bc213b/exec/java-exec/src/test/java/org/apache/drill/exec/physical/impl/validate/TestValidationOptions.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/test/java/org/apache/drill/exec/physical/impl/validate/TestValidationOptions.java b/exec/java-exec/src/test/java/org/apache/drill/exec/physical/impl/validate/TestValidationOptions.java
new file mode 100644
index 0000000..d4e33b0
--- /dev/null
+++ b/exec/java-exec/src/test/java/org/apache/drill/exec/physical/impl/validate/TestValidationOptions.java
@@ -0,0 +1,135 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.drill.exec.physical.impl.validate;
+
+import static org.junit.Assert.assertFalse;
+
+import org.apache.drill.exec.ExecConstants;
+import org.apache.drill.exec.store.easy.text.compliant.CompliantTextRecordReader;
+import org.apache.drill.test.ClientFixture;
+import org.apache.drill.test.ClusterFixture;
+import org.apache.drill.test.DrillTest;
+import org.apache.drill.test.FixtureBuilder;
+import org.apache.drill.test.LogFixture;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Ignore;
+import org.junit.Test;
+
+import ch.qos.logback.classic.Level;
+
+@Ignore("requires manual verification")
+public class TestValidationOptions extends DrillTest {
+
+  protected static LogFixture logFixture;
+
+  @BeforeClass
+  public static void setUpBeforeClass() throws Exception {
+    logFixture = LogFixture.builder()
+        .toConsole()
+        .logger(BatchValidator.class, Level.TRACE)
+        .logger(IteratorValidatorCreator.class, Level.TRACE)
+        .logger(CompliantTextRecordReader.class, Level.TRACE)
+        .build();
+  }
+
+  @AfterClass
+  public static void tearDownAfterClass() throws Exception {
+    logFixture.close();
+  }
+
+  // To validate these tests, set breakpoints in ImplCreator
+  // and IteratorValidatorBatchIterator to see if the options
+  // work as expected.
+
+  @Test
+  public void testOptions() throws Exception {
+    FixtureBuilder builder = ClusterFixture.builder()
+        .maxParallelization(1)
+        .configProperty(ExecConstants.ENABLE_ITERATOR_VALIDATION, false)
+        .configProperty(ExecConstants.ENABLE_VECTOR_VALIDATION, false)
+        .sessionOption(ExecConstants.ENABLE_ITERATOR_VALIDATION_OPTION, true)
+        .sessionOption(ExecConstants.ENABLE_VECTOR_VALIDATION_OPTION, true)
+        ;
+    try (ClusterFixture cluster = builder.build();
+         ClientFixture client = cluster.clientFixture()) {
+
+      boolean hasAssertions = false;
+      assert hasAssertions = true;
+      assertFalse(hasAssertions);
+      String sql = "SELECT id_i, name_s10 FROM `mock`.`customers_10`";
+      client.queryBuilder().sql(sql).run();
+
+      client.alterSession(ExecConstants.ENABLE_VECTOR_VALIDATION_OPTION, false);
+      client.queryBuilder().sql(sql).run();
+
+      client.alterSession(ExecConstants.ENABLE_ITERATOR_VALIDATION_OPTION, false);
+      client.queryBuilder().sql(sql).run();
+    }
+  }
+
+  /**
+   * Config options override session options. Config options allow passing in
+   * the setting at run time on the command line. This is a work-around for the
+   * fact that the config system has no generic solution at present.
+   *
+   * @throws Exception if anything goes wrong
+   */
+
+  @Test
+  public void testConfig() throws Exception {
+    FixtureBuilder builder = ClusterFixture.builder()
+        .maxParallelization(1)
+        .configProperty(ExecConstants.ENABLE_ITERATOR_VALIDATION, true)
+        .configProperty(ExecConstants.ENABLE_VECTOR_VALIDATION, true)
+        .sessionOption(ExecConstants.ENABLE_ITERATOR_VALIDATION_OPTION, false)
+        .sessionOption(ExecConstants.ENABLE_VECTOR_VALIDATION_OPTION, false)
+        ;
+    try (ClusterFixture cluster = builder.build();
+         ClientFixture client = cluster.clientFixture()) {
+
+      boolean hasAssertions = false;
+      assert hasAssertions = true;
+      assertFalse(hasAssertions);
+      String sql = "SELECT id_i, name_s10 FROM `mock`.`customers_10`";
+      client.queryBuilder().sql(sql).run();
+    }
+  }
+
+  /**
+   * Should do no validation with all-default options.
+   *
+   * @throws Exception if anything goes wrong
+   */
+
+  @Test
+  public void testDefaults() throws Exception {
+    FixtureBuilder builder = ClusterFixture.builder()
+        .maxParallelization(1)
+        ;
+    try (ClusterFixture cluster = builder.build();
+         ClientFixture client = cluster.clientFixture()) {
+
+      boolean hasAssertions = false;
+      assert hasAssertions = true;
+      assertFalse(hasAssertions);
+      String sql = "SELECT id_i, name_s10 FROM `mock`.`customers_10`";
+      client.queryBuilder().sql(sql).run();
+    }
+  }
+}


[02/12] drill git commit: DRILL-5485: Remove WebServer dependency on DrillClient

Posted by jn...@apache.org.
DRILL-5485: Remove WebServer dependency on DrillClient

1. Added WebUserConnection/AnonWebUserConnection and their providers for authenticated and anonymous web users.
2. Updated to store the UserSession, BufferAllocator and other session state inside Jetty's HttpSession instead
	of in DrillUserPrincipal. A new instance of WebUserConnection is now created for each request; however,
	for authenticated users the UserSession and other state are re-used across requests, whereas for anonymous
	users they are created for each request and recycled after query execution.

close #829


Project: http://git-wip-us.apache.org/repos/asf/drill/repo
Commit: http://git-wip-us.apache.org/repos/asf/drill/commit/874bf629
Tree: http://git-wip-us.apache.org/repos/asf/drill/tree/874bf629
Diff: http://git-wip-us.apache.org/repos/asf/drill/diff/874bf629

Branch: refs/heads/master
Commit: 874bf6296dcd1a42c7cf7f097c1a6b5458010cbb
Parents: d38917b
Author: Sorabh Hamirwasia <sh...@maprtech.com>
Authored: Fri Apr 21 18:34:19 2017 -0700
Committer: Jinfeng Ni <jn...@apache.org>
Committed: Fri Jun 2 21:43:14 2017 -0700

----------------------------------------------------------------------
 .../org/apache/drill/exec/ExecConstants.java    |   2 +
 .../exec/ops/AccountingUserConnection.java      |   4 +-
 .../apache/drill/exec/ops/FragmentContext.java  |   2 +-
 .../apache/drill/exec/opt/BasicOptimizer.java   |   2 +-
 .../AbstractDisposableUserClientConnection.java | 107 ++++++++++++
 .../drill/exec/rpc/UserClientConnection.java    |  69 ++++++++
 .../apache/drill/exec/rpc/user/UserServer.java  |  42 +----
 .../drill/exec/server/DrillbitContext.java      |  16 +-
 .../drill/exec/server/rest/DrillRestServer.java | 149 ++++++++++++++++-
 .../drill/exec/server/rest/QueryResources.java  |  51 +++---
 .../drill/exec/server/rest/QueryWrapper.java    | 157 ++++--------------
 .../drill/exec/server/rest/WebServer.java       |  32 ++--
 .../exec/server/rest/WebSessionResources.java   |  84 ++++++++++
 .../exec/server/rest/WebUserConnection.java     | 164 +++++++++++++++++++
 .../rest/auth/AbstractDrillLoginService.java    |  95 -----------
 .../server/rest/auth/DrillRestLoginService.java |  75 +++++++--
 .../server/rest/auth/DrillUserPrincipal.java    |  82 ++--------
 .../apache/drill/exec/work/foreman/Foreman.java |   2 +-
 .../work/prepare/PreparedStatementProvider.java |  78 +++------
 .../drill/exec/work/user/PlanSplitter.java      |   2 +-
 .../apache/drill/exec/work/user/UserWorker.java |   2 +-
 .../src/main/resources/drill-module.conf        |   6 +
 .../apache/drill/exec/client/DumpCatTest.java   |   2 +-
 .../drill/exec/fn/impl/TestMathFunctions.java   |   2 +-
 .../drill/exec/fn/impl/TestMultiInputAdd.java   |   4 +-
 .../exec/fn/impl/TestNewMathFunctions.java      |  12 +-
 .../exec/fn/impl/TestRepeatedFunction.java      |   2 +-
 .../exec/physical/impl/TestCastFunctions.java   |  18 +-
 .../physical/impl/TestComparisonFunctions.java  |  16 +-
 .../physical/impl/TestConvertFunctions.java     |  12 +-
 .../impl/TestImplicitCastFunctions.java         |  10 +-
 .../exec/physical/impl/TestOptiqPlans.java      |   2 +-
 .../physical/impl/TestReverseImplicitCast.java  |   4 +-
 .../exec/physical/impl/TestSimpleFunctions.java |   8 +-
 .../exec/physical/impl/TestStringFunctions.java |  40 ++---
 .../drill/exec/physical/impl/agg/TestAgg.java   |   2 +-
 .../physical/impl/filter/TestSimpleFilter.java  |   2 +-
 .../exec/physical/impl/join/TestHashJoin.java   |  16 +-
 .../exec/physical/impl/join/TestMergeJoin.java  |  11 +-
 .../physical/impl/limit/TestSimpleLimit.java    |  12 +-
 .../impl/project/TestSimpleProjection.java      |   2 +-
 .../exec/physical/impl/sort/TestSimpleSort.java |   2 +-
 .../impl/trace/TestTraceMultiRecordBatch.java   |   2 +-
 .../impl/trace/TestTraceOutputDump.java         |   2 +-
 .../physical/impl/union/TestSimpleUnion.java    |   4 +-
 .../drill/exec/record/TestRecordIterator.java   |   6 +-
 .../store/parquet/ParquetRecordReaderTest.java  |   4 +-
 47 files changed, 871 insertions(+), 549 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/drill/blob/874bf629/exec/java-exec/src/main/java/org/apache/drill/exec/ExecConstants.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/ExecConstants.java b/exec/java-exec/src/main/java/org/apache/drill/exec/ExecConstants.java
index 7c681c1..18f69d5 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/ExecConstants.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/ExecConstants.java
@@ -99,6 +99,8 @@ public interface ExecConstants {
   String HTTP_CORS_ALLOWED_METHODS = "drill.exec.http.cors.allowedMethods";
   String HTTP_CORS_ALLOWED_HEADERS = "drill.exec.http.cors.allowedHeaders";
   String HTTP_CORS_CREDENTIALS = "drill.exec.http.cors.credentials";
+  String HTTP_SESSION_MEMORY_RESERVATION = "drill.exec.http.session.memory.reservation";
+  String HTTP_SESSION_MEMORY_MAXIMUM = "drill.exec.http.session.memory.maximum";
   String HTTP_SESSION_MAX_IDLE_SECS = "drill.exec.http.session_max_idle_secs";
   String HTTP_KEYSTORE_PATH = "javax.net.ssl.keyStore";
   String HTTP_KEYSTORE_PASSWORD = "javax.net.ssl.keyStorePassword";

http://git-wip-us.apache.org/repos/asf/drill/blob/874bf629/exec/java-exec/src/main/java/org/apache/drill/exec/ops/AccountingUserConnection.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/ops/AccountingUserConnection.java b/exec/java-exec/src/main/java/org/apache/drill/exec/ops/AccountingUserConnection.java
index e3add13..7a01fcd 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/ops/AccountingUserConnection.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/ops/AccountingUserConnection.java
@@ -20,10 +20,10 @@ package org.apache.drill.exec.ops;
 import org.apache.drill.exec.physical.impl.materialize.QueryWritableBatch;
 import org.apache.drill.exec.proto.GeneralRPCProtos.Ack;
 import org.apache.drill.exec.rpc.RpcOutcomeListener;
-import org.apache.drill.exec.rpc.user.UserServer.UserClientConnection;
+import org.apache.drill.exec.rpc.UserClientConnection;
 
 /**
- * Wrapper around a {@link org.apache.drill.exec.rpc.user.UserServer.UserClientConnection} that tracks the status of batches
+ * Wrapper around a {@link UserClientConnection} that tracks the status of batches
  * sent to User.
  */
 public class AccountingUserConnection {

http://git-wip-us.apache.org/repos/asf/drill/blob/874bf629/exec/java-exec/src/main/java/org/apache/drill/exec/ops/FragmentContext.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/ops/FragmentContext.java b/exec/java-exec/src/main/java/org/apache/drill/exec/ops/FragmentContext.java
index 8335547..badf70c 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/ops/FragmentContext.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/ops/FragmentContext.java
@@ -48,7 +48,7 @@ import org.apache.drill.exec.proto.helper.QueryIdHelper;
 import org.apache.drill.exec.rpc.RpcException;
 import org.apache.drill.exec.rpc.RpcOutcomeListener;
 import org.apache.drill.exec.rpc.control.ControlTunnel;
-import org.apache.drill.exec.rpc.user.UserServer.UserClientConnection;
+import org.apache.drill.exec.rpc.UserClientConnection;
 import org.apache.drill.exec.server.DrillbitContext;
 import org.apache.drill.exec.server.options.FragmentOptionManager;
 import org.apache.drill.exec.server.options.OptionList;

http://git-wip-us.apache.org/repos/asf/drill/blob/874bf629/exec/java-exec/src/main/java/org/apache/drill/exec/opt/BasicOptimizer.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/opt/BasicOptimizer.java b/exec/java-exec/src/main/java/org/apache/drill/exec/opt/BasicOptimizer.java
index 27c853a..2a378ff 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/opt/BasicOptimizer.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/opt/BasicOptimizer.java
@@ -53,7 +53,7 @@ import org.apache.drill.exec.physical.config.SelectionVectorRemover;
 import org.apache.drill.exec.physical.config.Sort;
 import org.apache.drill.exec.physical.config.StreamingAggregate;
 import org.apache.drill.exec.physical.config.WindowPOP;
-import org.apache.drill.exec.rpc.user.UserServer.UserClientConnection;
+import org.apache.drill.exec.rpc.UserClientConnection;
 import org.apache.drill.exec.server.options.OptionManager;
 import org.apache.drill.exec.store.StoragePlugin;
 import org.apache.calcite.rel.RelFieldCollation.Direction;

http://git-wip-us.apache.org/repos/asf/drill/blob/874bf629/exec/java-exec/src/main/java/org/apache/drill/exec/rpc/AbstractDisposableUserClientConnection.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/rpc/AbstractDisposableUserClientConnection.java b/exec/java-exec/src/main/java/org/apache/drill/exec/rpc/AbstractDisposableUserClientConnection.java
new file mode 100644
index 0000000..33536c6
--- /dev/null
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/rpc/AbstractDisposableUserClientConnection.java
@@ -0,0 +1,107 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.drill.exec.rpc;
+
+import com.google.common.base.Preconditions;
+import org.apache.drill.common.exceptions.UserException;
+import org.apache.drill.common.exceptions.UserRemoteException;
+import org.apache.drill.exec.proto.GeneralRPCProtos.Ack;
+import org.apache.drill.exec.proto.UserBitShared.DrillPBError;
+import org.apache.drill.exec.proto.UserBitShared.QueryId;
+import org.apache.drill.exec.proto.UserBitShared.QueryResult;
+import org.apache.drill.exec.proto.UserBitShared.QueryResult.QueryState;
+import org.apache.drill.exec.proto.helper.QueryIdHelper;
+
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.TimeUnit;
+
+/**
+ * Helps to run a query and await its results. Each inheriting subclass manages the session/connection
+ * state and submits the query with respect to that state. A subclass instance lives for a single query
+ * and is not re-used.
+ */
+public abstract class AbstractDisposableUserClientConnection implements UserClientConnection {
+  private static final org.slf4j.Logger logger =
+      org.slf4j.LoggerFactory.getLogger(AbstractDisposableUserClientConnection.class);
+
+  protected final CountDownLatch latch = new CountDownLatch(1);
+
+  protected volatile DrillPBError error;
+
+  protected volatile UserException exception;
+
+  /**
+   * Waits until the query completes or the timeout elapses.
+   *
+   * @return true if the query completed before the timeout, false otherwise
+   * @throws InterruptedException if interrupted while waiting
+   */
+  public boolean await(final long timeoutMillis) throws InterruptedException {
+    return latch.await(timeoutMillis, TimeUnit.MILLISECONDS);
+  }
+
+  /**
+   * Waits indefinitely until the query completes, rethrowing any failure. Currently used only for web users.
+   *
+   * @throws Exception if the query terminated with an error
+   */
+  public void await() throws Exception {
+    latch.await();
+    if (exception != null) {
+      throw exception;
+    }
+  }
+
+  @Override
+  public void sendResult(RpcOutcomeListener<Ack> listener, QueryResult result) {
+
+    Preconditions.checkState(result.hasQueryState());
+
+    // Release the wait latch once the query reaches a terminal state.
+    final QueryState state = result.getQueryState();
+    final QueryId queryId = result.getQueryId();
+
+    if (logger.isDebugEnabled()) {
+      logger.debug("Result arrived for QueryId: {} with QueryState: {}", QueryIdHelper.getQueryId(queryId), state);
+    }
+
+    switch (state) {
+      case FAILED:
+        error = result.getError(0);
+        exception = new UserRemoteException(error);
+        latch.countDown();
+        break;
+      case CANCELED:
+      case COMPLETED:
+        Preconditions.checkState(result.getErrorCount() == 0);
+        latch.countDown();
+        break;
+      default:
+        logger.error("Query with QueryId: {} is in unexpected state: {}", queryId, state);
+    }
+
+    // Notify the listener with ACK
+    listener.success(Acks.OK, null);
+  }
+
+  /**
+   * @return any error returned during query execution, or null if the query did not fail.
+   */
+  public DrillPBError getError() {
+    return error;
+  }
+}
\ No newline at end of file
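For context, a disposable connection subclass is driven once per query; the flow mirrors the QueryWrapper.run() change later in this patch. A minimal sketch (the wrapper method itself is hypothetical, exception handling elided):

    // One connection instance per query; WebUserConnection is the REST-side
    // subclass this patch adds.
    QueryWrapper.QueryResult runOnce(WorkManager workManager, WebUserConnection conn,
        RunQuery runQuery) throws Exception {
      // Submit the query to the local Drillbit work queue.
      final QueryId queryId = workManager.getUserWorker().submitWork(conn, runQuery);
      // Block until a terminal QueryState arrives; await() rethrows any
      // UserException captured by sendResult().
      conn.await();
      return new QueryWrapper.QueryResult(conn.columns, conn.results);
    }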

http://git-wip-us.apache.org/repos/asf/drill/blob/874bf629/exec/java-exec/src/main/java/org/apache/drill/exec/rpc/UserClientConnection.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/rpc/UserClientConnection.java b/exec/java-exec/src/main/java/org/apache/drill/exec/rpc/UserClientConnection.java
new file mode 100644
index 0000000..43247f8
--- /dev/null
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/rpc/UserClientConnection.java
@@ -0,0 +1,69 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.drill.exec.rpc;
+
+import io.netty.channel.ChannelFuture;
+import org.apache.drill.exec.physical.impl.materialize.QueryWritableBatch;
+import org.apache.drill.exec.proto.GeneralRPCProtos.Ack;
+import org.apache.drill.exec.proto.UserBitShared.QueryResult;
+import org.apache.drill.exec.rpc.user.UserSession;
+
+import java.net.SocketAddress;
+
+/**
+ * Interface for getting user session properties and interacting with user connection. Separating this interface from
+ * {@link AbstractRemoteConnection} implementation for user connection:
+ * <p><ul>
+ * <li> The connection is passed to the Foreman and Screen operators; passing this interface instead exposes fewer details.
+ * <li> Makes it easy to write wrappers around the user connection, which is helpful to tap the messages and data
+ * going to the actual client.
+ * </ul>
+ */
+public interface UserClientConnection {
+  /**
+   * @return User session object.
+   */
+  UserSession getSession();
+
+  /**
+   * Sends the query result outcome to the client. The RPC outcome is reported through <code>listener</code>.
+   *
+   * @param listener listener notified of the RPC outcome
+   * @param result final result of the query
+   */
+  void sendResult(RpcOutcomeListener<Ack> listener, QueryResult result);
+
+  /**
+   * Sends query data to the client. The RPC outcome is reported through <code>listener</code>.
+   *
+   * @param listener listener notified of the RPC outcome
+   * @param result batch of query data to send
+   */
+  void sendData(RpcOutcomeListener<Ack> listener, QueryWritableBatch result);
+
+  /**
+   * Returns the {@link ChannelFuture} which will be notified when this
+   * channel is closed.  This method always returns the same future instance.
+   */
+  ChannelFuture getChannelClosureFuture();
+
+  /**
+   * @return the remote client address.
+   */
+  SocketAddress getRemoteAddress();
+}
\ No newline at end of file
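The wrapper use case called out in the Javadoc above can be made concrete with a small delegating implementation that taps outgoing messages (hypothetical, not part of this patch):

    // Hypothetical tap: forwards everything to a real connection while
    // observing the traffic on its way to the client.
    public class TappingUserConnection implements UserClientConnection {
      private final UserClientConnection delegate;

      public TappingUserConnection(UserClientConnection delegate) {
        this.delegate = delegate;
      }

      @Override
      public UserSession getSession() {
        return delegate.getSession();
      }

      @Override
      public void sendResult(RpcOutcomeListener<Ack> listener, QueryResult result) {
        // Tap point: inspect the terminal query state before it reaches the client.
        delegate.sendResult(listener, result);
      }

      @Override
      public void sendData(RpcOutcomeListener<Ack> listener, QueryWritableBatch result) {
        // Tap point: observe each outgoing data batch.
        delegate.sendData(listener, result);
      }

      @Override
      public ChannelFuture getChannelClosureFuture() {
        return delegate.getChannelClosureFuture();
      }

      @Override
      public SocketAddress getRemoteAddress() {
        return delegate.getRemoteAddress();
      }
    }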

http://git-wip-us.apache.org/repos/asf/drill/blob/874bf629/exec/java-exec/src/main/java/org/apache/drill/exec/rpc/user/UserServer.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/rpc/user/UserServer.java b/exec/java-exec/src/main/java/org/apache/drill/exec/rpc/user/UserServer.java
index 543145f..35dbbe9 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/rpc/user/UserServer.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/rpc/user/UserServer.java
@@ -47,6 +47,7 @@ import org.apache.drill.exec.rpc.ProtobufLengthDecoder;
 import org.apache.drill.exec.rpc.RpcConstants;
 import org.apache.drill.exec.rpc.RpcException;
 import org.apache.drill.exec.rpc.RpcOutcomeListener;
+import org.apache.drill.exec.rpc.UserClientConnection;
 import org.apache.drill.exec.rpc.security.ServerAuthenticationHandler;
 import org.apache.drill.exec.rpc.security.plain.PlainFactory;
 import org.apache.drill.exec.rpc.user.UserServer.BitToUserConnection;
@@ -96,47 +97,6 @@ public class UserServer extends BasicServer<RpcType, BitToUserConnection> {
   }
 
   /**
-   * Interface for getting user session properties and interacting with user connection. Separating this interface from
-   * {@link AbstractRemoteConnection} implementation for user connection:
-   * <p><ul>
-   *   <li> Connection is passed to Foreman and Screen operators. Instead passing this interface exposes few details.
-   *   <li> Makes it easy to have wrappers around user connection which can be helpful to tap the messages and data
-   *        going to the actual client.
-   * </ul>
-   */
-  public interface UserClientConnection {
-    /**
-     * @return User session object.
-     */
-    UserSession getSession();
-
-    /**
-     * Send query result outcome to client. Outcome is returned through <code>listener</code>
-     * @param listener
-     * @param result
-     */
-    void sendResult(RpcOutcomeListener<Ack> listener, QueryResult result);
-
-    /**
-     * Send query data to client. Outcome is returned through <code>listener</code>
-     * @param listener
-     * @param result
-     */
-    void sendData(RpcOutcomeListener<Ack> listener, QueryWritableBatch result);
-
-    /**
-     * Returns the {@link ChannelFuture} which will be notified when this
-     * channel is closed.  This method always returns the same future instance.
-     */
-    ChannelFuture getChannelClosureFuture();
-
-    /**
-     * @return Return the client node address.
-     */
-    SocketAddress getRemoteAddress();
-  }
-
-  /**
    * {@link AbstractRemoteConnection} implementation for user connection. Also implements {@link UserClientConnection}.
    */
   public class BitToUserConnection extends AbstractServerConnection<BitToUserConnection>

http://git-wip-us.apache.org/repos/asf/drill/blob/874bf629/exec/java-exec/src/main/java/org/apache/drill/exec/server/DrillbitContext.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/server/DrillbitContext.java b/exec/java-exec/src/main/java/org/apache/drill/exec/server/DrillbitContext.java
index b8d3e68..973b97c 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/server/DrillbitContext.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/server/DrillbitContext.java
@@ -17,12 +17,8 @@
  */
 package org.apache.drill.exec.server;
 
-import static com.google.common.base.Preconditions.checkNotNull;
+import com.codahale.metrics.MetricRegistry;
 import io.netty.channel.EventLoopGroup;
-
-import java.util.Collection;
-import java.util.concurrent.ExecutorService;
-
 import org.apache.drill.common.config.DrillConfig;
 import org.apache.drill.common.config.LogicalPlanPersistence;
 import org.apache.drill.common.scanner.persistence.ScanResult;
@@ -38,12 +34,16 @@ import org.apache.drill.exec.proto.CoordinationProtos.DrillbitEndpoint;
 import org.apache.drill.exec.rpc.control.Controller;
 import org.apache.drill.exec.rpc.control.WorkEventBus;
 import org.apache.drill.exec.rpc.data.DataConnectionCreator;
+import org.apache.drill.exec.rpc.security.AuthenticatorProvider;
 import org.apache.drill.exec.server.options.SystemOptionManager;
 import org.apache.drill.exec.store.SchemaFactory;
 import org.apache.drill.exec.store.StoragePluginRegistry;
 import org.apache.drill.exec.store.sys.PersistentStoreProvider;
 
-import com.codahale.metrics.MetricRegistry;
+import java.util.Collection;
+import java.util.concurrent.ExecutorService;
+
+import static com.google.common.base.Preconditions.checkNotNull;
 
 public class DrillbitContext implements AutoCloseable {
 //  private static final org.slf4j.Logger logger = org.slf4j.LoggerFactory.getLogger(DrillbitContext.class);
@@ -232,6 +232,10 @@ public class DrillbitContext implements AutoCloseable {
     return table;
   }
 
+  public AuthenticatorProvider getAuthProvider() {
+    return context.getAuthProvider();
+  }
+
   @Override
   public void close() throws Exception {
     getOptionManager().close();

http://git-wip-us.apache.org/repos/asf/drill/blob/874bf629/exec/java-exec/src/main/java/org/apache/drill/exec/server/rest/DrillRestServer.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/server/rest/DrillRestServer.java b/exec/java-exec/src/main/java/org/apache/drill/exec/server/rest/DrillRestServer.java
index 0401d58..e88d1b0 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/server/rest/DrillRestServer.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/server/rest/DrillRestServer.java
@@ -17,7 +17,17 @@
  */
 package org.apache.drill.exec.server.rest;
 
+import com.fasterxml.jackson.databind.ObjectMapper;
+import com.fasterxml.jackson.jaxrs.base.JsonMappingExceptionMapper;
+import com.fasterxml.jackson.jaxrs.base.JsonParseExceptionMapper;
+import com.fasterxml.jackson.jaxrs.json.JacksonJaxbJsonProvider;
+import org.apache.drill.common.config.DrillConfig;
 import org.apache.drill.exec.ExecConstants;
+import org.apache.drill.exec.memory.BufferAllocator;
+import org.apache.drill.exec.proto.UserBitShared;
+import org.apache.drill.exec.rpc.user.UserSession;
+import org.apache.drill.exec.server.DrillbitContext;
+import org.apache.drill.exec.server.rest.WebUserConnection.AnonWebUserConnection;
 import org.apache.drill.exec.server.rest.auth.AuthDynamicFeature;
 import org.apache.drill.exec.server.rest.auth.DrillUserPrincipal;
 import org.apache.drill.exec.server.rest.auth.DrillUserPrincipal.AnonDrillUserPrincipal;
@@ -36,13 +46,13 @@ import org.glassfish.jersey.server.ServerProperties;
 import org.glassfish.jersey.server.filter.RolesAllowedDynamicFeature;
 import org.glassfish.jersey.server.mvc.freemarker.FreemarkerMvcFeature;
 
-import com.fasterxml.jackson.databind.ObjectMapper;
-import com.fasterxml.jackson.jaxrs.base.JsonMappingExceptionMapper;
-import com.fasterxml.jackson.jaxrs.base.JsonParseExceptionMapper;
-import com.fasterxml.jackson.jaxrs.json.JacksonJaxbJsonProvider;
-
 import javax.inject.Inject;
 import javax.servlet.http.HttpServletRequest;
+import javax.servlet.http.HttpSession;
+import java.net.InetAddress;
+import java.net.InetSocketAddress;
+import java.net.SocketAddress;
+import java.security.Principal;
 
 public class DrillRestServer extends ResourceConfig {
   static final org.slf4j.Logger logger = org.slf4j.LoggerFactory.getLogger(DrillRestServer.class);
@@ -70,7 +80,8 @@ public class DrillRestServer extends ResourceConfig {
     }
 
     //disable moxy so it doesn't conflict with jackson.
-    final String disableMoxy = PropertiesHelper.getPropertyNameForRuntime(CommonProperties.MOXY_JSON_FEATURE_DISABLE, getConfiguration().getRuntimeType());
+    final String disableMoxy = PropertiesHelper.getPropertyNameForRuntime(CommonProperties.MOXY_JSON_FEATURE_DISABLE,
+        getConfiguration().getRuntimeType());
     property(disableMoxy, true);
 
     register(JsonParseExceptionMapper.class);
@@ -91,13 +102,136 @@ public class DrillRestServer extends ResourceConfig {
         bind(new UserAuthEnabled(isAuthEnabled)).to(UserAuthEnabled.class);
         if (isAuthEnabled) {
           bindFactory(DrillUserPrincipalProvider.class).to(DrillUserPrincipal.class);
+          bindFactory(AuthWebUserConnectionProvider.class).to(WebUserConnection.class);
         } else {
           bindFactory(AnonDrillUserPrincipalProvider.class).to(DrillUserPrincipal.class);
+          bindFactory(AnonWebUserConnectionProvider.class).to(WebUserConnection.class);
         }
       }
     });
   }
 
+  public static class AuthWebUserConnectionProvider implements Factory<WebUserConnection> {
+
+    @Inject
+    HttpServletRequest request;
+
+    @Inject
+    WorkManager workManager;
+
+    @Override
+    public WebUserConnection provide() {
+      final HttpSession session = request.getSession();
+      final Principal sessionUserPrincipal = request.getUserPrincipal();
+
+      // If there is no valid principal, the user is not logged in yet.
+      if (sessionUserPrincipal == null) {
+        return null;
+      }
+
+      // User is logged in, get/set the WebSessionResources attribute
+      WebSessionResources webSessionResources =
+              (WebSessionResources) session.getAttribute(WebSessionResources.class.getSimpleName());
+
+      if (webSessionResources == null) {
+        // User is logging in for the first time
+        final DrillbitContext drillbitContext = workManager.getContext();
+        final DrillConfig config = drillbitContext.getConfig();
+        final UserSession drillUserSession = UserSession.Builder.newBuilder()
+                .withCredentials(UserBitShared.UserCredentials.newBuilder()
+                        .setUserName(sessionUserPrincipal.getName())
+                        .build())
+                .withOptionManager(drillbitContext.getOptionManager())
+                .setSupportComplexTypes(config.getBoolean(ExecConstants.CLIENT_SUPPORT_COMPLEX_TYPES))
+                .build();
+
+        // Only resolve the remote address on first login since it is a costly operation.
+        SocketAddress remoteAddress = null;
+        try {
+          // This can be slow as the underlying library will try to resolve the address
+          remoteAddress = new InetSocketAddress(InetAddress.getByName(request.getRemoteAddr()), request.getRemotePort());
+          session.setAttribute(SocketAddress.class.getSimpleName(), remoteAddress);
+        } catch (Exception ex) {
+          //no-op
+          logger.trace("Failed to get the remote address of the http session request", ex);
+        }
+
+        // Create per session BufferAllocator and set it in session
+        final String sessionAllocatorName = String.format("WebServer:AuthUserSession:%s", session.getId());
+        final BufferAllocator sessionAllocator = workManager.getContext().getAllocator().newChildAllocator(
+                sessionAllocatorName,
+                config.getLong(ExecConstants.HTTP_SESSION_MEMORY_RESERVATION),
+                config.getLong(ExecConstants.HTTP_SESSION_MEMORY_MAXIMUM));
+
+        // Create a WebSessionResources instance which owns the lifecycle of all the session resources.
+        // Set this instance as an attribute of the HttpSession, since it will be used until the session is destroyed.
+        webSessionResources = new WebSessionResources(sessionAllocator, remoteAddress, drillUserSession);
+        session.setAttribute(WebSessionResources.class.getSimpleName(), webSessionResources);
+      }
+      // Create a new WebUserConnection for the request
+      return new WebUserConnection(webSessionResources);
+    }
+
+    @Override
+    public void dispose(WebUserConnection instance) {
+
+    }
+  }
+
+  public static class AnonWebUserConnectionProvider implements Factory<WebUserConnection> {
+
+    @Inject
+    HttpServletRequest request;
+
+    @Inject
+    WorkManager workManager;
+
+    @Override
+    public WebUserConnection provide() {
+      final HttpSession session = request.getSession();
+      final DrillbitContext drillbitContext = workManager.getContext();
+      final DrillConfig config = drillbitContext.getConfig();
+
+      // Create an allocator here for each request
+      final BufferAllocator sessionAllocator = drillbitContext.getAllocator()
+              .newChildAllocator("WebServer:AnonUserSession",
+                      config.getLong(ExecConstants.HTTP_SESSION_MEMORY_RESERVATION),
+                      config.getLong(ExecConstants.HTTP_SESSION_MEMORY_MAXIMUM));
+
+      final Principal sessionUserPrincipal = new AnonDrillUserPrincipal();
+
+      // Create a new UserSession for each request from an anonymous user
+      final UserSession drillUserSession = UserSession.Builder.newBuilder()
+              .withCredentials(UserBitShared.UserCredentials.newBuilder()
+                      .setUserName(sessionUserPrincipal.getName())
+                      .build())
+              .withOptionManager(drillbitContext.getOptionManager())
+              .setSupportComplexTypes(drillbitContext.getConfig().getBoolean(ExecConstants.CLIENT_SUPPORT_COMPLEX_TYPES))
+              .build();
+
+      // Try to get the remote address, leaving it null on failure.
+      SocketAddress remoteAddress = null;
+      try {
+        // This can be slow as the underlying library will try to resolve the address
+        remoteAddress = new InetSocketAddress(InetAddress.getByName(request.getRemoteAddr()), request.getRemotePort());
+      } catch (Exception ex) {
+        // no-op
+        logger.trace("Failed to get the remote address of the http session request", ex);
+      }
+
+      final WebSessionResources webSessionResources = new WebSessionResources(sessionAllocator,
+              remoteAddress, drillUserSession);
+
+      // Create an AnonWebUserConnection for this request
+      return new AnonWebUserConnection(webSessionResources);
+    }
+
+    @Override
+    public void dispose(WebUserConnection instance) {
+
+    }
+  }
+
   // Provider which injects DrillUserPrincipal directly instead of getting it from SecurityContext and typecasting
   public static class DrillUserPrincipalProvider implements Factory<DrillUserPrincipal> {
 
@@ -116,12 +250,11 @@ public class DrillRestServer extends ResourceConfig {
 
   // Provider which creates and cleanups DrillUserPrincipal for anonymous (auth disabled) mode
   public static class AnonDrillUserPrincipalProvider implements Factory<DrillUserPrincipal> {
-    @Inject WorkManager workManager;
 
     @RequestScoped
     @Override
     public DrillUserPrincipal provide() {
-      return new AnonDrillUserPrincipal(workManager.getContext());
+      return new AnonDrillUserPrincipal();
     }
 
     @Override

http://git-wip-us.apache.org/repos/asf/drill/blob/874bf629/exec/java-exec/src/main/java/org/apache/drill/exec/server/rest/QueryResources.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/server/rest/QueryResources.java b/exec/java-exec/src/main/java/org/apache/drill/exec/server/rest/QueryResources.java
index 433efaf..99e26ff 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/server/rest/QueryResources.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/server/rest/QueryResources.java
@@ -1,4 +1,4 @@
-/**
+/*
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
  * distributed with this work for additional information
@@ -17,8 +17,14 @@
  */
 package org.apache.drill.exec.server.rest;
 
-import java.util.List;
-import java.util.Map;
+import com.google.common.base.CharMatcher;
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.Lists;
+import org.apache.drill.exec.server.rest.DrillRestServer.UserAuthEnabled;
+import org.apache.drill.exec.server.rest.auth.DrillUserPrincipal;
+import org.apache.drill.exec.server.rest.QueryWrapper.QueryResult;
+import org.apache.drill.exec.work.WorkManager;
+import org.glassfish.jersey.server.mvc.Viewable;
 
 import javax.annotation.security.RolesAllowed;
 import javax.inject.Inject;
@@ -30,16 +36,8 @@ import javax.ws.rs.Path;
 import javax.ws.rs.Produces;
 import javax.ws.rs.core.MediaType;
 import javax.ws.rs.core.SecurityContext;
-
-import com.google.common.base.CharMatcher;
-import com.google.common.collect.ImmutableList;
-import com.google.common.collect.Lists;
-import org.apache.drill.exec.client.DrillClient;
-import org.apache.drill.exec.memory.BufferAllocator;
-import org.apache.drill.exec.server.rest.DrillRestServer.UserAuthEnabled;
-import org.apache.drill.exec.server.rest.auth.DrillUserPrincipal;
-import org.apache.drill.exec.work.WorkManager;
-import org.glassfish.jersey.server.mvc.Viewable;
+import java.util.List;
+import java.util.Map;
 
 @Path("/")
 @RolesAllowed(DrillUserPrincipal.AUTHENTICATED_ROLE)
@@ -49,7 +47,8 @@ public class QueryResources {
   @Inject UserAuthEnabled authEnabled;
   @Inject WorkManager work;
   @Inject SecurityContext sc;
-  @Inject DrillUserPrincipal principal;
+  @Inject WebUserConnection webUserConnection;
+
 
   @GET
   @Path("/query")
@@ -62,15 +61,13 @@ public class QueryResources {
   @Path("/query.json")
   @Consumes(MediaType.APPLICATION_JSON)
   @Produces(MediaType.APPLICATION_JSON)
-  public QueryWrapper.QueryResult submitQueryJSON(QueryWrapper query) throws Exception {
-    DrillClient drillClient = null;
-
+  public QueryResult submitQueryJSON(QueryWrapper query) throws Exception {
     try {
-      final BufferAllocator allocator = work.getContext().getAllocator();
-      drillClient = principal.getDrillClient();
-      return query.run(drillClient, allocator);
+      // Run the query
+      return query.run(work, webUserConnection);
     } finally {
-      principal.recycleDrillClient(drillClient);
+      // no-op for authenticated user
+      webUserConnection.cleanupSession();
     }
   }
 
@@ -78,12 +75,14 @@ public class QueryResources {
   @Path("/query")
   @Consumes(MediaType.APPLICATION_FORM_URLENCODED)
   @Produces(MediaType.TEXT_HTML)
-  public Viewable submitQuery(@FormParam("query") String query, @FormParam("queryType") String queryType) throws Exception {
+  public Viewable submitQuery(@FormParam("query") String query,
+                              @FormParam("queryType") String queryType) throws Exception {
     try {
       final String trimmedQueryString = CharMatcher.is(';').trimTrailingFrom(query.trim());
-      final QueryWrapper.QueryResult result = submitQueryJSON(new QueryWrapper(trimmedQueryString, queryType));
+      final QueryResult result = submitQueryJSON(new QueryWrapper(trimmedQueryString, queryType));
+
       return ViewableWithPermissions.create(authEnabled.get(), "/rest/query/result.ftl", sc, new TabularResult(result));
-    } catch(Exception | Error e) {
+    } catch (Exception | Error e) {
       logger.error("Query from Web UI Failed", e);
       return ViewableWithPermissions.create(authEnabled.get(), "/rest/query/errorMessage.ftl", sc, e);
     }
@@ -93,7 +92,7 @@ public class QueryResources {
     private final List<String> columns;
     private final List<List<String>> rows;
 
-    public TabularResult(QueryWrapper.QueryResult result) {
+    public TabularResult(QueryResult result) {
       final List<List<String>> rows = Lists.newArrayList();
       for (Map<String, String> rowMap:result.rows) {
         final List<String> row = Lists.newArrayList();
@@ -119,4 +118,6 @@ public class QueryResources {
       return rows;
     }
   }
+
+
 }

http://git-wip-us.apache.org/repos/asf/drill/blob/874bf629/exec/java-exec/src/main/java/org/apache/drill/exec/server/rest/QueryWrapper.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/server/rest/QueryWrapper.java b/exec/java-exec/src/main/java/org/apache/drill/exec/server/rest/QueryWrapper.java
index 6784b82..4a168dd 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/server/rest/QueryWrapper.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/server/rest/QueryWrapper.java
@@ -1,4 +1,4 @@
-/**
+/*
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
  * distributed with this work for additional information
@@ -18,41 +18,27 @@
 
 package org.apache.drill.exec.server.rest;
 
+import com.fasterxml.jackson.annotation.JsonCreator;
+import com.fasterxml.jackson.annotation.JsonProperty;
+import com.google.common.collect.Maps;
+import org.apache.drill.exec.proto.UserBitShared.QueryId;
+import org.apache.drill.exec.proto.UserBitShared.QueryType;
+import org.apache.drill.exec.proto.UserProtos.RunQuery;
+import org.apache.drill.exec.proto.UserProtos.QueryResultsMode;
+import org.apache.drill.exec.work.WorkManager;
+
+import javax.xml.bind.annotation.XmlRootElement;
 import java.util.Collection;
 import java.util.List;
 import java.util.Map;
-import java.util.Set;
-import java.util.concurrent.CountDownLatch;
-
-import javax.xml.bind.annotation.XmlRootElement;
-
-import com.google.common.collect.Lists;
-import com.google.common.collect.Maps;
-import com.google.common.collect.Sets;
-
-import org.apache.drill.common.exceptions.UserException;
-import org.apache.drill.exec.client.DrillClient;
-import org.apache.drill.exec.exception.SchemaChangeException;
-import org.apache.drill.exec.memory.BufferAllocator;
-import org.apache.drill.exec.proto.UserBitShared;
-import org.apache.drill.exec.proto.UserBitShared.QueryResult.QueryState;
-import org.apache.drill.exec.record.RecordBatchLoader;
-import org.apache.drill.exec.record.VectorWrapper;
-import org.apache.drill.exec.rpc.ConnectionThrottle;
-import org.apache.drill.exec.rpc.user.QueryDataBatch;
-import org.apache.drill.exec.rpc.user.UserResultsListener;
-import org.apache.drill.exec.vector.ValueVector;
-
-import com.fasterxml.jackson.annotation.JsonCreator;
-import com.fasterxml.jackson.annotation.JsonProperty;
-import com.google.common.base.Preconditions;
 
 @XmlRootElement
 public class QueryWrapper {
   private static final org.slf4j.Logger logger = org.slf4j.LoggerFactory.getLogger(QueryWrapper.class);
 
-  private String query;
-  private String queryType;
+  private final String query;
+
+  private final String queryType;
 
   @JsonCreator
   public QueryWrapper(@JsonProperty("query") String query, @JsonProperty("queryType") String queryType) {
@@ -68,36 +54,38 @@ public class QueryWrapper {
     return queryType;
   }
 
-  public UserBitShared.QueryType getType() {
-    UserBitShared.QueryType type = UserBitShared.QueryType.SQL;
-    switch (queryType) {
-      case "SQL" : type = UserBitShared.QueryType.SQL; break;
-      case "LOGICAL" : type = UserBitShared.QueryType.LOGICAL; break;
-      case "PHYSICAL" : type = UserBitShared.QueryType.PHYSICAL; break;
-    }
-    return type;
+  public QueryType getType() {
+    return QueryType.valueOf(queryType);
   }
 
-  public QueryResult run(final DrillClient client, final BufferAllocator allocator) throws Exception {
-    Listener listener = new Listener(allocator);
-    client.runQuery(getType(), query, listener);
-    listener.waitForCompletion();
-    if (listener.results.isEmpty()) {
-      listener.results.add(Maps.<String, String>newHashMap());
+  public QueryResult run(final WorkManager workManager, final WebUserConnection webUserConnection) throws Exception {
+
+    final RunQuery runQuery = RunQuery.newBuilder().setType(getType())
+        .setPlan(getQuery())
+        .setResultsMode(QueryResultsMode.STREAM_FULL)
+        .build();
+
+    // Submit user query to Drillbit work queue.
+    final QueryId queryId = workManager.getUserWorker().submitWork(webUserConnection, runQuery);
+
+    // Wait until the query execution completes or an error occurs while submitting the query
+    webUserConnection.await();
+
+    if (logger.isTraceEnabled()) {
+      logger.trace("Query {} is completed ", queryId);
     }
 
-    final Map<String, String> first = listener.results.get(0);
-    for (String columnName : listener.columns) {
-      if (!first.containsKey(columnName)) {
-        first.put(columnName, null);
-      }
+    if (webUserConnection.results.isEmpty()) {
+      webUserConnection.results.add(Maps.<String, String>newHashMap());
     }
 
-    return new QueryResult(listener.columns, listener.results);
+    // Return the QueryResult.
+    return new QueryResult(webUserConnection.columns, webUserConnection.results);
   }
 
   public static class QueryResult {
     public final Collection<String> columns;
+
     public final List<Map<String, String>> rows;
 
     public QueryResult(Collection<String> columns, List<Map<String, String>> rows) {
@@ -111,77 +99,4 @@ public class QueryWrapper {
     return "QueryRequest [queryType=" + queryType + ", query=" + query + "]";
   }
 
-
-  private static class Listener implements UserResultsListener {
-    private volatile UserException exception;
-    private final CountDownLatch latch = new CountDownLatch(1);
-    private final BufferAllocator allocator;
-    public final List<Map<String, String>> results = Lists.newArrayList();
-    public final Set<String> columns = Sets.newLinkedHashSet();
-
-    Listener(BufferAllocator allocator) {
-      this.allocator = Preconditions.checkNotNull(allocator, "allocator cannot be null");
-    }
-
-    @Override
-    public void submissionFailed(UserException ex) {
-      exception = ex;
-      logger.error("Query Failed", ex);
-      latch.countDown();
-    }
-
-    @Override
-    public void queryCompleted(QueryState state) {
-      latch.countDown();
-    }
-
-    @Override
-    public void dataArrived(QueryDataBatch result, ConnectionThrottle throttle) {
-      try {
-        final int rows = result.getHeader().getRowCount();
-        if (result.hasData()) {
-          RecordBatchLoader loader = null;
-          try {
-            loader = new RecordBatchLoader(allocator);
-            loader.load(result.getHeader().getDef(), result.getData());
-            // TODO:  Clean:  DRILL-2933:  That load(...) no longer throws
-            // SchemaChangeException, so check/clean catch clause below.
-            for (int i = 0; i < loader.getSchema().getFieldCount(); ++i) {
-              columns.add(loader.getSchema().getColumn(i).getPath());
-            }
-            for (int i = 0; i < rows; ++i) {
-              final Map<String, String> record = Maps.newHashMap();
-              for (VectorWrapper<?> vw : loader) {
-                final String field = vw.getValueVector().getMetadata().getNamePart().getName();
-                final ValueVector.Accessor accessor = vw.getValueVector().getAccessor();
-                final Object value = i < accessor.getValueCount() ? accessor.getObject(i) : null;
-                final String display = value == null ? null : value.toString();
-                record.put(field, display);
-              }
-              results.add(record);
-            }
-          } finally {
-            if (loader != null) {
-              loader.clear();
-            }
-          }
-        }
-      } catch (SchemaChangeException e) {
-        throw new RuntimeException(e);
-      } finally {
-        result.release();
-      }
-    }
-
-    @Override
-    public void queryIdArrived(UserBitShared.QueryId queryId) {
-    }
-
-    public void waitForCompletion() throws Exception {
-      latch.await();
-      if (exception != null) {
-        throw exception;
-      }
-    }
-  }
 }
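One behavioral change worth noting here: the old getType() switch silently mapped any unrecognized string to SQL, while QueryType.valueOf() is strict and case-sensitive, so malformed requests now fail fast:

    QueryType ok = QueryType.valueOf("PHYSICAL");  // PHYSICAL
    QueryType bad = QueryType.valueOf("sql");      // throws IllegalArgumentException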

http://git-wip-us.apache.org/repos/asf/drill/blob/874bf629/exec/java-exec/src/main/java/org/apache/drill/exec/server/rest/WebServer.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/server/rest/WebServer.java b/exec/java-exec/src/main/java/org/apache/drill/exec/server/rest/WebServer.java
index 685a823..b3fb692 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/server/rest/WebServer.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/server/rest/WebServer.java
@@ -6,9 +6,9 @@
  * to you under the Apache License, Version 2.0 (the
  * "License"); you may not use this file except in compliance
  * with the License.  You may obtain a copy of the License at
- *
+ * <p>
  * http://www.apache.org/licenses/LICENSE-2.0
- *
+ * <p>
  * Unless required by applicable law or agreed to in writing, software
  * distributed under the License is distributed on an "AS IS" BASIS,
  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
@@ -90,9 +90,13 @@ public class WebServer implements AutoCloseable {
   private static final org.slf4j.Logger logger = org.slf4j.LoggerFactory.getLogger(WebServer.class);
 
   private final DrillConfig config;
+
   private final MetricRegistry metrics;
+
   private final WorkManager workManager;
+
   private final Server embeddedJetty;
+
   private final BootStrapContext context;
 
   /**
@@ -115,6 +119,7 @@ public class WebServer implements AutoCloseable {
   }
 
   private static final String BASE_STATIC_PATH = "/rest/static/";
+
   private static final String DRILL_ICON_RESOURCE_RELATIVE_PATH = "img/drill.ico";
 
   /**
@@ -126,8 +131,7 @@ public class WebServer implements AutoCloseable {
       return;
     }
     final boolean authEnabled = config.getBoolean(ExecConstants.USER_AUTHENTICATION_ENABLED);
-    if (authEnabled && !context.getAuthProvider()
-        .containsFactory(PlainFactory.SIMPLE_NAME)) {
+    if (authEnabled && !context.getAuthProvider().containsFactory(PlainFactory.SIMPLE_NAME)) {
       logger.warn("Not starting web server. Currently Drill supports web authentication only through " +
           "username/password. But PLAIN mechanism is not configured.");
       return;
@@ -154,8 +158,7 @@ public class WebServer implements AutoCloseable {
     servletHolder.setInitOrder(1);
     servletContextHandler.addServlet(servletHolder, "/*");
 
-    servletContextHandler.addServlet(
-        new ServletHolder(new MetricsServlet(metrics)), "/status/metrics");
+    servletContextHandler.addServlet(new ServletHolder(new MetricsServlet(metrics)), "/status/metrics");
     servletContextHandler.addServlet(new ServletHolder(new ThreadDumpServlet()), "/status/threads");
 
     final ServletHolder staticHolder = new ServletHolder("static", DefaultServlet.class);
@@ -170,8 +173,8 @@ public class WebServer implements AutoCloseable {
     servletContextHandler.addServlet(staticHolder, "/static/*");
 
     if (authEnabled) {
-        servletContextHandler.setSecurityHandler(createSecurityHandler());
-        servletContextHandler.setSessionHandler(createSessionHandler(servletContextHandler.getSecurityHandler()));
+      servletContextHandler.setSecurityHandler(createSecurityHandler());
+      servletContextHandler.setSessionHandler(createSessionHandler(servletContextHandler.getSecurityHandler()));
     }
 
     if (config.getBoolean(ExecConstants.HTTP_CORS_ENABLED)) {
@@ -185,7 +188,7 @@ public class WebServer implements AutoCloseable {
       holder.setInitParameter(CrossOriginFilter.ALLOW_CREDENTIALS_PARAM,
               String.valueOf(config.getBoolean(ExecConstants.HTTP_CORS_CREDENTIALS)));
 
-      for (String path: new String[] { "*.json", "/storage/*/enable/*", "/status*" }) {
+      for (String path : new String[]{"*.json", "/storage/*/enable/*", "/status*"}) {
         servletContextHandler.addFilter(holder, path, EnumSet.of(DispatcherType.REQUEST));
       }
     }
@@ -203,7 +206,7 @@ public class WebServer implements AutoCloseable {
     sessionManager.addEventListener(new HttpSessionListener() {
       @Override
       public void sessionCreated(HttpSessionEvent se) {
-        // No-op
+
       }
 
       @Override
@@ -219,6 +222,15 @@ public class WebServer implements AutoCloseable {
           securityHandler.logout(sessionAuth);
           session.removeAttribute(SessionAuthentication.__J_AUTHENTICATED);
         }
+
+        // Clear all the resources allocated for this session
+        final WebSessionResources webSessionResources =
+            (WebSessionResources) session.getAttribute(WebSessionResources.class.getSimpleName());
+
+        if (webSessionResources != null) {
+          webSessionResources.close();
+          session.removeAttribute(WebSessionResources.class.getSimpleName());
+        }
       }
     });
 

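The session-destroy hook above is what ultimately completes the closure future handed out by the new web connection (WebUserConnection, later in this patch). A sketch of observing it from query-side code (listener body illustrative):

    // The promise is failed with ChannelClosedException rather than succeeded,
    // so a listener should check isSuccess().
    webUserConnection.getChannelClosureFuture().addListener((ChannelFutureListener) future -> {
      if (!future.isSuccess()) {
        // The HTTP session was invalidated or timed out; treat the client as gone.
      }
    });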
http://git-wip-us.apache.org/repos/asf/drill/blob/874bf629/exec/java-exec/src/main/java/org/apache/drill/exec/server/rest/WebSessionResources.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/server/rest/WebSessionResources.java b/exec/java-exec/src/main/java/org/apache/drill/exec/server/rest/WebSessionResources.java
new file mode 100644
index 0000000..aeed51a
--- /dev/null
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/server/rest/WebSessionResources.java
@@ -0,0 +1,84 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.drill.exec.server.rest;
+
+import io.netty.channel.ChannelPromise;
+import io.netty.channel.DefaultChannelPromise;
+import org.apache.drill.common.AutoCloseables;
+import org.apache.drill.exec.memory.BufferAllocator;
+import org.apache.drill.exec.rpc.ChannelClosedException;
+import org.apache.drill.exec.rpc.user.UserSession;
+
+import java.net.SocketAddress;
+
+/**
+ * Holds all the resources required for a web user session and is responsible for their proper
+ * cleanup.
+ */
+public class WebSessionResources implements AutoCloseable {
+
+  private static final org.slf4j.Logger logger = org.slf4j.LoggerFactory.getLogger(WebSessionResources.class);
+
+  private BufferAllocator allocator;
+
+  private final SocketAddress remoteAddress;
+
+  private UserSession webUserSession;
+
+  private ChannelPromise closeFuture;
+
+  WebSessionResources(BufferAllocator allocator, SocketAddress remoteAddress, UserSession userSession) {
+    this.allocator = allocator;
+    this.remoteAddress = remoteAddress;
+    this.webUserSession = userSession;
+    closeFuture = new DefaultChannelPromise(null);
+  }
+
+  public UserSession getSession() {
+    return webUserSession;
+  }
+
+  public BufferAllocator getAllocator() {
+    return allocator;
+  }
+
+  public ChannelPromise getCloseFuture() {
+    return closeFuture;
+  }
+
+  public SocketAddress getRemoteAddress() {
+    return remoteAddress;
+  }
+
+  @Override
+  public void close() {
+
+    try {
+      AutoCloseables.close(webUserSession, allocator);
+    } catch (Exception ex) {
+      logger.error("Failure while closing the session resources", ex);
+    }
+
+    // Set the close future associated with this session.
+    if (closeFuture != null) {
+      closeFuture.setFailure(new ChannelClosedException("Http Session of the user is closed."));
+      closeFuture = null;
+    }
+  }
+}
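A minimal sketch of the intended lifecycle, with the constructor arguments as the providers in DrillRestServer supply them:

    // Build per-session resources, share them across one or more connections,
    // then close exactly once when the session ends.
    final WebSessionResources resources =
        new WebSessionResources(sessionAllocator, remoteAddress, userSession);

    final UserSession session = resources.getSession();  // drives the query

    // close() releases the UserSession and allocator, then fails the promise
    // with ChannelClosedException so anything watching the closure future
    // observes the "channel" as closed.
    resources.close();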

http://git-wip-us.apache.org/repos/asf/drill/blob/874bf629/exec/java-exec/src/main/java/org/apache/drill/exec/server/rest/WebUserConnection.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/server/rest/WebUserConnection.java b/exec/java-exec/src/main/java/org/apache/drill/exec/server/rest/WebUserConnection.java
new file mode 100644
index 0000000..62c6efd
--- /dev/null
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/server/rest/WebUserConnection.java
@@ -0,0 +1,164 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.drill.exec.server.rest;
+
+import com.google.common.collect.Lists;
+import com.google.common.collect.Maps;
+import com.google.common.collect.Sets;
+import io.netty.buffer.ByteBuf;
+import io.netty.buffer.DrillBuf;
+import io.netty.channel.ChannelFuture;
+import org.apache.drill.common.exceptions.UserException;
+import org.apache.drill.exec.memory.BufferAllocator;
+import org.apache.drill.exec.physical.impl.materialize.QueryWritableBatch;
+import org.apache.drill.exec.proto.GeneralRPCProtos.Ack;
+import org.apache.drill.exec.record.RecordBatchLoader;
+import org.apache.drill.exec.record.VectorWrapper;
+import org.apache.drill.exec.rpc.AbstractDisposableUserClientConnection;
+import org.apache.drill.exec.rpc.Acks;
+import org.apache.drill.exec.rpc.ConnectionThrottle;
+import org.apache.drill.exec.rpc.RpcOutcomeListener;
+import org.apache.drill.exec.rpc.user.UserSession;
+import org.apache.drill.exec.vector.ValueVector.Accessor;
+
+import java.net.SocketAddress;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+
+/**
+ * Represents the {@link UserClientConnection} for a web user submitting a query. It provides
+ * access to the UserSession executing the query. There is no actual physical channel corresponding to this connection
+ * wrapper.
+ */
+
+public class WebUserConnection extends AbstractDisposableUserClientConnection implements ConnectionThrottle {
+
+  private static final org.slf4j.Logger logger = org.slf4j.LoggerFactory.getLogger(WebUserConnection.class);
+
+  protected WebSessionResources webSessionResources;
+
+  public final List<Map<String, String>> results = Lists.newArrayList();
+
+  public final Set<String> columns = Sets.newLinkedHashSet();
+
+  WebUserConnection(WebSessionResources webSessionResources) {
+    this.webSessionResources = webSessionResources;
+  }
+
+  @Override
+  public UserSession getSession() {
+    return webSessionResources.getSession();
+  }
+
+  @Override
+  public void sendData(RpcOutcomeListener<Ack> listener, QueryWritableBatch result) {
+    // Check whether there is any data. The cast can overflow, but DrillBuf doesn't support allocating with a
+    // long byte count, so we preserve the earlier behavior and log a debug message for that case.
+    final int dataByteCount = (int) result.getByteCount();
+
+    if (dataByteCount <= 0) {
+      if (logger.isDebugEnabled()) {
+        logger.debug("Either no data received in this batch or there is BufferOverflow in dataByteCount: {}",
+            dataByteCount);
+      }
+      listener.success(Acks.OK, null);
+      return;
+    }
+
+    // At this point there is definitely data. Create a ByteBuf holding all of it.
+    final int rows = result.getHeader().getRowCount();
+    final BufferAllocator allocator = webSessionResources.getAllocator();
+    final DrillBuf bufferWithData = allocator.buffer(dataByteCount);
+    try {
+      final ByteBuf[] resultDataBuffers = result.getBuffers();
+
+      for (final ByteBuf buffer : resultDataBuffers) {
+        bufferWithData.writeBytes(buffer);
+        buffer.release();
+      }
+
+      final RecordBatchLoader loader = new RecordBatchLoader(allocator);
+      try {
+        loader.load(result.getHeader().getDef(), bufferWithData);
+        // TODO:  Clean:  DRILL-2933:  That load(...) no longer throws
+        // SchemaChangeException, so check/clean catch clause below.
+        for (int i = 0; i < loader.getSchema().getFieldCount(); ++i) {
+          columns.add(loader.getSchema().getColumn(i).getPath());
+        }
+        for (int i = 0; i < rows; ++i) {
+          final Map<String, String> record = Maps.newHashMap();
+          for (VectorWrapper<?> vw : loader) {
+            final String field = vw.getValueVector().getMetadata().getNamePart().getName();
+            final Accessor accessor = vw.getValueVector().getAccessor();
+            final Object value = i < accessor.getValueCount() ? accessor.getObject(i) : null;
+            final String display = value == null ? null : value.toString();
+            record.put(field, display);
+          }
+          results.add(record);
+        }
+      } finally {
+        loader.clear();
+      }
+    } catch (Exception e) {
+      exception = UserException.systemError(e).build(logger);
+    } finally {
+      // Notify the listener with Ack.OK in both the error and success cases, because the data was sent successfully from the Drillbit.
+      bufferWithData.release();
+      listener.success(Acks.OK, null);
+    }
+  }
+
+  @Override
+  public ChannelFuture getChannelClosureFuture() {
+    return webSessionResources.getCloseFuture();
+  }
+
+  @Override
+  public SocketAddress getRemoteAddress() {
+    return webSessionResources.getRemoteAddress();
+  }
+
+  @Override
+  public void setAutoRead(boolean enableAutoRead) {
+    // no-op
+  }
+
+  /**
+   * For an authenticated web user, no cleanup of {@link WebSessionResources} is done since they are re-used
+   * for all queries over the lifetime of the web session.
+   */
+  public void cleanupSession() {
+    // no-op
+  }
+
+  public static class AnonWebUserConnection extends WebUserConnection {
+
+    AnonWebUserConnection(WebSessionResources webSessionResources) {
+      super(webSessionResources);
+    }
+
+    /**
+     * For an anonymous web user, the {@link WebSessionResources} are cleaned up after each query request completes.
+     */
+    @Override
+    public void cleanupSession() {
+      webSessionResources.close();
+    }
+  }
+}
\ No newline at end of file
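Once await() returns, the accumulated output is available as plain Java collections; this is essentially what QueryWrapper.run() packages into its QueryResult. A sketch of consuming it:

    webUserConnection.await();
    for (Map<String, String> row : webUserConnection.results) {
      for (String column : webUserConnection.columns) {
        // The value may be null for NULL cells, or absent entirely if the
        // schema changed between batches.
        final String display = row.get(column);
      }
    }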

http://git-wip-us.apache.org/repos/asf/drill/blob/874bf629/exec/java-exec/src/main/java/org/apache/drill/exec/server/rest/auth/AbstractDrillLoginService.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/server/rest/auth/AbstractDrillLoginService.java b/exec/java-exec/src/main/java/org/apache/drill/exec/server/rest/auth/AbstractDrillLoginService.java
deleted file mode 100644
index 62ddca9..0000000
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/server/rest/auth/AbstractDrillLoginService.java
+++ /dev/null
@@ -1,95 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.drill.exec.server.rest.auth;
-
-import org.apache.drill.common.AutoCloseables;
-import org.apache.drill.exec.client.DrillClient;
-import org.apache.drill.exec.server.DrillbitContext;
-import org.eclipse.jetty.security.DefaultIdentityService;
-import org.eclipse.jetty.security.IdentityService;
-import org.eclipse.jetty.security.LoginService;
-import org.eclipse.jetty.server.UserIdentity;
-
-import java.util.Properties;
-
-/**
- * LoginService implementation which abstracts common functionality needed when user authentication is enabled or
- * disabled.
- */
-public abstract class AbstractDrillLoginService implements LoginService {
-  private static final org.slf4j.Logger logger = org.slf4j.LoggerFactory.getLogger(AbstractDrillLoginService.class);
-
-  protected final DrillbitContext drillbitContext;
-  protected IdentityService identityService = new DefaultIdentityService();
-
-  public AbstractDrillLoginService(final DrillbitContext drillbitContext) {
-    this.drillbitContext = drillbitContext;
-  }
-
-  protected DrillClient createDrillClient(final String userName, final String password) throws Exception {
-    DrillClient drillClient = null;
-
-    try {
-      // Create a DrillClient
-      drillClient = new DrillClient(drillbitContext.getConfig(),
-          drillbitContext.getClusterCoordinator(), drillbitContext.getAllocator());
-      final Properties props = new Properties();
-      props.setProperty("user", userName);
-      if (password != null) {
-        props.setProperty("password", password);
-      }
-      drillClient.connect(props);
-      return  drillClient;
-    } catch (final Exception e) {
-      AutoCloseables.close(e, drillClient);
-      throw e;
-    }
-  }
-
-  @Override
-  public boolean validate(UserIdentity user) {
-    // This is called for every request after authentication is complete to make sure the user is still valid.
-    // Once a user is authenticated we assume that the user is still valid. This behavior is similar to ODBC/JDBC where
-    // once a user is logged-in we don't recheck the credentials again in the same session.
-    return true;
-  }
-
-  @Override
-  public IdentityService getIdentityService() {
-    return identityService;
-  }
-
-  @Override
-  public void setIdentityService(IdentityService identityService) {
-    this.identityService = identityService;
-  }
-
-  /**
-   * This gets called whenever a session is invalidated (because of user logout) or timed out.
-   * @param user
-   */
-  @Override
-  public void logout(UserIdentity user) {
-    final DrillUserPrincipal principal = (DrillUserPrincipal) user.getUserPrincipal();
-    try {
-      principal.close();
-    } catch (final Exception e) {
-      logger.error("Failure in logging out.", e);
-    }
-  }
-}

http://git-wip-us.apache.org/repos/asf/drill/blob/874bf629/exec/java-exec/src/main/java/org/apache/drill/exec/server/rest/auth/DrillRestLoginService.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/server/rest/auth/DrillRestLoginService.java b/exec/java-exec/src/main/java/org/apache/drill/exec/server/rest/auth/DrillRestLoginService.java
index d865e94..2231ac7 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/server/rest/auth/DrillRestLoginService.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/server/rest/auth/DrillRestLoginService.java
@@ -1,4 +1,4 @@
-/**
+/*
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
  * distributed with this work for additional information
@@ -17,13 +17,17 @@
  */
 package org.apache.drill.exec.server.rest.auth;
 
-import org.apache.drill.common.AutoCloseables;
 import org.apache.drill.exec.ExecConstants;
-import org.apache.drill.exec.client.DrillClient;
-import org.apache.drill.exec.proto.UserProtos.HandshakeStatus;
+import org.apache.drill.exec.rpc.security.AuthenticatorFactory;
+import org.apache.drill.exec.rpc.security.plain.PlainFactory;
+import org.apache.drill.exec.rpc.user.security.UserAuthenticationException;
+import org.apache.drill.exec.rpc.user.security.UserAuthenticator;
 import org.apache.drill.exec.server.DrillbitContext;
 import org.apache.drill.exec.server.options.SystemOptionManager;
 import org.apache.drill.exec.util.ImpersonationUtil;
+import org.eclipse.jetty.security.DefaultIdentityService;
+import org.eclipse.jetty.security.IdentityService;
+import org.eclipse.jetty.security.LoginService;
 import org.eclipse.jetty.server.UserIdentity;
 
 import javax.security.auth.Subject;
@@ -33,11 +37,23 @@ import java.security.Principal;
  * LoginService used when user authentication is enabled in Drillbit. It validates the user against the user
  * authenticator set in BOOT config.
  */
-public class DrillRestLoginService extends AbstractDrillLoginService {
+public class DrillRestLoginService implements LoginService {
   private static final org.slf4j.Logger logger = org.slf4j.LoggerFactory.getLogger(DrillRestLoginService.class);
 
+  private final DrillbitContext drillbitContext;
+
+  private IdentityService identityService = new DefaultIdentityService();
+
   public DrillRestLoginService(final DrillbitContext drillbitContext) {
-    super(drillbitContext);
+    this.drillbitContext = drillbitContext;
+  }
+
+  @Override
+  public boolean validate(UserIdentity user) {
+    // This is called for every request after authentication is complete to make sure the user is still valid.
+    // Once a user is authenticated we assume that the user is still valid. This behavior is similar to ODBC/JDBC where
+    // once a user is logged-in we don't recheck the credentials again in the same session.
+    return true;
   }
 
   @Override
@@ -51,18 +67,26 @@ public class DrillRestLoginService extends AbstractDrillLoginService {
       return null;
     }
 
-    DrillClient drillClient = null;
-
     try {
-      // Create a DrillClient
-      drillClient = createDrillClient(username, (String)credentials);
+      // Authenticate the WebUser locally using the UserAuthenticator. If the WebServer is started, that guarantees
+      // the PLAIN mechanism is configured and an authenticator is available.
+      final AuthenticatorFactory plainFactory = drillbitContext.getAuthProvider()
+          .getAuthenticatorFactory(PlainFactory.SIMPLE_NAME);
+      final UserAuthenticator userAuthenticator = ((PlainFactory) plainFactory).getAuthenticator();
+
+      // Authenticate the user with configured Authenticator
+      userAuthenticator.authenticate(username, credentials.toString());
+
+      logger.debug("WebUser {} is successfully authenticated", username);
 
       final SystemOptionManager sysOptions = drillbitContext.getOptionManager();
+
       final boolean isAdmin = ImpersonationUtil.hasAdminPrivileges(username,
           sysOptions.getOption(ExecConstants.ADMIN_USERS_KEY).string_val,
           sysOptions.getOption(ExecConstants.ADMIN_USER_GROUPS_KEY).string_val);
 
-      final Principal userPrincipal = new DrillUserPrincipal(username, isAdmin, drillClient);
+      // Create the UserPrincipal corresponding to logged in user.
+      final Principal userPrincipal = new DrillUserPrincipal(username, isAdmin);
 
       final Subject subject = new Subject();
       subject.getPrincipals().add(userPrincipal);
@@ -76,13 +100,34 @@ public class DrillRestLoginService extends AbstractDrillLoginService {
         return identityService.newUserIdentity(subject, userPrincipal, DrillUserPrincipal.NON_ADMIN_USER_ROLES);
       }
     } catch (final Exception e) {
-      AutoCloseables.close(e, drillClient);
-      if (e.getMessage().contains(HandshakeStatus.AUTH_FAILED.toString())) {
-        logger.trace("Authentication failed for user '{}'", username, e);
+      if (e instanceof UserAuthenticationException) {
+        logger.debug("Authentication failed for WebUser '{}'", username, e);
       } else {
-        logger.error("Error while creating the DrillClient: user '{}'", username, e);
+        logger.error("UnExpected failure occurred for WebUser {} during login.", username, e);
       }
       return null;
     }
   }
+
+  @Override
+  public IdentityService getIdentityService() {
+    return identityService;
+  }
+
+  @Override
+  public void setIdentityService(IdentityService identityService) {
+    this.identityService = identityService;
+  }
+
+  /**
+   * This gets called whenever a session is invalidated (because of user logout) or timed out.
+   * @param user - logged in UserIdentity
+   */
+  @Override
+  public void logout(UserIdentity user) {
+    // no-op
+    if(logger.isTraceEnabled()) {
+      logger.trace("Web user {} logged out.", user.getUserPrincipal().getName());
+    }
+  }
 }
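
For context, a LoginService like the one above is plugged into Jetty's security handler by the embedding server. A minimal sketch of that wiring (the handler/authenticator setup is assumed here, not shown in this diff; the "/login" and "/error" paths are placeholders):

    import org.eclipse.jetty.security.ConstraintSecurityHandler;
    import org.eclipse.jetty.security.authentication.FormAuthenticator;

    // Sketch: wire DrillRestLoginService into an embedded Jetty server.
    ConstraintSecurityHandler security = new ConstraintSecurityHandler();
    security.setLoginService(new DrillRestLoginService(drillbitContext));
    security.setAuthenticator(new FormAuthenticator("/login", "/error", true));
    // Jetty then calls login(username, credentials) on form submit and
    // validate(user) on subsequent requests within the same session.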

http://git-wip-us.apache.org/repos/asf/drill/blob/874bf629/exec/java-exec/src/main/java/org/apache/drill/exec/server/rest/auth/DrillUserPrincipal.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/server/rest/auth/DrillUserPrincipal.java b/exec/java-exec/src/main/java/org/apache/drill/exec/server/rest/auth/DrillUserPrincipal.java
index 18539ff..6d8f301 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/server/rest/auth/DrillUserPrincipal.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/server/rest/auth/DrillUserPrincipal.java
@@ -1,4 +1,4 @@
-/**
+/*
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
  * distributed with this work for additional information
@@ -18,43 +18,37 @@
 package org.apache.drill.exec.server.rest.auth;
 
 import com.google.common.collect.ImmutableList;
-import org.apache.drill.common.AutoCloseables;
-import org.apache.drill.exec.client.DrillClient;
-import org.apache.drill.exec.server.DrillbitContext;
 import org.eclipse.jetty.security.MappedLoginService.RolePrincipal;
 
-import java.io.IOException;
 import java.security.Principal;
 import java.util.List;
 
+
 /**
- * Captures Drill user credentials and resources in a session.
+ * Captures Drill user credentials and privileges of the session user.
  */
-public class DrillUserPrincipal implements Principal, AutoCloseable {
+public class DrillUserPrincipal implements Principal {
   public static final String ANONYMOUS_USER = "anonymous";
 
   public static final String AUTHENTICATED_ROLE = "authenticated";
+
   public static final String ADMIN_ROLE = "admin";
 
-  public static final String[] ADMIN_USER_ROLES = new String[] { AUTHENTICATED_ROLE, ADMIN_ROLE };
-  public static final String[] NON_ADMIN_USER_ROLES = new String[] { AUTHENTICATED_ROLE };
+  public static final String[] ADMIN_USER_ROLES = new String[]{AUTHENTICATED_ROLE, ADMIN_ROLE};
 
-  public static final List<RolePrincipal> ADMIN_PRINCIPALS = ImmutableList.of(
-      new RolePrincipal(AUTHENTICATED_ROLE),
-      new RolePrincipal(ADMIN_ROLE));
+  public static final String[] NON_ADMIN_USER_ROLES = new String[]{AUTHENTICATED_ROLE};
 
-  public static final List<RolePrincipal> NON_ADMIN_PRINCIPALS =
-      ImmutableList.of(new RolePrincipal(AUTHENTICATED_ROLE));
+  public static final List<RolePrincipal> ADMIN_PRINCIPALS = ImmutableList.of(new RolePrincipal(AUTHENTICATED_ROLE), new RolePrincipal(ADMIN_ROLE));
 
-  protected DrillClient drillClient;
+  public static final List<RolePrincipal> NON_ADMIN_PRINCIPALS = ImmutableList.of(new RolePrincipal(AUTHENTICATED_ROLE));
 
   private final String userName;
+
   private final boolean isAdmin;
 
-  public DrillUserPrincipal(final String userName, final boolean isAdmin, final DrillClient drillClient) {
+  public DrillUserPrincipal(final String userName, final boolean isAdmin) {
     this.userName = userName;
     this.isAdmin = isAdmin;
-    this.drillClient = drillClient;
   }
 
   @Override
@@ -63,24 +57,10 @@ public class DrillUserPrincipal implements Principal, AutoCloseable {
   }
 
   /**
-   * @return Return {@link DrillClient} instanced with credentials of this user principal. Returned {@link DrillClient}
-   * must be returned using {@link #recycleDrillClient(DrillClient)} for proper resource cleanup.
-   */
-  public DrillClient getDrillClient() throws IOException {
-    return drillClient;
-  }
-
-  /**
-   * Return {@link DrillClient} returned from {@link #getDrillClient()} for proper resource cleanup or reuse.
-   */
-  public void recycleDrillClient(final DrillClient client) throws IOException {
-    // default is no-op. we reuse DrillClient
-  }
-
-  /**
   * Can the user identified by this principal manage (read) the profile owned by the given user?
+   *
    * @param profileOwner Owner of the profile.
-   * @return
+   * @return true if the session user may manage (read) the profile, false otherwise
    */
   public boolean canManageProfileOf(final String profileOwner) {
     return isAdmin || userName.equals(profileOwner);
@@ -88,49 +68,21 @@ public class DrillUserPrincipal implements Principal, AutoCloseable {
 
   /**
   * Can the user identified by this principal manage (cancel) the query issued by the given user?
+   *
    * @param queryUser User who launched the query.
-   * @return
+   * @return true if the session user may manage (cancel) the query, false otherwise
    */
   public boolean canManageQueryOf(final String queryUser) {
     return isAdmin || userName.equals(queryUser);
   }
 
-  @Override
-  public void close() throws Exception {
-    if (drillClient != null) {
-      drillClient.close();
-      drillClient = null; // Reset it to null to avoid closing multiple times.
-    }
-  }
-
   /**
    * {@link DrillUserPrincipal} for anonymous (auth disabled) mode.
    */
   public static class AnonDrillUserPrincipal extends DrillUserPrincipal {
-    private final DrillbitContext drillbitContext;
-
-    public AnonDrillUserPrincipal(final DrillbitContext drillbitContext) {
-      super(ANONYMOUS_USER, true /* in anonymous (auth disabled) mode all users are admins */, null);
-      this.drillbitContext = drillbitContext;
-    }
-
-    @Override
-    public DrillClient getDrillClient() throws IOException {
-      try {
-        // Create a DrillClient
-        drillClient = new DrillClient(drillbitContext.getConfig(),
-            drillbitContext.getClusterCoordinator(), drillbitContext.getAllocator());
-        drillClient.connect();
-        return  drillClient;
-      } catch (final Exception e) {
-        AutoCloseables.close(e, drillClient);
-        throw new IOException("Failed to create DrillClient: " + e.getMessage(), e);
-      }
-    }
 
-    @Override
-    public void recycleDrillClient(DrillClient client) throws IOException {
-      drillClient.close();
+    public AnonDrillUserPrincipal() {
+      super(ANONYMOUS_USER, true /* in anonymous (auth disabled) mode all users are admins */);
     }
   }
 }
\ No newline at end of file
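
A quick illustration of the simplified principal (the user names are made up):

    // Non-admin users may manage only their own profiles and queries.
    DrillUserPrincipal alice = new DrillUserPrincipal("alice", false /* isAdmin */);
    alice.canManageProfileOf("alice");  // true  - own profile
    alice.canManageProfileOf("bob");    // false - not an admin
    // Admins (and, with auth disabled, AnonDrillUserPrincipal) manage everything.
    new DrillUserPrincipal("admin", true).canManageQueryOf("bob");  // true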

http://git-wip-us.apache.org/repos/asf/drill/blob/874bf629/exec/java-exec/src/main/java/org/apache/drill/exec/work/foreman/Foreman.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/work/foreman/Foreman.java b/exec/java-exec/src/main/java/org/apache/drill/exec/work/foreman/Foreman.java
index a2b09a8..5e5fef0 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/work/foreman/Foreman.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/work/foreman/Foreman.java
@@ -73,7 +73,7 @@ import org.apache.drill.exec.rpc.BaseRpcOutcomeListener;
 import org.apache.drill.exec.rpc.RpcException;
 import org.apache.drill.exec.rpc.control.ControlTunnel;
 import org.apache.drill.exec.rpc.control.Controller;
-import org.apache.drill.exec.rpc.user.UserServer.UserClientConnection;
+import org.apache.drill.exec.rpc.UserClientConnection;
 import org.apache.drill.exec.server.DrillbitContext;
 import org.apache.drill.exec.server.options.OptionManager;
 import org.apache.drill.exec.testing.ControlsInjector;

http://git-wip-us.apache.org/repos/asf/drill/blob/874bf629/exec/java-exec/src/main/java/org/apache/drill/exec/work/prepare/PreparedStatementProvider.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/work/prepare/PreparedStatementProvider.java b/exec/java-exec/src/main/java/org/apache/drill/exec/work/prepare/PreparedStatementProvider.java
index 45b3a8d..c0d57ab 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/work/prepare/PreparedStatementProvider.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/work/prepare/PreparedStatementProvider.java
@@ -17,23 +17,9 @@
  */
 package org.apache.drill.exec.work.prepare;
 
-import static org.apache.drill.exec.ExecConstants.CREATE_PREPARE_STATEMENT_TIMEOUT_MILLIS;
-import static org.apache.drill.exec.proto.UserProtos.RequestStatus.FAILED;
-import static org.apache.drill.exec.proto.UserProtos.RequestStatus.OK;
-import static org.apache.drill.exec.proto.UserProtos.RequestStatus.TIMEOUT;
-
-import java.math.BigDecimal;
-import java.net.SocketAddress;
-import java.sql.Date;
-import java.sql.ResultSetMetaData;
-import java.sql.Time;
-import java.sql.Timestamp;
-import java.util.List;
-import java.util.Map;
-import java.util.UUID;
-import java.util.concurrent.CountDownLatch;
-import java.util.concurrent.TimeUnit;
-
+import com.google.common.collect.ImmutableMap;
+import io.netty.buffer.ByteBuf;
+import io.netty.channel.ChannelFuture;
 import org.apache.drill.common.exceptions.ErrorHelper;
 import org.apache.drill.common.types.TypeProtos.DataMode;
 import org.apache.drill.common.types.TypeProtos.MajorType;
@@ -45,8 +31,6 @@ import org.apache.drill.exec.proto.GeneralRPCProtos.Ack;
 import org.apache.drill.exec.proto.UserBitShared.DrillPBError;
 import org.apache.drill.exec.proto.UserBitShared.DrillPBError.ErrorType;
 import org.apache.drill.exec.proto.UserBitShared.QueryId;
-import org.apache.drill.exec.proto.UserBitShared.QueryResult;
-import org.apache.drill.exec.proto.UserBitShared.QueryResult.QueryState;
 import org.apache.drill.exec.proto.UserBitShared.QueryType;
 import org.apache.drill.exec.proto.UserBitShared.SerializedField;
 import org.apache.drill.exec.proto.UserProtos.ColumnSearchability;
@@ -59,20 +43,31 @@ import org.apache.drill.exec.proto.UserProtos.RequestStatus;
 import org.apache.drill.exec.proto.UserProtos.ResultColumnMetadata;
 import org.apache.drill.exec.proto.UserProtos.RpcType;
 import org.apache.drill.exec.proto.UserProtos.RunQuery;
+import org.apache.drill.exec.rpc.AbstractDisposableUserClientConnection;
 import org.apache.drill.exec.rpc.Acks;
 import org.apache.drill.exec.rpc.Response;
 import org.apache.drill.exec.rpc.ResponseSender;
 import org.apache.drill.exec.rpc.RpcOutcomeListener;
-import org.apache.drill.exec.rpc.user.UserServer.UserClientConnection;
+import org.apache.drill.exec.rpc.UserClientConnection;
 import org.apache.drill.exec.rpc.user.UserSession;
 import org.apache.drill.exec.store.ischema.InfoSchemaConstants;
 import org.apache.drill.exec.work.user.UserWorker;
 import org.joda.time.Period;
 
-import com.google.common.collect.ImmutableMap;
+import java.math.BigDecimal;
+import java.net.SocketAddress;
+import java.sql.Date;
+import java.sql.ResultSetMetaData;
+import java.sql.Time;
+import java.sql.Timestamp;
+import java.util.List;
+import java.util.Map;
+import java.util.UUID;
 
-import io.netty.buffer.ByteBuf;
-import io.netty.channel.ChannelFuture;
+import static org.apache.drill.exec.ExecConstants.CREATE_PREPARE_STATEMENT_TIMEOUT_MILLIS;
+import static org.apache.drill.exec.proto.UserProtos.RequestStatus.FAILED;
+import static org.apache.drill.exec.proto.UserProtos.RequestStatus.OK;
+import static org.apache.drill.exec.proto.UserProtos.RequestStatus.TIMEOUT;
 
 /**
  * Contains worker {@link Runnable} for creating a prepared statement and helper methods.
@@ -230,11 +225,9 @@ public class PreparedStatementProvider {
   /**
    * Decorator around {@link UserClientConnection} to tap the query results for LIMIT 0 query.
    */
-  private static class UserClientConnectionWrapper implements UserClientConnection {
+  private static class UserClientConnectionWrapper extends AbstractDisposableUserClientConnection {
     private final UserClientConnection inner;
-    private final CountDownLatch latch = new CountDownLatch(1);
 
-    private volatile DrillPBError error;
     private volatile List<SerializedField> fields;
 
     UserClientConnectionWrapper(UserClientConnection inner) {
@@ -257,27 +250,13 @@ public class PreparedStatementProvider {
     }
 
     @Override
-    public void sendResult(RpcOutcomeListener<Ack> listener, QueryResult result) {
-      // Release the wait latch if the query is terminated.
-      final QueryState state = result.getQueryState();
-      if (state == QueryState.FAILED || state  == QueryState.CANCELED || state == QueryState.COMPLETED) {
-        if (state == QueryState.FAILED) {
-          error = result.getError(0);
-        }
-        latch.countDown();
-      }
-
-      listener.success(Acks.OK, null);
-    }
-
-    @Override
     public void sendData(RpcOutcomeListener<Ack> listener, QueryWritableBatch result) {
       // Save the query results schema and release the buffers.
       if (fields == null) {
         fields = result.getHeader().getDef().getFieldList();
       }
 
-      for(ByteBuf buf : result.getBuffers()) {
+      for (ByteBuf buf : result.getBuffers()) {
         buf.release();
       }
 
@@ -285,24 +264,9 @@ public class PreparedStatementProvider {
     }
 
     /**
-     * Wait until the query has completed.
-     * @throws InterruptedException
-     */
-    boolean await(final long timeoutMillis) throws InterruptedException {
-      return latch.await(timeoutMillis, TimeUnit.MILLISECONDS);
-    }
-
-    /**
-     * @return Any error returned in query execution.
-     */
-    DrillPBError getError() {
-      return error;
-    }
-
-    /**
      * @return Schema returned in query result batch.
      */
-    List<SerializedField> getFields() {
+    public List<SerializedField> getFields() {
       return fields;
     }
   }

http://git-wip-us.apache.org/repos/asf/drill/blob/874bf629/exec/java-exec/src/main/java/org/apache/drill/exec/work/user/PlanSplitter.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/work/user/PlanSplitter.java b/exec/java-exec/src/main/java/org/apache/drill/exec/work/user/PlanSplitter.java
index eb3e86c..7ffb224 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/work/user/PlanSplitter.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/work/user/PlanSplitter.java
@@ -33,7 +33,7 @@ import org.apache.drill.exec.proto.UserBitShared.QueryId;
 import org.apache.drill.exec.proto.UserBitShared.QueryResult.QueryState;
 import org.apache.drill.exec.proto.UserProtos.GetQueryPlanFragments;
 import org.apache.drill.exec.proto.UserProtos.QueryPlanFragments;
-import org.apache.drill.exec.rpc.user.UserServer.UserClientConnection;
+import org.apache.drill.exec.rpc.UserClientConnection;
 import org.apache.drill.exec.server.DrillbitContext;
 import org.apache.drill.exec.util.MemoryAllocationUtilities;
 import org.apache.drill.exec.util.Pointer;

http://git-wip-us.apache.org/repos/asf/drill/blob/874bf629/exec/java-exec/src/main/java/org/apache/drill/exec/work/user/UserWorker.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/work/user/UserWorker.java b/exec/java-exec/src/main/java/org/apache/drill/exec/work/user/UserWorker.java
index b90b4d2..04135dc 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/work/user/UserWorker.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/work/user/UserWorker.java
@@ -32,7 +32,7 @@ import org.apache.drill.exec.proto.UserProtos.QueryPlanFragments;
 import org.apache.drill.exec.proto.UserProtos.RunQuery;
 import org.apache.drill.exec.rpc.Acks;
 import org.apache.drill.exec.rpc.ResponseSender;
-import org.apache.drill.exec.rpc.user.UserServer.UserClientConnection;
+import org.apache.drill.exec.rpc.UserClientConnection;
 import org.apache.drill.exec.rpc.user.UserSession;
 import org.apache.drill.exec.rpc.user.UserSession.QueryCountIncrementer;
 import org.apache.drill.exec.server.options.OptionManager;

http://git-wip-us.apache.org/repos/asf/drill/blob/874bf629/exec/java-exec/src/main/resources/drill-module.conf
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/resources/drill-module.conf b/exec/java-exec/src/main/resources/drill-module.conf
index 5ba4526..c2a2bf0 100644
--- a/exec/java-exec/src/main/resources/drill-module.conf
+++ b/exec/java-exec/src/main/resources/drill-module.conf
@@ -126,6 +126,12 @@ drill.exec: {
       allowedMethods: ["GET", "POST", "HEAD", "OPTIONS"],
       allowedHeaders: ["X-Requested-With", "Content-Type", "Accept", "Origin"],
       credentials: true
+    },
+    session: {
+        memory: {
+            reservation: 0,
+            maximum: 9223372036854775807
+        }
     }
   },
   network: {
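
The new session memory limits can be tuned in drill-override.conf; a sketch, assuming the block sits under drill.exec.http as the hunk context suggests (the default maximum is Long.MAX_VALUE, i.e. effectively unbounded):

    drill.exec.http.session.memory: {
      reservation: 0,
      maximum: 9223372036854775807
    }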

http://git-wip-us.apache.org/repos/asf/drill/blob/874bf629/exec/java-exec/src/test/java/org/apache/drill/exec/client/DumpCatTest.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/test/java/org/apache/drill/exec/client/DumpCatTest.java b/exec/java-exec/src/test/java/org/apache/drill/exec/client/DumpCatTest.java
index 09a61c1..3dc31ca 100644
--- a/exec/java-exec/src/test/java/org/apache/drill/exec/client/DumpCatTest.java
+++ b/exec/java-exec/src/test/java/org/apache/drill/exec/client/DumpCatTest.java
@@ -36,7 +36,7 @@ import org.apache.drill.exec.planner.PhysicalPlanReader;
 import org.apache.drill.exec.proto.BitControl.PlanFragment;
 import org.apache.drill.exec.proto.ExecProtos.FragmentHandle;
 import org.apache.drill.exec.proto.helper.QueryIdHelper;
-import org.apache.drill.exec.rpc.user.UserServer.UserClientConnection;
+import org.apache.drill.exec.rpc.UserClientConnection;
 import org.apache.drill.exec.server.DrillbitContext;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;

http://git-wip-us.apache.org/repos/asf/drill/blob/874bf629/exec/java-exec/src/test/java/org/apache/drill/exec/fn/impl/TestMathFunctions.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/test/java/org/apache/drill/exec/fn/impl/TestMathFunctions.java b/exec/java-exec/src/test/java/org/apache/drill/exec/fn/impl/TestMathFunctions.java
index 72d582c..a6c22c5 100644
--- a/exec/java-exec/src/test/java/org/apache/drill/exec/fn/impl/TestMathFunctions.java
+++ b/exec/java-exec/src/test/java/org/apache/drill/exec/fn/impl/TestMathFunctions.java
@@ -35,7 +35,7 @@ import org.apache.drill.exec.physical.impl.SimpleRootExec;
 import org.apache.drill.exec.planner.PhysicalPlanReader;
 import org.apache.drill.exec.planner.PhysicalPlanReaderTestFactory;
 import org.apache.drill.exec.proto.BitControl;
-import org.apache.drill.exec.rpc.user.UserServer.UserClientConnection;
+import org.apache.drill.exec.rpc.UserClientConnection;
 import org.apache.drill.exec.server.DrillbitContext;
 import org.apache.drill.exec.vector.Float8Vector;
 import org.apache.drill.exec.vector.IntVector;

http://git-wip-us.apache.org/repos/asf/drill/blob/874bf629/exec/java-exec/src/test/java/org/apache/drill/exec/fn/impl/TestMultiInputAdd.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/test/java/org/apache/drill/exec/fn/impl/TestMultiInputAdd.java b/exec/java-exec/src/test/java/org/apache/drill/exec/fn/impl/TestMultiInputAdd.java
index cf5c239..a259e8c 100644
--- a/exec/java-exec/src/test/java/org/apache/drill/exec/fn/impl/TestMultiInputAdd.java
+++ b/exec/java-exec/src/test/java/org/apache/drill/exec/fn/impl/TestMultiInputAdd.java
@@ -31,7 +31,7 @@ import org.apache.drill.exec.pop.PopUnitTestBase;
 import org.apache.drill.exec.record.RecordBatchLoader;
 import org.apache.drill.exec.record.VectorWrapper;
 import org.apache.drill.exec.rpc.user.QueryDataBatch;
-import org.apache.drill.exec.rpc.user.UserServer;
+import org.apache.drill.exec.rpc.UserClientConnection;
 import org.apache.drill.exec.server.Drillbit;
 import org.apache.drill.exec.server.DrillbitContext;
 import org.apache.drill.exec.server.RemoteServiceSet;
@@ -49,7 +49,7 @@ public class TestMultiInputAdd extends PopUnitTestBase {
 
 
     @Test
-    public void testMultiInputAdd(@Injectable final DrillbitContext bitContext, @Injectable UserServer.UserClientConnection connection) throws Throwable
+    public void testMultiInputAdd(@Injectable final DrillbitContext bitContext, @Injectable UserClientConnection connection) throws Throwable
     {
         try (RemoteServiceSet serviceSet = RemoteServiceSet.getLocalServiceSet();
              Drillbit bit = new Drillbit(CONFIG, serviceSet);


[07/12] drill git commit: DRILL-5379: Set Hdfs Block Size based on Parquet Block Size

Posted by jn...@apache.org.
DRILL-5379: Set Hdfs Block Size based on Parquet Block Size

Provide an option to specify the block size during file creation.
This helps create parquet files with a single block on HDFS, improving performance when those files are read.

See DRILL-5379 for details.

closes #826
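
Once the option is registered (see the SystemOptionManager hunk below), it should be switchable like any other system/session option, e.g.:

    ALTER SYSTEM SET `store.parquet.writer.use_single_fs_block` = true;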


Project: http://git-wip-us.apache.org/repos/asf/drill/repo
Commit: http://git-wip-us.apache.org/repos/asf/drill/commit/9ab91ff2
Tree: http://git-wip-us.apache.org/repos/asf/drill/tree/9ab91ff2
Diff: http://git-wip-us.apache.org/repos/asf/drill/diff/9ab91ff2

Branch: refs/heads/master
Commit: 9ab91ff2640a8e89b92869d7dbb15acb9b602cd3
Parents: 9ba4af8
Author: Padma Penumarthy <pp...@yahoo.com>
Authored: Wed Apr 19 17:25:20 2017 -0700
Committer: Jinfeng Ni <jn...@apache.org>
Committed: Fri Jun 2 21:43:14 2017 -0700

----------------------------------------------------------------------
 .../org/apache/drill/exec/ExecConstants.java    |  3 +++
 .../server/options/SystemOptionManager.java     |  1 +
 .../exec/store/parquet/ParquetFormatPlugin.java |  2 ++
 .../exec/store/parquet/ParquetRecordWriter.java | 22 ++++++++++++++++----
 4 files changed, 24 insertions(+), 4 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/drill/blob/9ab91ff2/exec/java-exec/src/main/java/org/apache/drill/exec/ExecConstants.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/ExecConstants.java b/exec/java-exec/src/main/java/org/apache/drill/exec/ExecConstants.java
index ba98532..7c681c1 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/ExecConstants.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/ExecConstants.java
@@ -167,6 +167,9 @@ public interface ExecConstants {
   OptionValidator OUTPUT_FORMAT_VALIDATOR = new StringValidator(OUTPUT_FORMAT_OPTION, "parquet");
   String PARQUET_BLOCK_SIZE = "store.parquet.block-size";
   OptionValidator PARQUET_BLOCK_SIZE_VALIDATOR = new LongValidator(PARQUET_BLOCK_SIZE, 512*1024*1024);
+  String PARQUET_WRITER_USE_SINGLE_FS_BLOCK = "store.parquet.writer.use_single_fs_block";
+  OptionValidator PARQUET_WRITER_USE_SINGLE_FS_BLOCK_VALIDATOR = new BooleanValidator(
+    PARQUET_WRITER_USE_SINGLE_FS_BLOCK, false);
   String PARQUET_PAGE_SIZE = "store.parquet.page-size";
   OptionValidator PARQUET_PAGE_SIZE_VALIDATOR = new LongValidator(PARQUET_PAGE_SIZE, 1024*1024);
   String PARQUET_DICT_PAGE_SIZE = "store.parquet.dictionary.page-size";

http://git-wip-us.apache.org/repos/asf/drill/blob/9ab91ff2/exec/java-exec/src/main/java/org/apache/drill/exec/server/options/SystemOptionManager.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/server/options/SystemOptionManager.java b/exec/java-exec/src/main/java/org/apache/drill/exec/server/options/SystemOptionManager.java
index 4f7ecc2..8492f36 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/server/options/SystemOptionManager.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/server/options/SystemOptionManager.java
@@ -96,6 +96,7 @@ public class SystemOptionManager extends BaseOptionManager implements OptionMana
       ExecConstants.CAST_TO_NULLABLE_NUMERIC_OPTION,
       ExecConstants.OUTPUT_FORMAT_VALIDATOR,
       ExecConstants.PARQUET_BLOCK_SIZE_VALIDATOR,
+      ExecConstants.PARQUET_WRITER_USE_SINGLE_FS_BLOCK_VALIDATOR,
       ExecConstants.PARQUET_PAGE_SIZE_VALIDATOR,
       ExecConstants.PARQUET_DICT_PAGE_SIZE_VALIDATOR,
       ExecConstants.PARQUET_WRITER_COMPRESSION_TYPE_VALIDATOR,

http://git-wip-us.apache.org/repos/asf/drill/blob/9ab91ff2/exec/java-exec/src/main/java/org/apache/drill/exec/store/parquet/ParquetFormatPlugin.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/store/parquet/ParquetFormatPlugin.java b/exec/java-exec/src/main/java/org/apache/drill/exec/store/parquet/ParquetFormatPlugin.java
index f17d414..0eb4665 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/store/parquet/ParquetFormatPlugin.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/store/parquet/ParquetFormatPlugin.java
@@ -139,6 +139,8 @@ public class ParquetFormatPlugin implements FormatPlugin{
     options.put(FileSystem.FS_DEFAULT_NAME_KEY, ((FileSystemConfig)writer.getStorageConfig()).connection);
 
     options.put(ExecConstants.PARQUET_BLOCK_SIZE, context.getOptions().getOption(ExecConstants.PARQUET_BLOCK_SIZE).num_val.toString());
+    options.put(ExecConstants.PARQUET_WRITER_USE_SINGLE_FS_BLOCK,
+      context.getOptions().getOption(ExecConstants.PARQUET_WRITER_USE_SINGLE_FS_BLOCK).bool_val.toString());
     options.put(ExecConstants.PARQUET_PAGE_SIZE, context.getOptions().getOption(ExecConstants.PARQUET_PAGE_SIZE).num_val.toString());
     options.put(ExecConstants.PARQUET_DICT_PAGE_SIZE, context.getOptions().getOption(ExecConstants.PARQUET_DICT_PAGE_SIZE).num_val.toString());
 

http://git-wip-us.apache.org/repos/asf/drill/blob/9ab91ff2/exec/java-exec/src/main/java/org/apache/drill/exec/store/parquet/ParquetRecordWriter.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/store/parquet/ParquetRecordWriter.java b/exec/java-exec/src/main/java/org/apache/drill/exec/store/parquet/ParquetRecordWriter.java
index 7536d78..bc495a3 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/store/parquet/ParquetRecordWriter.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/store/parquet/ParquetRecordWriter.java
@@ -17,6 +17,7 @@
  */
 package org.apache.drill.exec.store.parquet;
 
+import static java.lang.Math.ceil;
 import static java.lang.Math.max;
 import static java.lang.Math.min;
 
@@ -77,6 +78,7 @@ public class ParquetRecordWriter extends ParquetOutputRecordWriter {
   private static final int MINIMUM_BUFFER_SIZE = 64 * 1024;
   private static final int MINIMUM_RECORD_COUNT_FOR_CHECK = 100;
   private static final int MAXIMUM_RECORD_COUNT_FOR_CHECK = 10000;
+  private static final int BLOCKSIZE_MULTIPLE = 64 * 1024;
 
   public static final String DRILL_VERSION_PROPERTY = "drill.version";
   public static final String WRITER_VERSION_PROPERTY = "drill-writer.version";
@@ -89,6 +91,7 @@ public class ParquetRecordWriter extends ParquetOutputRecordWriter {
   private int pageSize;
   private int dictionaryPageSize;
   private boolean enableDictionary = false;
+  private boolean useSingleFSBlock = false;
   private CompressionCodecName codec = CompressionCodecName.SNAPPY;
   private WriterVersion writerVersion = WriterVersion.PARQUET_1_0;
   private CodecFactory codecFactory;
@@ -156,6 +159,12 @@ public class ParquetRecordWriter extends ParquetOutputRecordWriter {
     }
 
     enableDictionary = Boolean.parseBoolean(writerOptions.get(ExecConstants.PARQUET_WRITER_ENABLE_DICTIONARY_ENCODING));
+    useSingleFSBlock = Boolean.parseBoolean(writerOptions.get(ExecConstants.PARQUET_WRITER_USE_SINGLE_FS_BLOCK));
+
+    if (useSingleFSBlock) {
+      // Round up blockSize to multiple of 64K.
+      blockSize = (int)ceil((double)blockSize/BLOCKSIZE_MULTIPLE) * BLOCKSIZE_MULTIPLE;
+    }
   }
 
   private boolean containsComplexVectors(BatchSchema schema) {
@@ -380,14 +389,19 @@ public class ParquetRecordWriter extends ParquetOutputRecordWriter {
 
       // since ParquetFileWriter will overwrite empty output file (append is not supported)
       // we need to re-apply file permission
-      parquetFileWriter = new ParquetFileWriter(conf, schema, path, ParquetFileWriter.Mode.OVERWRITE);
+      if (useSingleFSBlock) {
+        // Passing blockSize creates files with this blockSize instead of the filesystem default blockSize.
+        // Currently, this is supported only by filesystems included in
+        // BLOCK_FS_SCHEMES (ParquetFileWriter.java in parquet-mr), which includes HDFS.
+        // For other filesystems, the default blockSize configured for the filesystem is used.
+        parquetFileWriter = new ParquetFileWriter(conf, schema, path, ParquetFileWriter.Mode.OVERWRITE, blockSize, 0);
+      } else {
+        parquetFileWriter = new ParquetFileWriter(conf, schema, path, ParquetFileWriter.Mode.OVERWRITE);
+      }
       storageStrategy.applyToFile(fs, path);
-
       parquetFileWriter.start();
     }
-
     recordCount++;
-
     checkBlockSizeReached();
   }
 


[09/12] drill git commit: DRILL-5356: Refactor Parquet Record Reader

Posted by jn...@apache.org.
http://git-wip-us.apache.org/repos/asf/drill/blob/676ea889/exec/java-exec/src/test/resources/parquet/expected/star.csv
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/test/resources/parquet/expected/star.csv b/exec/java-exec/src/test/resources/parquet/expected/star.csv
new file mode 100644
index 0000000..6d7e85b
--- /dev/null
+++ b/exec/java-exec/src/test/resources/parquet/expected/star.csv
@@ -0,0 +1,20 @@
+1,"Supplier#000000001"," N kD4on9OM Ipw3,gf0JBoQDd7tgrzrddZ",17,"27-918-335-1736",5755.94,"each slyly above the careful"
+2,"Supplier#000000002","89eJ5ksX3ImxJQBvxObC,",5,"15-679-861-2259",4032.68," slyly bold instructions. idle dependen"
+3,"Supplier#000000003","q1,G3Pj6OjIuUYfUoH18BFTKP5aU9bEV3",1,"11-383-516-1199",4192.4,"blithely silent requests after the express dependencies are sl"
+4,"Supplier#000000004","Bk7ah4CK8SYQTepEmvMkkgMwg",15,"25-843-787-7479",4641.08,"riously even requests above the exp"
+5,"Supplier#000000005","Gcdm2rJRzl5qlTVzc",11,"21-151-690-3663",-283.84,". slyly regular pinto bea"
+6,"Supplier#000000006","tQxuVm7s7CnK",14,"24-696-997-4969",1365.79,"final accounts. regular dolphins use against the furiously ironic decoys. "
+7,"Supplier#000000007","s,4TicNGB4uO6PaSqNBUq",23,"33-990-965-2201",6820.35,"s unwind silently furiously regular courts. final requests are deposits. requests wake quietly blit"
+8,"Supplier#000000008","9Sq4bBH2FQEmaFOocY45sRTxo6yuoG",17,"27-498-742-3860",7627.85,"al pinto beans. asymptotes haggl"
+9,"Supplier#000000009","1KhUgZegwM3ua7dsYmekYBsK",10,"20-403-398-8662",5302.37,"s. unusual, even requests along the furiously regular pac"
+10,"Supplier#000000010","Saygah3gYWMp72i PY",24,"34-852-489-8585",3891.91,"ing waters. regular requests ar"
+11,"Supplier#000000011","JfwTs,LZrV, M,9C",18,"28-613-996-1505",3393.08,"y ironic packages. slyly ironic accounts affix furiously; ironically unusual excuses across the flu"
+12,"Supplier#000000012","aLIW  q0HYd",8,"18-179-925-7181",1432.69,"al packages nag alongside of the bold instructions. express, daring accounts"
+13,"Supplier#000000013","HK71HQyWoqRWOX8GI FpgAifW,2PoH",3,"13-727-620-7813",9107.22,"requests engage regularly instructions. furiously special requests ar"
+14,"Supplier#000000014","EXsnO5pTNj4iZRm",15,"25-656-247-5058",9189.82,"l accounts boost. fluffily bold warhorses wake"
+15,"Supplier#000000015","olXVbNBfVzRqgokr1T,Ie",8,"18-453-357-6394",308.56," across the furiously regular platelets wake even deposits. quickly express she"
+16,"Supplier#000000016","YjP5C55zHDXL7LalK27zfQnwejdpin4AMpvh",22,"32-822-502-4215",2972.26,"ously express ideas haggle quickly dugouts? fu"
+17,"Supplier#000000017","c2d,ESHRSkK3WYnxpgw6aOqN0q",19,"29-601-884-9219",1687.81,"eep against the furiously bold ideas. fluffily bold packa"
+18,"Supplier#000000018","PGGVE5PWAMwKDZw ",16,"26-729-551-1115",7040.82,"accounts snooze slyly furiously bold "
+19,"Supplier#000000019","edZT3es,nBFD8lBXTGeTl",24,"34-278-310-2731",6150.38,"refully final foxes across the dogged theodolites sleep slyly abou"
+20,"Supplier#000000020","iybAE,RmTymrZVYaFZva2SH,j",3,"13-715-945-6730",530.82,"n, ironic ideas would nag blithely about the slyly regular accounts. silent, expr"

http://git-wip-us.apache.org/repos/asf/drill/blob/676ea889/exec/java-exec/src/test/resources/parquet/expected/variableWidth.csv
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/test/resources/parquet/expected/variableWidth.csv b/exec/java-exec/src/test/resources/parquet/expected/variableWidth.csv
new file mode 100644
index 0000000..cbfd9f3
--- /dev/null
+++ b/exec/java-exec/src/test/resources/parquet/expected/variableWidth.csv
@@ -0,0 +1,20 @@
+"Supplier#000000001"," N kD4on9OM Ipw3,gf0JBoQDd7tgrzrddZ","27-918-335-1736","each slyly above the careful"
+"Supplier#000000002","89eJ5ksX3ImxJQBvxObC,","15-679-861-2259"," slyly bold instructions. idle dependen"
+"Supplier#000000003","q1,G3Pj6OjIuUYfUoH18BFTKP5aU9bEV3","11-383-516-1199","blithely silent requests after the express dependencies are sl"
+"Supplier#000000004","Bk7ah4CK8SYQTepEmvMkkgMwg","25-843-787-7479","riously even requests above the exp"
+"Supplier#000000005","Gcdm2rJRzl5qlTVzc","21-151-690-3663",". slyly regular pinto bea"
+"Supplier#000000006","tQxuVm7s7CnK","24-696-997-4969","final accounts. regular dolphins use against the furiously ironic decoys. "
+"Supplier#000000007","s,4TicNGB4uO6PaSqNBUq","33-990-965-2201","s unwind silently furiously regular courts. final requests are deposits. requests wake quietly blit"
+"Supplier#000000008","9Sq4bBH2FQEmaFOocY45sRTxo6yuoG","27-498-742-3860","al pinto beans. asymptotes haggl"
+"Supplier#000000009","1KhUgZegwM3ua7dsYmekYBsK","20-403-398-8662","s. unusual, even requests along the furiously regular pac"
+"Supplier#000000010","Saygah3gYWMp72i PY","34-852-489-8585","ing waters. regular requests ar"
+"Supplier#000000011","JfwTs,LZrV, M,9C","28-613-996-1505","y ironic packages. slyly ironic accounts affix furiously; ironically unusual excuses across the flu"
+"Supplier#000000012","aLIW  q0HYd","18-179-925-7181","al packages nag alongside of the bold instructions. express, daring accounts"
+"Supplier#000000013","HK71HQyWoqRWOX8GI FpgAifW,2PoH","13-727-620-7813","requests engage regularly instructions. furiously special requests ar"
+"Supplier#000000014","EXsnO5pTNj4iZRm","25-656-247-5058","l accounts boost. fluffily bold warhorses wake"
+"Supplier#000000015","olXVbNBfVzRqgokr1T,Ie","18-453-357-6394"," across the furiously regular platelets wake even deposits. quickly express she"
+"Supplier#000000016","YjP5C55zHDXL7LalK27zfQnwejdpin4AMpvh","32-822-502-4215","ously express ideas haggle quickly dugouts? fu"
+"Supplier#000000017","c2d,ESHRSkK3WYnxpgw6aOqN0q","29-601-884-9219","eep against the furiously bold ideas. fluffily bold packa"
+"Supplier#000000018","PGGVE5PWAMwKDZw ","26-729-551-1115","accounts snooze slyly furiously bold "
+"Supplier#000000019","edZT3es,nBFD8lBXTGeTl","34-278-310-2731","refully final foxes across the dogged theodolites sleep slyly abou"
+"Supplier#000000020","iybAE,RmTymrZVYaFZva2SH,j","13-715-945-6730","n, ironic ideas would nag blithely about the slyly regular accounts. silent, expr"


[06/12] drill git commit: DRILL-5512: Standardize error handling in ScanBatch

Posted by jn...@apache.org.
DRILL-5512: Standardize error handling in ScanBatch

Standardizes error handling to throw a UserException. Prior code threw
various exceptions, called the fail() method, or returned a variety of
status codes.

closes #838
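
The standardized pattern (condensed from the ScanBatch hunks below) is to wrap the root cause in a UserException, attach context, and build -- which also logs:

    try {
      currentReader.setup(oContext, mutator);
    } catch (ExecutionSetupException e) {
      throw UserException.systemError(e)
          .addContext("Setup failed for", currentReader.getClass().getSimpleName())
          .build(logger);
    }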


Project: http://git-wip-us.apache.org/repos/asf/drill/repo
Commit: http://git-wip-us.apache.org/repos/asf/drill/commit/78739889
Tree: http://git-wip-us.apache.org/repos/asf/drill/tree/78739889
Diff: http://git-wip-us.apache.org/repos/asf/drill/diff/78739889

Branch: refs/heads/master
Commit: 78739889164c8df84fee249310f6d72d1199ea04
Parents: 155820a
Author: Paul Rogers <pr...@maprtech.com>
Authored: Mon May 15 15:00:21 2017 -0700
Committer: Jinfeng Ni <jn...@apache.org>
Committed: Fri Jun 2 21:43:14 2017 -0700

----------------------------------------------------------------------
 .../drill/common/exceptions/UserException.java  | 12 +++--
 .../drill/exec/physical/impl/ScanBatch.java     | 51 ++++++++++----------
 .../apache/drill/common/types/TypeProtos.java   |  4 +-
 .../apache/drill/exec/proto/UserBitShared.java  | 16 ++++--
 protocol/src/main/protobuf/Types.proto          |  7 +--
 protocol/src/main/protobuf/UserBitShared.proto  |  8 ++-
 6 files changed, 56 insertions(+), 42 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/drill/blob/78739889/common/src/main/java/org/apache/drill/common/exceptions/UserException.java
----------------------------------------------------------------------
diff --git a/common/src/main/java/org/apache/drill/common/exceptions/UserException.java b/common/src/main/java/org/apache/drill/common/exceptions/UserException.java
index 87b3fd4..dd4fd36 100644
--- a/common/src/main/java/org/apache/drill/common/exceptions/UserException.java
+++ b/common/src/main/java/org/apache/drill/common/exceptions/UserException.java
@@ -77,6 +77,14 @@ public class UserException extends DrillRuntimeException {
    * <p>The cause message will be used unless {@link Builder#message(String, Object...)} is called.
    * <p>If the wrapped exception is, or wraps, a user exception it will be returned by {@link Builder#build(Logger)}
    * instead of creating a new exception. Any added context will be added to the user exception as well.
+   * <p>
+   * This exception, previously deprecated, has been repurposed to indicate unspecified
+   * errors: in particular, the case in which a lower-level bit of code throws an
+   * exception other than UserException. The catching code then knows only that "something
+   * went wrong", without enough information to categorize the error.
+   * <p>
+   * System errors also indicate illegal internal states, missing functionality, and other
+   * code-related errors -- all of which "should never occur."
    *
    * @see org.apache.drill.exec.proto.UserBitShared.DrillPBError.ErrorType#SYSTEM
    *
@@ -84,10 +92,8 @@ public class UserException extends DrillRuntimeException {
    *              returned by the builder instead of creating a new user exception
    * @return user exception builder
    *
-   * @deprecated This method should never need to be used explicitly, unless you are passing the exception to the
-   *             Rpc layer or UserResultListener.submitFailed()
    */
-  @Deprecated
+
   public static Builder systemError(final Throwable cause) {
     return new Builder(DrillPBError.ErrorType.SYSTEM, cause);
   }

http://git-wip-us.apache.org/repos/asf/drill/blob/78739889/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/ScanBatch.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/ScanBatch.java b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/ScanBatch.java
index 5a9af39..4218069 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/ScanBatch.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/ScanBatch.java
@@ -86,31 +86,32 @@ public class ScanBatch implements CloseableRecordBatch {
 
   public ScanBatch(PhysicalOperator subScanConfig, FragmentContext context,
                    OperatorContext oContext, Iterator<RecordReader> readers,
-                   List<Map<String, String>> implicitColumns) throws ExecutionSetupException {
+                   List<Map<String, String>> implicitColumns) {
     this.context = context;
     this.readers = readers;
     if (!readers.hasNext()) {
-      throw new ExecutionSetupException("A scan batch must contain at least one reader.");
+      throw UserException.systemError(
+          new ExecutionSetupException("A scan batch must contain at least one reader."))
+        .build(logger);
     }
     currentReader = readers.next();
     this.oContext = oContext;
     allocator = oContext.getAllocator();
     mutator = new Mutator(oContext, allocator, container);
 
-    boolean setup = false;
     try {
       oContext.getStats().startProcessing();
       currentReader.setup(oContext, mutator);
-      setup = true;
-    } finally {
-      // if we had an exception during setup, make sure to release existing data.
-      if (!setup) {
-        try {
-          currentReader.close();
-        } catch(final Exception e) {
-          throw new ExecutionSetupException(e);
-        }
+    } catch (ExecutionSetupException e) {
+      try {
+        currentReader.close();
+      } catch(final Exception e2) {
+        logger.error("Close failed for reader " + currentReader.getClass().getSimpleName(), e2);
       }
+      throw UserException.systemError(e)
+            .addContext("Setup failed for", currentReader.getClass().getSimpleName())
+            .build(logger);
+    } finally {
       oContext.getStats().stopProcessing();
     }
     this.implicitColumns = implicitColumns.iterator();
@@ -173,9 +174,8 @@ public class ScanBatch implements CloseableRecordBatch {
 
         currentReader.allocate(mutator.fieldVectorMap());
       } catch (OutOfMemoryException e) {
-        logger.debug("Caught Out of Memory Exception", e);
         clearFieldVectorMap();
-        return IterOutcome.OUT_OF_MEMORY;
+        throw UserException.memoryError(e).build(logger);
       }
       while ((recordCount = currentReader.next()) == 0) {
         try {
@@ -213,17 +213,16 @@ public class ScanBatch implements CloseableRecordBatch {
           try {
             currentReader.allocate(mutator.fieldVectorMap());
           } catch (OutOfMemoryException e) {
-            logger.debug("Caught OutOfMemoryException");
             clearFieldVectorMap();
-            return IterOutcome.OUT_OF_MEMORY;
+            throw UserException.memoryError(e).build(logger);
           }
           addImplicitVectors();
         } catch (ExecutionSetupException e) {
-          this.context.fail(e);
           releaseAssets();
-          return IterOutcome.STOP;
+          throw UserException.systemError(e).build(logger);
         }
       }
+
       // At this point, the current reader has read 1 or more rows.
 
       hasReadNonEmptyFile = true;
@@ -245,18 +244,15 @@ public class ScanBatch implements CloseableRecordBatch {
         return IterOutcome.OK;
       }
     } catch (OutOfMemoryException ex) {
-      context.fail(UserException.memoryError(ex).build(logger));
-      return IterOutcome.STOP;
+      throw UserException.memoryError(ex).build(logger);
     } catch (Exception ex) {
-      logger.debug("Failed to read the batch. Stopping...", ex);
-      context.fail(ex);
-      return IterOutcome.STOP;
+      throw UserException.systemError(ex).build(logger);
     } finally {
       oContext.getStats().stopProcessing();
     }
   }
 
-  private void addImplicitVectors() throws ExecutionSetupException {
+  private void addImplicitVectors() {
     try {
       if (implicitVectors != null) {
         for (ValueVector v : implicitVectors.values()) {
@@ -274,7 +270,10 @@ public class ScanBatch implements CloseableRecordBatch {
         }
       }
     } catch(SchemaChangeException e) {
-      throw new ExecutionSetupException(e);
+      // No exception should be thrown here.
+      throw UserException.systemError(e)
+        .addContext("Failure while allocating implicit vectors")
+        .build(logger);
     }
   }
 
@@ -324,7 +323,7 @@ public class ScanBatch implements CloseableRecordBatch {
    * this scan batch. Made visible so that tests can create this mutator
    * without also needing a ScanBatch instance. (This class is really independent
    * of the ScanBatch, but resides here for historical reasons. This is,
-   * in turn, the only use of the genereated vector readers in the vector
+   * in turn, the only use of the generated vector readers in the vector
    * package.)
    */
 

http://git-wip-us.apache.org/repos/asf/drill/blob/78739889/protocol/src/main/java/org/apache/drill/common/types/TypeProtos.java
----------------------------------------------------------------------
diff --git a/protocol/src/main/java/org/apache/drill/common/types/TypeProtos.java b/protocol/src/main/java/org/apache/drill/common/types/TypeProtos.java
index ff5698a..1fa4848 100644
--- a/protocol/src/main/java/org/apache/drill/common/types/TypeProtos.java
+++ b/protocol/src/main/java/org/apache/drill/common/types/TypeProtos.java
@@ -170,7 +170,7 @@ public final class TypeProtos {
      * <code>FLOAT4 = 18;</code>
      *
      * <pre>
-     *  4 byte ieee 754 
+     *  4 byte ieee 754
      * </pre>
      */
     FLOAT4(17, 18),
@@ -463,7 +463,7 @@ public final class TypeProtos {
      * <code>FLOAT4 = 18;</code>
      *
      * <pre>
-     *  4 byte ieee 754 
+     *  4 byte ieee 754
      * </pre>
      */
     public static final int FLOAT4_VALUE = 18;

http://git-wip-us.apache.org/repos/asf/drill/blob/78739889/protocol/src/main/java/org/apache/drill/exec/proto/UserBitShared.java
----------------------------------------------------------------------
diff --git a/protocol/src/main/java/org/apache/drill/exec/proto/UserBitShared.java b/protocol/src/main/java/org/apache/drill/exec/proto/UserBitShared.java
index d28a13d..e4261df 100644
--- a/protocol/src/main/java/org/apache/drill/exec/proto/UserBitShared.java
+++ b/protocol/src/main/java/org/apache/drill/exec/proto/UserBitShared.java
@@ -2178,6 +2178,10 @@ public final class UserBitShared {
        *
        * <pre>
        * equivalent to SQLNonTransientException.
+       * - unexpected internal state
+       * - uncategorized operation
+       * general user action is to contact the Drill team for
+       * assistance
        * </pre>
        */
       SYSTEM(8, 8),
@@ -2186,8 +2190,8 @@ public final class UserBitShared {
        *
        * <pre>
        * equivalent to SQLFeatureNotSupportedException
-       * - type change
-       * - schema change
+       * - unimplemented feature, option, or execution path
+       * - schema change in operator that does not support it
        * </pre>
        */
       UNSUPPORTED_OPERATION(9, 9),
@@ -2286,6 +2290,10 @@ public final class UserBitShared {
        *
        * <pre>
        * equivalent to SQLNonTransientException.
+       * - unexpected internal state
+       * - uncategorized operation
+       * general user action is to contact the Drill team for
+       * assistance
        * </pre>
        */
       public static final int SYSTEM_VALUE = 8;
@@ -2294,8 +2302,8 @@ public final class UserBitShared {
        *
        * <pre>
        * equivalent to SQLFeatureNotSupportedException
-       * - type change
-       * - schema change
+       * - unimplemented feature, option, or execution path
+       * - schema change in operator that does not support it
        * </pre>
        */
       public static final int UNSUPPORTED_OPERATION_VALUE = 9;

http://git-wip-us.apache.org/repos/asf/drill/blob/78739889/protocol/src/main/protobuf/Types.proto
----------------------------------------------------------------------
diff --git a/protocol/src/main/protobuf/Types.proto b/protocol/src/main/protobuf/Types.proto
index 71fa4ac..b2b29f0 100644
--- a/protocol/src/main/protobuf/Types.proto
+++ b/protocol/src/main/protobuf/Types.proto
@@ -24,7 +24,7 @@ option optimize_for = SPEED;
 enum MinorType {
     LATE = 0;   //  late binding type
     MAP = 1;   //  an empty map column.  Useful for conceptual setup.  Children listed within here
-    
+
     TINYINT = 3;   //  single byte signed integer
     SMALLINT = 4;   //  two byte signed integer
     INT = 5;   //  four byte signed integer
@@ -40,7 +40,7 @@ enum MinorType {
     TIMESTAMPTZ = 15;   //  unix epoch time in millis
     TIMESTAMP = 16;   //  TBD
     INTERVAL = 17;   //  TBD
-    FLOAT4 = 18;   //  4 byte ieee 754 
+    FLOAT4 = 18;   //  4 byte ieee 754
     FLOAT8 = 19;   //  8 byte ieee 754
     BIT = 20;   //  single bit value (boolean)
     FIXEDCHAR = 21;   //  utf8 fixed length string, padded with spaces
@@ -77,11 +77,8 @@ message MajorType {
   repeated MinorType sub_type = 7; // used by Union type
 }
 
-
-
 enum DataMode {
   OPTIONAL = 0; // nullable
   REQUIRED = 1; // non-nullable
   REPEATED = 2; // single, repeated-field
 }
-

http://git-wip-us.apache.org/repos/asf/drill/blob/78739889/protocol/src/main/protobuf/UserBitShared.proto
----------------------------------------------------------------------
diff --git a/protocol/src/main/protobuf/UserBitShared.proto b/protocol/src/main/protobuf/UserBitShared.proto
index b091711..65f9698 100644
--- a/protocol/src/main/protobuf/UserBitShared.proto
+++ b/protocol/src/main/protobuf/UserBitShared.proto
@@ -74,11 +74,15 @@ message DrillPBError{
      */
     RESOURCE = 7;
     /* equivalent to SQLNonTransientException.
+     * - unexpected internal state
+     * - uncategorized operation
+     * general user action is to contact the Drill team for
+     * assistance
      */
     SYSTEM = 8;
     /* equivalent to SQLFeatureNotSupportedException
-     * - type change
-     * - schema change
+     * - unimplemented feature, option, or execution path
+     * - schema change in operator that does not support it
      */
     UNSUPPORTED_OPERATION = 9;
     /* SQL validation exception


[05/12] drill git commit: DRILL-5533: Fix flag assignment in FunctionInitializer.checkInit() method

Posted by jn...@apache.org.
DRILL-5533: Fix flag assignment in FunctionInitializer.checkInit() method

Changes:
1. Fixed double-checked locking in the FunctionInitializer.checkInit() method (the flag is now set once the function body is loaded).
2. Fixed the ImportGrabber.getImports() method to return the list of imports.
3. Added unit tests for FunctionInitializer.
4. Minor refactoring (renamed methods, added javadoc).

closes #843
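
Condensed sketch of the corrected double-checked locking (per the FunctionInitializer diff below); the key point is that the volatile flag is set only after the body is fully loaded:

    private volatile boolean isLoaded;

    private void loadFunctionBody() {
      if (isLoaded) {
        return;             // fast path once loaded, no locking
      }
      synchronized (this) {
        if (isLoaded) {
          return;           // another thread loaded it while we waited
        }
        // ... parse class source, populate methods and imports ...
        isLoaded = true;    // the fix: the flag is now actually assigned
      }
    }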


Project: http://git-wip-us.apache.org/repos/asf/drill/repo
Commit: http://git-wip-us.apache.org/repos/asf/drill/commit/155820a4
Tree: http://git-wip-us.apache.org/repos/asf/drill/tree/155820a4
Diff: http://git-wip-us.apache.org/repos/asf/drill/diff/155820a4

Branch: refs/heads/master
Commit: 155820a49563d631cbafd61a8538619ced21bd95
Parents: 62326be
Author: Arina Ielchiieva <ar...@gmail.com>
Authored: Mon May 22 17:49:31 2017 +0300
Committer: Jinfeng Ni <jn...@apache.org>
Committed: Fri Jun 2 21:43:14 2017 -0700

----------------------------------------------------------------------
 .../drill/exec/expr/fn/FunctionInitializer.java |  70 +++++------
 .../drill/exec/expr/fn/ImportGrabber.java       |  27 ++--
 .../exec/expr/fn/FunctionInitializerTest.java   | 124 +++++++++++++++++++
 3 files changed, 178 insertions(+), 43 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/drill/blob/155820a4/exec/java-exec/src/main/java/org/apache/drill/exec/expr/fn/FunctionInitializer.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/expr/fn/FunctionInitializer.java b/exec/java-exec/src/main/java/org/apache/drill/exec/expr/fn/FunctionInitializer.java
index 4e5ee4f..9ca6dbd 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/expr/fn/FunctionInitializer.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/expr/fn/FunctionInitializer.java
@@ -1,4 +1,4 @@
-/**
+/*
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
  * distributed with this work for additional information
@@ -31,21 +31,18 @@ import org.codehaus.janino.Parser;
 import org.codehaus.janino.Scanner;
 import org.mortbay.util.IO;
 
-import com.google.common.collect.Maps;
-
 /**
  * To avoid the cost of initializing all functions up front,
- * this class contains all informations required to initializing a function when it is used.
+ * this class contains all information required to initialize a function when it is used.
  */
 public class FunctionInitializer {
-  static final org.slf4j.Logger logger = org.slf4j.LoggerFactory.getLogger(FunctionInitializer.class);
+  private static final org.slf4j.Logger logger = org.slf4j.LoggerFactory.getLogger(FunctionInitializer.class);
 
   private final String className;
   private final ClassLoader classLoader;
-  private Map<String, CompilationUnit> functionUnits = Maps.newHashMap();
   private Map<String, String> methods;
   private List<String> imports;
-  private volatile boolean ready;
+  private volatile boolean isLoaded;
 
   /**
    * @param className the fully qualified name of the class implementing the function
@@ -53,7 +50,6 @@ public class FunctionInitializer {
    *                    to prevent classpath collisions during loading an unloading jars
    */
   public FunctionInitializer(String className, ClassLoader classLoader) {
-    super();
     this.className = className;
     this.classLoader = classLoader;
   }
@@ -74,41 +70,43 @@ public class FunctionInitializer {
    * @return the imports of this class (for java code gen)
    */
   public List<String> getImports() {
-    checkInit();
+    loadFunctionBody();
     return imports;
   }
 
   /**
-   * @param methodName
+   * @param methodName method name
    * @return the content of the method (for java code gen inlining)
    */
   public String getMethod(String methodName) {
-    checkInit();
+    loadFunctionBody();
     return methods.get(methodName);
   }
 
-  private void checkInit() {
-    if (ready) {
+  /**
+   * Loads function body: methods (for instance, eval, setup, reset) and imports.
+   * Loading is done once per class instance upon first function invocation.
+   * Double-checked locking is used to avoid concurrency issues
+   * when two threads are trying to load the function body at the same time.
+   */
+  private void loadFunctionBody() {
+    if (isLoaded) {
       return;
     }
 
     synchronized (this) {
-      if (ready) {
+      if (isLoaded) {
         return;
       }
 
-      // get function body.
-
+      logger.trace("Getting function body for the {}", className);
       try {
         final Class<?> clazz = Class.forName(className, true, classLoader);
-        final CompilationUnit cu = get(clazz);
-
-        if (cu == null) {
-          throw new IOException(String.format("Failure while loading class %s.", clazz.getName()));
-        }
+        final CompilationUnit cu = convertToCompilationUnit(clazz);
 
         methods = MethodGrabbingVisitor.getMethods(cu, clazz);
-        imports = ImportGrabber.getMethods(cu);
+        imports = ImportGrabber.getImports(cu);
+        isLoaded = true;
 
       } catch (IOException | ClassNotFoundException e) {
         throw UserException.functionError(e)
@@ -119,20 +117,25 @@ public class FunctionInitializer {
     }
   }
 
-  private CompilationUnit get(Class<?> c) throws IOException {
-    String path = c.getName();
+  /**
+   * Uses the class name to generate the path to the class source code (*.java),
+   * reads its content as a string and parses it into {@link org.codehaus.janino.Java.CompilationUnit}.
+   *
+   * @param clazz function class
+   * @return compilation unit
+   * @throws IOException if the class source could not be found or loaded
+   */
+  private CompilationUnit convertToCompilationUnit(Class<?> clazz) throws IOException {
+    String path = clazz.getName();
     path = path.replaceFirst("\\$.*", "");
     path = path.replace(".", FileUtils.separator);
     path = "/" + path + ".java";
-    CompilationUnit cu = functionUnits.get(path);
-    if (cu != null) {
-      return cu;
-    }
 
-    try (InputStream is = c.getResourceAsStream(path)) {
+    logger.trace("Loading function code from the {}", path);
+    try (InputStream is = clazz.getResourceAsStream(path)) {
       if (is == null) {
         throw new IOException(String.format(
-            "Failure trying to located source code for Class %s, tried to read on classpath location %s", c.getName(),
+            "Failure trying to locate source code for class %s, tried to read on classpath location %s", clazz.getName(),
             path));
       }
       String body = IO.toString(is);
@@ -140,12 +143,9 @@ public class FunctionInitializer {
       // TODO: Hack to remove annotations so Janino doesn't choke. Need to reconsider this problem...
       body = body.replaceAll("@\\w+(?:\\([^\\\\]*?\\))?", "");
       try {
-        cu = new Parser(new Scanner(null, new StringReader(body))).parseCompilationUnit();
-        functionUnits.put(path, cu);
-        return cu;
+        return new Parser(new Scanner(null, new StringReader(body))).parseCompilationUnit();
       } catch (CompileException e) {
-        logger.warn("Failure while parsing function class:\n{}", body, e);
-        return null;
+        throw new IOException(String.format("Failure while loading class %s.", clazz.getName()), e);
       }
 
     }
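
For reference, the double-checked locking idiom that loadFunctionBody() adopts above boils down to the following minimal sketch. This is a standalone illustration with placeholder names, not Drill code; the volatile flag is what makes the lock-free first check safe.

  public class LazyHolder {
    // volatile guarantees that a thread seeing isLoaded == true also sees
    // the fully constructed body (safe publication).
    private volatile boolean isLoaded;
    private Object body;

    public Object getBody() {
      if (!isLoaded) {                  // first check, no lock taken
        synchronized (this) {
          if (!isLoaded) {              // second check, under the lock
            body = expensiveLoad();
            isLoaded = true;            // set last, after body is fully built
          }
        }
      }
      return body;
    }

    private Object expensiveLoad() { return new Object(); }
  }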

http://git-wip-us.apache.org/repos/asf/drill/blob/155820a4/exec/java-exec/src/main/java/org/apache/drill/exec/expr/fn/ImportGrabber.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/expr/fn/ImportGrabber.java b/exec/java-exec/src/main/java/org/apache/drill/exec/expr/fn/ImportGrabber.java
index d87e6fa..1437818 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/expr/fn/ImportGrabber.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/expr/fn/ImportGrabber.java
@@ -1,4 +1,4 @@
-/**
+/*
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
  * distributed with this work for additional information
@@ -29,16 +29,15 @@ import org.codehaus.janino.util.Traverser;
 import com.google.common.collect.Lists;
 
 
-public class ImportGrabber{
-  static final org.slf4j.Logger logger = org.slf4j.LoggerFactory.getLogger(ImportGrabber.class);
+public class ImportGrabber {
 
-  private List<String> imports = Lists.newArrayList();
+  private final List<String> imports = Lists.newArrayList();
   private final ImportFinder finder = new ImportFinder();
 
   private ImportGrabber() {
   }
 
-  public class ImportFinder extends Traverser{
+  public class ImportFinder extends Traverser {
 
     @Override
     public void traverseSingleTypeImportDeclaration(SingleTypeImportDeclaration stid) {
@@ -63,9 +62,21 @@ public class ImportGrabber{
 
   }
 
-  public static List<String> getMethods(Java.CompilationUnit cu){
-    ImportGrabber visitor = new ImportGrabber();
-    cu.getPackageMemberTypeDeclarations()[0].accept(visitor.finder.comprehensiveVisitor());
+  /**
+   * Creates a list of the imports present in the compilation unit.
+   * For example:
+   * [import io.netty.buffer.DrillBuf;, import org.apache.drill.exec.expr.DrillSimpleFunc;]
+   *
+   * @param compilationUnit compilation unit
+   * @return list of imports
+   */
+  public static List<String> getImports(Java.CompilationUnit compilationUnit){
+    final ImportGrabber visitor = new ImportGrabber();
+
+    for (Java.CompilationUnit.ImportDeclaration importDeclaration : compilationUnit.importDeclarations) {
+      importDeclaration.accept(visitor.finder.comprehensiveVisitor());
+    }
+
     return visitor.imports;
   }
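
A hedged usage sketch tying the two classes together: the Parser/Scanner calls mirror FunctionInitializer above, the source string is made up for illustration, and exception handling is elided (parseCompilationUnit() throws CompileException).

  import java.io.StringReader;
  import java.util.List;
  import org.codehaus.janino.Java;
  import org.codehaus.janino.Parser;
  import org.codehaus.janino.Scanner;

  // Inside a method: parse a made-up snippet and collect its imports.
  String source = "import java.util.List; public class Foo { List xs; }";
  Java.CompilationUnit cu =
      new Parser(new Scanner(null, new StringReader(source))).parseCompilationUnit();
  List<String> imports = ImportGrabber.getImports(cu);  // -> [import java.util.List;]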
 

http://git-wip-us.apache.org/repos/asf/drill/blob/155820a4/exec/java-exec/src/test/java/org/apache/drill/exec/expr/fn/FunctionInitializerTest.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/test/java/org/apache/drill/exec/expr/fn/FunctionInitializerTest.java b/exec/java-exec/src/test/java/org/apache/drill/exec/expr/fn/FunctionInitializerTest.java
new file mode 100644
index 0000000..2151095
--- /dev/null
+++ b/exec/java-exec/src/test/java/org/apache/drill/exec/expr/fn/FunctionInitializerTest.java
@@ -0,0 +1,124 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to you under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.drill.exec.expr.fn;
+
+import com.google.common.collect.Lists;
+import mockit.Invocation;
+import mockit.Mock;
+import mockit.MockUp;
+import mockit.integration.junit4.JMockit;
+import org.apache.drill.common.util.TestTools;
+import org.apache.drill.exec.util.JarUtil;
+import org.codehaus.janino.Java;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+
+import java.io.File;
+import java.net.URL;
+import java.net.URLClassLoader;
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Set;
+import java.util.concurrent.Callable;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.Future;
+import java.util.concurrent.atomic.AtomicInteger;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
+
+@RunWith(JMockit.class)
+public class FunctionInitializerTest {
+
+  private static final String CLASS_NAME = "com.drill.udf.CustomLowerFunction";
+  private static URLClassLoader classLoader;
+
+  @BeforeClass
+  public static void init() throws Exception {
+    File jars = new File(TestTools.getWorkingPath(), "/src/test/resources/jars");
+    String binaryName = "DrillUDF-1.0.jar";
+    String sourceName = JarUtil.getSourceName(binaryName);
+    URL[] urls = {new File(jars, binaryName).toURI().toURL(), new File(jars, sourceName).toURI().toURL()};
+    classLoader = new URLClassLoader(urls);
+  }
+
+  @Test
+  public void testGetImports() {
+    FunctionInitializer functionInitializer = new FunctionInitializer(CLASS_NAME, classLoader);
+    List<String> actualImports = functionInitializer.getImports();
+
+    List<String> expectedImports = Lists.newArrayList(
+        "import io.netty.buffer.DrillBuf;",
+        "import org.apache.drill.exec.expr.DrillSimpleFunc;",
+        "import org.apache.drill.exec.expr.annotations.FunctionTemplate;",
+        "import org.apache.drill.exec.expr.annotations.Output;",
+        "import org.apache.drill.exec.expr.annotations.Param;",
+        "import org.apache.drill.exec.expr.holders.VarCharHolder;",
+        "import javax.inject.Inject;"
+    );
+
+    assertEquals("List of imports should match", expectedImports, actualImports);
+  }
+
+  @Test
+  public void testGetMethod() {
+    FunctionInitializer functionInitializer = new FunctionInitializer(CLASS_NAME, classLoader);
+    String actualMethod = functionInitializer.getMethod("eval");
+    assertTrue("Method body should match", actualMethod.contains("CustomLowerFunction_eval:"));
+  }
+
+  @Test
+  public void testConcurrentFunctionBodyLoad() throws Exception {
+    final FunctionInitializer functionInitializer = new FunctionInitializer(CLASS_NAME, classLoader);
+
+    final AtomicInteger counter = new AtomicInteger();
+    new MockUp<FunctionInitializer>() {
+      @Mock
+      Java.CompilationUnit convertToCompilationUnit(Invocation inv, Class<?> clazz) {
+        counter.incrementAndGet();
+        return inv.proceed();
+      }
+    };
+
+    int threadsNumber = 5;
+    ExecutorService executor = Executors.newFixedThreadPool(threadsNumber);
+
+    try {
+      List<Future<String>> results = executor.invokeAll(Collections.nCopies(threadsNumber, new Callable<String>() {
+        @Override
+        public String call() throws Exception {
+          return functionInitializer.getMethod("eval");
+        }
+      }));
+
+      final Set<String> uniqueResults = new HashSet<>();
+      for (Future<String> result : results) {
+        uniqueResults.add(result.get());
+      }
+
+      assertEquals("All threads should have received the same result", 1, uniqueResults.size());
+      assertEquals("Number of function body loads should match", 1, counter.intValue());
+
+    } finally {
+      executor.shutdownNow();
+    }
+  }
+}


[12/12] drill git commit: DRILL-5229: Update kudu-client to 1.3.0

Posted by jn...@apache.org.
DRILL-5229: Update kudu-client to 1.3.0

closes #828


Project: http://git-wip-us.apache.org/repos/asf/drill/repo
Commit: http://git-wip-us.apache.org/repos/asf/drill/commit/dd2692ec
Tree: http://git-wip-us.apache.org/repos/asf/drill/tree/dd2692ec
Diff: http://git-wip-us.apache.org/repos/asf/drill/diff/dd2692ec

Branch: refs/heads/master
Commit: dd2692ecd57a180f33e51147afe063627552979e
Parents: 676ea88
Author: eskabetxe <bo...@boto.pro>
Authored: Sat May 6 13:41:36 2017 +0200
Committer: Jinfeng Ni <jn...@apache.org>
Committed: Fri Jun 2 21:43:14 2017 -0700

----------------------------------------------------------------------
 contrib/storage-kudu/pom.xml                    |  5 ++-
 .../codegen/templates/KuduRecordWriter.java     |  4 +--
 .../drill/exec/store/kudu/DrillKuduTable.java   | 14 ++++----
 .../drill/exec/store/kudu/KuduGroupScan.java    |  4 +--
 .../drill/exec/store/kudu/KuduRecordReader.java | 30 ++++++++--------
 .../exec/store/kudu/KuduRecordWriterImpl.java   | 37 ++++++++++----------
 .../exec/store/kudu/KuduSchemaFactory.java      |  6 ++--
 .../exec/store/kudu/KuduStoragePlugin.java      |  2 +-
 .../drill/store/kudu/TestKuduConnect.java       | 30 ++++++++--------
 9 files changed, 67 insertions(+), 65 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/drill/blob/dd2692ec/contrib/storage-kudu/pom.xml
----------------------------------------------------------------------
diff --git a/contrib/storage-kudu/pom.xml b/contrib/storage-kudu/pom.xml
index 2ba9cac..74e6eb8 100644
--- a/contrib/storage-kudu/pom.xml
+++ b/contrib/storage-kudu/pom.xml
@@ -18,7 +18,6 @@
   </parent>
 
   <artifactId>drill-kudu-storage</artifactId>
-
   <name>contrib/kudu-storage-plugin</name>
 
 
@@ -47,9 +46,9 @@
     </dependency>
 
     <dependency>
-      <groupId>org.kududb</groupId>
+      <groupId>org.apache.kudu</groupId>
       <artifactId>kudu-client</artifactId>
-      <version>0.6.0</version>
+      <version>1.3.0</version>
     </dependency>
 
   </dependencies>

http://git-wip-us.apache.org/repos/asf/drill/blob/dd2692ec/contrib/storage-kudu/src/main/codegen/templates/KuduRecordWriter.java
----------------------------------------------------------------------
diff --git a/contrib/storage-kudu/src/main/codegen/templates/KuduRecordWriter.java b/contrib/storage-kudu/src/main/codegen/templates/KuduRecordWriter.java
index 01c7c28..2b76cac 100644
--- a/contrib/storage-kudu/src/main/codegen/templates/KuduRecordWriter.java
+++ b/contrib/storage-kudu/src/main/codegen/templates/KuduRecordWriter.java
@@ -88,7 +88,7 @@ import java.lang.UnsupportedOperationException;
 import java.util.Arrays;
 import java.util.List;
 import java.util.Map;
-import org.kududb.client.*;
+import org.apache.kudu.client.*;
 import org.apache.drill.exec.store.*;
 
 public abstract class KuduRecordWriter extends AbstractRecordWriter implements RecordWriter {
@@ -157,7 +157,7 @@ public abstract class KuduRecordWriter extends AbstractRecordWriter implements R
             <#elseif minor.class == "VarChar" >
               byte[] bytes = new byte[holder.end - holder.start];
               holder.buffer.getBytes(holder.start, bytes);
-              row.addStringUtf8(fieldId, bytes);
+              row.addString(fieldId, new String(bytes));
             <#elseif minor.class == "VarBinary">
               byte[] bytes = new byte[holder.end - holder.start];
               holder.buffer.getBytes(holder.start, bytes);
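
One subtlety in the addString() change above: new String(bytes) decodes with the JVM's default charset, while Drill's VarChar bytes are UTF-8. A hedged sketch of an explicit variant (illustrative only, not the committed code):

  // Decode the VarChar bytes explicitly as UTF-8 rather than relying on the
  // platform default charset (java.nio.charset.StandardCharsets, Java 7+).
  row.addString(fieldId, new String(bytes, java.nio.charset.StandardCharsets.UTF_8));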

http://git-wip-us.apache.org/repos/asf/drill/blob/dd2692ec/contrib/storage-kudu/src/main/java/org/apache/drill/exec/store/kudu/DrillKuduTable.java
----------------------------------------------------------------------
diff --git a/contrib/storage-kudu/src/main/java/org/apache/drill/exec/store/kudu/DrillKuduTable.java b/contrib/storage-kudu/src/main/java/org/apache/drill/exec/store/kudu/DrillKuduTable.java
index 3fc69c6..8404aac 100644
--- a/contrib/storage-kudu/src/main/java/org/apache/drill/exec/store/kudu/DrillKuduTable.java
+++ b/contrib/storage-kudu/src/main/java/org/apache/drill/exec/store/kudu/DrillKuduTable.java
@@ -23,9 +23,9 @@ import org.apache.calcite.rel.type.RelDataType;
 import org.apache.calcite.rel.type.RelDataTypeFactory;
 import org.apache.calcite.sql.type.SqlTypeName;
 import org.apache.drill.exec.planner.logical.DynamicDrillTable;
-import org.kududb.ColumnSchema;
-import org.kududb.Schema;
-import org.kududb.Type;
+import org.apache.kudu.ColumnSchema;
+import org.apache.kudu.Schema;
+import org.apache.kudu.Type;
 
 import com.google.common.collect.Lists;
 
@@ -56,8 +56,6 @@ public class DrillKuduTable extends DynamicDrillTable {
 
   private RelDataType getSqlTypeFromKuduType(RelDataTypeFactory typeFactory, Type type) {
     switch (type) {
-    case BINARY:
-      return typeFactory.createSqlType(SqlTypeName.VARBINARY, Integer.MAX_VALUE);
     case BOOL:
       return typeFactory.createSqlType(SqlTypeName.BOOLEAN);
     case DOUBLE:
@@ -70,9 +68,11 @@ public class DrillKuduTable extends DynamicDrillTable {
     case INT8:
       return typeFactory.createSqlType(SqlTypeName.INTEGER);
     case STRING:
-      return typeFactory.createSqlType(SqlTypeName.VARCHAR, Integer.MAX_VALUE);
-    case TIMESTAMP:
+      return typeFactory.createSqlType(SqlTypeName.VARCHAR);
+    case UNIXTIME_MICROS:
       return typeFactory.createSqlType(SqlTypeName.TIMESTAMP);
+    case BINARY:
+      return typeFactory.createSqlType(SqlTypeName.VARBINARY, Integer.MAX_VALUE);
     default:
       throw new UnsupportedOperationException("Unsupported type.");
     }

http://git-wip-us.apache.org/repos/asf/drill/blob/dd2692ec/contrib/storage-kudu/src/main/java/org/apache/drill/exec/store/kudu/KuduGroupScan.java
----------------------------------------------------------------------
diff --git a/contrib/storage-kudu/src/main/java/org/apache/drill/exec/store/kudu/KuduGroupScan.java b/contrib/storage-kudu/src/main/java/org/apache/drill/exec/store/kudu/KuduGroupScan.java
index 873f216..dfc3c44 100644
--- a/contrib/storage-kudu/src/main/java/org/apache/drill/exec/store/kudu/KuduGroupScan.java
+++ b/contrib/storage-kudu/src/main/java/org/apache/drill/exec/store/kudu/KuduGroupScan.java
@@ -51,8 +51,8 @@ import org.apache.drill.exec.store.schedule.AssignmentCreator;
 import org.apache.drill.exec.store.schedule.CompleteWork;
 import org.apache.drill.exec.store.schedule.EndpointByteMap;
 import org.apache.drill.exec.store.schedule.EndpointByteMapImpl;
-import org.kududb.client.LocatedTablet;
-import org.kududb.client.LocatedTablet.Replica;
+import org.apache.kudu.client.LocatedTablet;
+import org.apache.kudu.client.LocatedTablet.Replica;
 
 @JsonTypeName("kudu-scan")
 public class KuduGroupScan extends AbstractGroupScan {

http://git-wip-us.apache.org/repos/asf/drill/blob/dd2692ec/contrib/storage-kudu/src/main/java/org/apache/drill/exec/store/kudu/KuduRecordReader.java
----------------------------------------------------------------------
diff --git a/contrib/storage-kudu/src/main/java/org/apache/drill/exec/store/kudu/KuduRecordReader.java b/contrib/storage-kudu/src/main/java/org/apache/drill/exec/store/kudu/KuduRecordReader.java
index 541daa4..ef7efcf 100644
--- a/contrib/storage-kudu/src/main/java/org/apache/drill/exec/store/kudu/KuduRecordReader.java
+++ b/contrib/storage-kudu/src/main/java/org/apache/drill/exec/store/kudu/KuduRecordReader.java
@@ -52,16 +52,16 @@ import org.apache.drill.exec.vector.TimeStampVector;
 import org.apache.drill.exec.vector.ValueVector;
 import org.apache.drill.exec.vector.VarBinaryVector;
 import org.apache.drill.exec.vector.VarCharVector;
-import org.kududb.ColumnSchema;
-import org.kududb.Schema;
-import org.kududb.Type;
-import org.kududb.client.KuduClient;
-import org.kududb.client.KuduScanner;
-import org.kududb.client.KuduScanner.KuduScannerBuilder;
-import org.kududb.client.KuduTable;
-import org.kududb.client.RowResult;
-import org.kududb.client.RowResultIterator;
-import org.kududb.client.shaded.com.google.common.collect.ImmutableMap;
+import org.apache.kudu.ColumnSchema;
+import org.apache.kudu.Schema;
+import org.apache.kudu.Type;
+import org.apache.kudu.client.KuduClient;
+import org.apache.kudu.client.KuduScanner;
+import org.apache.kudu.client.KuduScanner.KuduScannerBuilder;
+import org.apache.kudu.client.KuduTable;
+import org.apache.kudu.client.RowResult;
+import org.apache.kudu.client.RowResultIterator;
+import org.apache.kudu.client.shaded.com.google.common.collect.ImmutableMap;
 
 import com.google.common.collect.ImmutableList;
 import com.google.common.collect.Lists;
@@ -114,8 +114,8 @@ public class KuduRecordReader extends AbstractRecordReader {
       context.getStats().startWait();
       try {
         scanner = builder
-            .lowerBoundPartitionKeyRaw(scanSpec.getStartKey())
-            .exclusiveUpperBoundPartitionKeyRaw(scanSpec.getEndKey())
+            .lowerBoundRaw(scanSpec.getStartKey())
+            .exclusiveUpperBoundRaw(scanSpec.getEndKey())
             .build();
       } finally {
         context.getStats().stopWait();
@@ -138,7 +138,7 @@ public class KuduRecordReader extends AbstractRecordReader {
         .put(Type.INT32, MinorType.INT)
         .put(Type.INT64, MinorType.BIGINT)
         .put(Type.STRING, MinorType.VARCHAR)
-        .put(Type.TIMESTAMP, MinorType.TIMESTAMP)
+        .put(Type.UNIXTIME_MICROS, MinorType.TIMESTAMP)
         .build();
   }
 
@@ -236,7 +236,7 @@ public class KuduRecordReader extends AbstractRecordReader {
         break;
       }
       case STRING: {
-        ByteBuffer value = result.getBinary(pci.index);
+        ByteBuffer value = ByteBuffer.wrap(result.getString(pci.index).getBytes());
         if (pci.kuduColumn.isNullable()) {
           ((NullableVarCharVector.Mutator) pci.vv.getMutator())
               .setSafe(rowIndex, value, 0, value.remaining());
@@ -309,7 +309,7 @@ public class KuduRecordReader extends AbstractRecordReader {
               .setSafe(rowIndex, result.getLong(pci.index));
         }
         break;
-      case TIMESTAMP:
+      case UNIXTIME_MICROS:
         if (pci.kuduColumn.isNullable()) {
           ((NullableTimeStampVector.Mutator) pci.vv.getMutator())
               .setSafe(rowIndex, result.getLong(pci.index) / 1000);
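
The division by 1000 above reflects a unit change: Kudu's UNIXTIME_MICROS column stores microseconds since the epoch, while Drill's TIMESTAMP vector holds milliseconds. A worked example with an illustrative value:

  // Kudu UNIXTIME_MICROS -> Drill TIMESTAMP: microseconds to milliseconds.
  long kuduMicros = 1_496_448_000_000_000L;  // illustrative: micros since epoch
  long drillMillis = kuduMicros / 1000;      // 1_496_448_000_000L millis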

http://git-wip-us.apache.org/repos/asf/drill/blob/dd2692ec/contrib/storage-kudu/src/main/java/org/apache/drill/exec/store/kudu/KuduRecordWriterImpl.java
----------------------------------------------------------------------
diff --git a/contrib/storage-kudu/src/main/java/org/apache/drill/exec/store/kudu/KuduRecordWriterImpl.java b/contrib/storage-kudu/src/main/java/org/apache/drill/exec/store/kudu/KuduRecordWriterImpl.java
index 6b39cc5..2e40acf 100644
--- a/contrib/storage-kudu/src/main/java/org/apache/drill/exec/store/kudu/KuduRecordWriterImpl.java
+++ b/contrib/storage-kudu/src/main/java/org/apache/drill/exec/store/kudu/KuduRecordWriterImpl.java
@@ -17,11 +17,6 @@
  */
 package org.apache.drill.exec.store.kudu;
 
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.Map;
-
 import org.apache.drill.common.exceptions.UserException;
 import org.apache.drill.common.types.TypeProtos.DataMode;
 import org.apache.drill.common.types.TypeProtos.MajorType;
@@ -29,15 +24,21 @@ import org.apache.drill.exec.ops.OperatorContext;
 import org.apache.drill.exec.record.BatchSchema;
 import org.apache.drill.exec.record.MaterializedField;
 import org.apache.drill.exec.record.VectorAccessible;
-import org.kududb.ColumnSchema;
-import org.kududb.Schema;
-import org.kududb.Type;
-import org.kududb.client.Insert;
-import org.kududb.client.KuduClient;
-import org.kududb.client.KuduSession;
-import org.kududb.client.KuduTable;
-import org.kududb.client.OperationResponse;
-import org.kududb.client.SessionConfiguration.FlushMode;
+import org.apache.kudu.ColumnSchema;
+import org.apache.kudu.Schema;
+import org.apache.kudu.Type;
+import org.apache.kudu.client.Insert;
+import org.apache.kudu.client.KuduClient;
+import org.apache.kudu.client.KuduSession;
+import org.apache.kudu.client.KuduTable;
+import org.apache.kudu.client.CreateTableOptions;
+import org.apache.kudu.client.OperationResponse;
+import org.apache.kudu.client.SessionConfiguration.FlushMode;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
 
 public class KuduRecordWriterImpl extends KuduRecordWriter {
   static final org.slf4j.Logger logger = org.slf4j.LoggerFactory.getLogger(KuduRecordWriterImpl.class);
@@ -81,7 +82,7 @@ public class KuduRecordWriterImpl extends KuduRecordWriter {
           i++;
         }
         Schema kuduSchema = new Schema(columns);
-        table = client.createTable(name, kuduSchema);
+        table = client.createTable(name, kuduSchema, new CreateTableOptions());
       }
     } catch (Exception e) {
       throw new IOException(e);
@@ -113,11 +114,11 @@ public class KuduRecordWriterImpl extends KuduRecordWriter {
     case INT:
       return Type.INT32;
     case TIMESTAMP:
-      return Type.TIMESTAMP;
-    case VARBINARY:
-      return Type.BINARY;
+      return Type.UNIXTIME_MICROS;
     case VARCHAR:
       return Type.STRING;
+    case VARBINARY:
+      return Type.BINARY;
     default:
       throw UserException
         .dataWriteError()

http://git-wip-us.apache.org/repos/asf/drill/blob/dd2692ec/contrib/storage-kudu/src/main/java/org/apache/drill/exec/store/kudu/KuduSchemaFactory.java
----------------------------------------------------------------------
diff --git a/contrib/storage-kudu/src/main/java/org/apache/drill/exec/store/kudu/KuduSchemaFactory.java b/contrib/storage-kudu/src/main/java/org/apache/drill/exec/store/kudu/KuduSchemaFactory.java
index 34e5b2a..4d9caf3 100644
--- a/contrib/storage-kudu/src/main/java/org/apache/drill/exec/store/kudu/KuduSchemaFactory.java
+++ b/contrib/storage-kudu/src/main/java/org/apache/drill/exec/store/kudu/KuduSchemaFactory.java
@@ -31,9 +31,9 @@ import org.apache.drill.exec.planner.logical.CreateTableEntry;
 import org.apache.drill.exec.store.AbstractSchema;
 import org.apache.drill.exec.store.SchemaConfig;
 import org.apache.drill.exec.store.SchemaFactory;
-import org.kududb.Schema;
-import org.kududb.client.KuduTable;
-import org.kududb.client.ListTablesResponse;
+import org.apache.kudu.Schema;
+import org.apache.kudu.client.KuduTable;
+import org.apache.kudu.client.ListTablesResponse;
 
 import com.google.common.collect.ImmutableList;
 import com.google.common.collect.Sets;

http://git-wip-us.apache.org/repos/asf/drill/blob/dd2692ec/contrib/storage-kudu/src/main/java/org/apache/drill/exec/store/kudu/KuduStoragePlugin.java
----------------------------------------------------------------------
diff --git a/contrib/storage-kudu/src/main/java/org/apache/drill/exec/store/kudu/KuduStoragePlugin.java b/contrib/storage-kudu/src/main/java/org/apache/drill/exec/store/kudu/KuduStoragePlugin.java
index 15aa469..0d98755 100644
--- a/contrib/storage-kudu/src/main/java/org/apache/drill/exec/store/kudu/KuduStoragePlugin.java
+++ b/contrib/storage-kudu/src/main/java/org/apache/drill/exec/store/kudu/KuduStoragePlugin.java
@@ -24,7 +24,7 @@ import org.apache.drill.common.JSONOptions;
 import org.apache.drill.exec.server.DrillbitContext;
 import org.apache.drill.exec.store.AbstractStoragePlugin;
 import org.apache.drill.exec.store.SchemaConfig;
-import org.kududb.client.KuduClient;
+import org.apache.kudu.client.KuduClient;
 
 import com.fasterxml.jackson.core.type.TypeReference;
 import com.fasterxml.jackson.databind.ObjectMapper;

http://git-wip-us.apache.org/repos/asf/drill/blob/dd2692ec/contrib/storage-kudu/src/test/java/org/apache/drill/store/kudu/TestKuduConnect.java
----------------------------------------------------------------------
diff --git a/contrib/storage-kudu/src/test/java/org/apache/drill/store/kudu/TestKuduConnect.java b/contrib/storage-kudu/src/test/java/org/apache/drill/store/kudu/TestKuduConnect.java
index 0ee0134..2391fc9 100644
--- a/contrib/storage-kudu/src/test/java/org/apache/drill/store/kudu/TestKuduConnect.java
+++ b/contrib/storage-kudu/src/test/java/org/apache/drill/store/kudu/TestKuduConnect.java
@@ -18,24 +18,25 @@
 package org.apache.drill.store.kudu;
 
 import java.util.ArrayList;
+import java.util.Arrays;
 import java.util.List;
 
 import org.junit.Ignore;
 import org.junit.Test;
-import org.kududb.ColumnSchema;
-import org.kududb.Schema;
-import org.kududb.Type;
-import org.kududb.client.CreateTableOptions;
-import org.kududb.client.Insert;
-import org.kududb.client.KuduClient;
-import org.kududb.client.KuduScanner;
-import org.kududb.client.KuduSession;
-import org.kududb.client.KuduTable;
-import org.kududb.client.ListTablesResponse;
-import org.kududb.client.PartialRow;
-import org.kududb.client.RowResult;
-import org.kududb.client.RowResultIterator;
-import org.kududb.client.SessionConfiguration;
+import org.apache.kudu.ColumnSchema;
+import org.apache.kudu.Schema;
+import org.apache.kudu.Type;
+import org.apache.kudu.client.CreateTableOptions;
+import org.apache.kudu.client.Insert;
+import org.apache.kudu.client.KuduClient;
+import org.apache.kudu.client.KuduScanner;
+import org.apache.kudu.client.KuduSession;
+import org.apache.kudu.client.KuduTable;
+import org.apache.kudu.client.ListTablesResponse;
+import org.apache.kudu.client.PartialRow;
+import org.apache.kudu.client.RowResult;
+import org.apache.kudu.client.RowResultIterator;
+import org.apache.kudu.client.SessionConfiguration;
 
 @Ignore("requires remote kudu server")
 public class TestKuduConnect {
@@ -63,6 +64,7 @@ public class TestKuduConnect {
 
       CreateTableOptions builder = new CreateTableOptions();
       builder.setNumReplicas(replicas);
+      builder.setRangePartitionColumns(Arrays.asList("key"));
       for (int i = 1; i < tablets; i++) {
         PartialRow splitRow = schema.newPartialRow();
         splitRow.addInt("key", i*1000);


[04/12] drill git commit: DRILL-5481: Allow to persist profiles in-memory only with a max capacity

Posted by jn...@apache.org.
DRILL-5481: Allow to persist profiles in-memory only with a max capacity

1. Introduced an InMemoryStoreProvider with the ability to maintain a max capacity
2. DrillbitContext now explicitly has a profileStoreProvider that, by default, re-uses the general PersistentStoreProvider, unless it is InMemory, in which case #1 is used.
3. Cleanly separated out QueryProfileStoreContext
4. Converted literal values to constants within ExecConstants
5. Updated drill-module.conf for default capacity

closes #834


Project: http://git-wip-us.apache.org/repos/asf/drill/repo
Commit: http://git-wip-us.apache.org/repos/asf/drill/commit/9ba4af86
Tree: http://git-wip-us.apache.org/repos/asf/drill/tree/9ba4af86
Diff: http://git-wip-us.apache.org/repos/asf/drill/diff/9ba4af86

Branch: refs/heads/master
Commit: 9ba4af860e3def8f880eef13e353a730cb3b18ea
Parents: d7bc213
Author: Kunal Khatua <kk...@maprtech.com>
Authored: Mon May 15 13:33:49 2017 -0700
Committer: Jinfeng Ni <jn...@apache.org>
Committed: Fri Jun 2 21:43:14 2017 -0700

----------------------------------------------------------------------
 .../org/apache/drill/exec/ExecConstants.java    |   2 +
 .../org/apache/drill/exec/ops/QueryContext.java |   5 +
 .../org/apache/drill/exec/server/Drillbit.java  |  20 ++-
 .../drill/exec/server/DrillbitContext.java      |  22 ++-
 .../exec/server/QueryProfileStoreContext.java   |  79 ++++++++++
 .../server/rest/profile/ProfileResources.java   |  14 +-
 .../exec/store/sys/PersistentStoreConfig.java   |  16 ++-
 .../exec/store/sys/store/InMemoryStore.java     | 143 +++++++++++++++++++
 .../store/provider/InMemoryStoreProvider.java   |  51 +++++++
 .../org/apache/drill/exec/work/WorkManager.java |   5 +-
 .../drill/exec/work/foreman/QueryManager.java   |  32 ++---
 .../src/main/resources/drill-module.conf        |   4 +
 12 files changed, 357 insertions(+), 36 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/drill/blob/9ba4af86/exec/java-exec/src/main/java/org/apache/drill/exec/ExecConstants.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/ExecConstants.java b/exec/java-exec/src/main/java/org/apache/drill/exec/ExecConstants.java
index 83ffb20..ba98532 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/ExecConstants.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/ExecConstants.java
@@ -107,6 +107,8 @@ public interface ExecConstants {
   String SYS_STORE_PROVIDER_CLASS = "drill.exec.sys.store.provider.class";
   String SYS_STORE_PROVIDER_LOCAL_PATH = "drill.exec.sys.store.provider.local.path";
   String SYS_STORE_PROVIDER_LOCAL_ENABLE_WRITE = "drill.exec.sys.store.provider.local.write";
+  String PROFILES_STORE_INMEMORY = "drill.exec.profiles.store.inmemory";
+  String PROFILES_STORE_CAPACITY = "drill.exec.profiles.store.capacity";
   String IMPERSONATION_ENABLED = "drill.exec.impersonation.enabled";
   String IMPERSONATION_MAX_CHAINED_USER_HOPS = "drill.exec.impersonation.max_chained_user_hops";
   String AUTHENTICATION_MECHANISMS = "drill.exec.security.auth.mechanisms";
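
These two keys are given defaults in drill-module.conf (see the file list above). A hedged drill-override.conf sketch enabling the in-memory profile store; the capacity value here is illustrative, not the shipped default:

  # Keep query profiles in memory only, bounded to the latest N entries.
  drill.exec.profiles.store.inmemory: true
  drill.exec.profiles.store.capacity: 1000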

http://git-wip-us.apache.org/repos/asf/drill/blob/9ba4af86/exec/java-exec/src/main/java/org/apache/drill/exec/ops/QueryContext.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/ops/QueryContext.java b/exec/java-exec/src/main/java/org/apache/drill/exec/ops/QueryContext.java
index df3f4f4..0dbeea5 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/ops/QueryContext.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/ops/QueryContext.java
@@ -39,6 +39,7 @@ import org.apache.drill.exec.proto.UserBitShared.QueryId;
 import org.apache.drill.exec.proto.helper.QueryIdHelper;
 import org.apache.drill.exec.rpc.user.UserSession;
 import org.apache.drill.exec.server.DrillbitContext;
+import org.apache.drill.exec.server.QueryProfileStoreContext;
 import org.apache.drill.exec.server.options.OptionManager;
 import org.apache.drill.exec.server.options.OptionValue;
 import org.apache.drill.exec.server.options.QueryOptionManager;
@@ -209,6 +210,10 @@ public class QueryContext implements AutoCloseable, OptimizerRulesContext, Schem
     return drillbitContext.getConfig();
   }
 
+  public QueryProfileStoreContext getProfileStoreContext() {
+    return drillbitContext.getProfileStoreContext();
+  }
+
   @Override
   public FunctionImplementationRegistry getFunctionRegistry() {
     return drillbitContext.getFunctionImplementationRegistry();

http://git-wip-us.apache.org/repos/asf/drill/blob/9ba4af86/exec/java-exec/src/main/java/org/apache/drill/exec/server/Drillbit.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/server/Drillbit.java b/exec/java-exec/src/main/java/org/apache/drill/exec/server/Drillbit.java
index f225714..0d341df 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/server/Drillbit.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/server/Drillbit.java
@@ -38,6 +38,7 @@ import org.apache.drill.exec.server.rest.WebServer;
 import org.apache.drill.exec.service.ServiceEngine;
 import org.apache.drill.exec.store.StoragePluginRegistry;
 import org.apache.drill.exec.store.sys.store.provider.CachingPersistentStoreProvider;
+import org.apache.drill.exec.store.sys.store.provider.InMemoryStoreProvider;
 import org.apache.drill.exec.store.sys.PersistentStoreProvider;
 import org.apache.drill.exec.store.sys.PersistentStoreRegistry;
 import org.apache.drill.exec.store.sys.store.provider.LocalPersistentStoreProvider;
@@ -76,6 +77,7 @@ public class Drillbit implements AutoCloseable {
   private final WebServer webServer;
   private RegistrationHandle registrationHandle;
   private volatile StoragePluginRegistry storageRegistry;
+  private final PersistentStoreProvider profileStoreProvider;
 
   @VisibleForTesting
   public Drillbit(
@@ -105,6 +107,14 @@ public class Drillbit implements AutoCloseable {
       isDistributedMode = true;
     }
 
+    //Check if InMemory Profile Store, else use Default Store Provider
+    if (config.getBoolean(ExecConstants.PROFILES_STORE_INMEMORY)) {
+      profileStoreProvider = new InMemoryStoreProvider(config.getInt(ExecConstants.PROFILES_STORE_CAPACITY));
+      logger.info("Upto {} latest query profiles will be retained in-memory", config.getInt(ExecConstants.PROFILES_STORE_CAPACITY));
+    } else {
+      profileStoreProvider = storeProvider;
+    }
+
     engine = new ServiceEngine(manager, context, allowPortHunting, isDistributedMode);
 
     logger.info("Construction completed ({} ms).", w.elapsed(TimeUnit.MILLISECONDS));
@@ -115,8 +125,11 @@ public class Drillbit implements AutoCloseable {
     logger.debug("Startup begun.");
     coord.start(10000);
     storeProvider.start();
+    if (profileStoreProvider != storeProvider) {
+      profileStoreProvider.start();
+    }
     final DrillbitEndpoint md = engine.start();
-    manager.start(md, engine.getController(), engine.getDataConnectionCreator(), coord, storeProvider);
+    manager.start(md, engine.getController(), engine.getDataConnectionCreator(), coord, storeProvider, profileStoreProvider);
     final DrillbitContext drillbitContext = manager.getContext();
     storageRegistry = drillbitContext.getStorage();
     storageRegistry.init();
@@ -164,6 +177,11 @@ public class Drillbit implements AutoCloseable {
           manager,
           storageRegistry,
           context);
+
+      //Closing the profile store provider if distinct
+      if (storeProvider != profileStoreProvider) {
+        AutoCloseables.close(profileStoreProvider);
+      }
     } catch(Exception e) {
       logger.warn("Failure on close()", e);
     }

http://git-wip-us.apache.org/repos/asf/drill/blob/9ba4af86/exec/java-exec/src/main/java/org/apache/drill/exec/server/DrillbitContext.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/server/DrillbitContext.java b/exec/java-exec/src/main/java/org/apache/drill/exec/server/DrillbitContext.java
index 6c68ab2..b8d3e68 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/server/DrillbitContext.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/server/DrillbitContext.java
@@ -65,7 +65,7 @@ public class DrillbitContext implements AutoCloseable {
   private final LogicalPlanPersistence lpPersistence;
   // operator table for standard SQL operators and functions, Drill built-in UDFs
   private final DrillOperatorTable table;
-
+  private final QueryProfileStoreContext profileStoreContext;
 
   public DrillbitContext(
       DrillbitEndpoint endpoint,
@@ -75,6 +75,19 @@ public class DrillbitContext implements AutoCloseable {
       DataConnectionCreator connectionsPool,
       WorkEventBus workBus,
       PersistentStoreProvider provider) {
+    //PersistentStoreProvider is re-used for providing Query Profile Store as well
+    this(endpoint, context, coord, controller, connectionsPool, workBus, provider, provider);
+  }
+
+  public DrillbitContext(
+      DrillbitEndpoint endpoint,
+      BootStrapContext context,
+      ClusterCoordinator coord,
+      Controller controller,
+      DataConnectionCreator connectionsPool,
+      WorkEventBus workBus,
+      PersistentStoreProvider provider,
+      PersistentStoreProvider profileStoreProvider) {
     this.classpathScan = context.getClasspathScan();
     this.workBus = workBus;
     this.controller = checkNotNull(controller);
@@ -97,6 +110,13 @@ public class DrillbitContext implements AutoCloseable {
 
     // This operator table is built once and used for all queries which do not need dynamic UDF support.
     this.table = new DrillOperatorTable(functionRegistry, systemOptions);
+
+    //This profile store context is built from the profileStoreProvider
+    this.profileStoreContext = new QueryProfileStoreContext(context.getConfig(), profileStoreProvider, coord);
+  }
+
+  public QueryProfileStoreContext getProfileStoreContext() {
+    return profileStoreContext;
   }
 
   public FunctionImplementationRegistry getFunctionImplementationRegistry() {

http://git-wip-us.apache.org/repos/asf/drill/blob/9ba4af86/exec/java-exec/src/main/java/org/apache/drill/exec/server/QueryProfileStoreContext.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/server/QueryProfileStoreContext.java b/exec/java-exec/src/main/java/org/apache/drill/exec/server/QueryProfileStoreContext.java
new file mode 100644
index 0000000..7f282d5
--- /dev/null
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/server/QueryProfileStoreContext.java
@@ -0,0 +1,79 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.drill.exec.server;
+
+import org.apache.drill.common.config.DrillConfig;
+import org.apache.drill.common.exceptions.DrillRuntimeException;
+import org.apache.drill.exec.ExecConstants;
+import org.apache.drill.exec.coord.ClusterCoordinator;
+import org.apache.drill.exec.coord.store.TransientStore;
+import org.apache.drill.exec.coord.store.TransientStoreConfig;
+import org.apache.drill.exec.proto.SchemaUserBitShared;
+import org.apache.drill.exec.proto.UserBitShared;
+import org.apache.drill.exec.proto.UserBitShared.QueryInfo;
+import org.apache.drill.exec.proto.UserBitShared.QueryProfile;
+import org.apache.drill.exec.store.sys.PersistentStore;
+import org.apache.drill.exec.store.sys.PersistentStoreConfig;
+import org.apache.drill.exec.store.sys.PersistentStoreProvider;
+import org.apache.drill.exec.store.sys.PersistentStoreConfig.StoreConfigBuilder;
+
+public class QueryProfileStoreContext {
+  private static final org.slf4j.Logger logger = org.slf4j.LoggerFactory.getLogger(QueryProfileStoreContext.class);
+
+  private static final String PROFILES = "profiles";
+
+  private static final String RUNNING = "running";
+
+  private final PersistentStore<UserBitShared.QueryProfile> completedProfiles;
+
+  private final TransientStore<UserBitShared.QueryInfo> runningProfiles;
+
+  private final PersistentStoreConfig<QueryProfile> profileStoreConfig;
+
+  public QueryProfileStoreContext(DrillConfig config, PersistentStoreProvider storeProvider,
+                                  ClusterCoordinator coordinator) {
+    profileStoreConfig = PersistentStoreConfig.newProtoBuilder(SchemaUserBitShared.QueryProfile.WRITE,
+        SchemaUserBitShared.QueryProfile.MERGE)
+        .name(PROFILES)
+        .blob()
+        .build();
+
+    try {
+      completedProfiles = storeProvider.getOrCreateStore(profileStoreConfig);
+    } catch (final Exception e) {
+      throw new DrillRuntimeException(e);
+    }
+
+    runningProfiles = coordinator.getOrCreateTransientStore(TransientStoreConfig
+        .newProtoBuilder(SchemaUserBitShared.QueryInfo.WRITE, SchemaUserBitShared.QueryInfo.MERGE)
+        .name(RUNNING)
+        .build());
+  }
+
+  public PersistentStoreConfig<QueryProfile> getProfileStoreConfig() {
+    return profileStoreConfig;
+  }
+
+  public PersistentStore<QueryProfile> getCompletedProfileStore() {
+    return completedProfiles;
+  }
+
+  public TransientStore<QueryInfo> getRunningProfileStore() {
+    return runningProfiles;
+  }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/drill/blob/9ba4af86/exec/java-exec/src/main/java/org/apache/drill/exec/server/rest/profile/ProfileResources.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/server/rest/profile/ProfileResources.java b/exec/java-exec/src/main/java/org/apache/drill/exec/server/rest/profile/ProfileResources.java
index 044b792..468ec56 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/server/rest/profile/ProfileResources.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/server/rest/profile/ProfileResources.java
@@ -48,6 +48,7 @@ import org.apache.drill.exec.proto.UserBitShared.QueryInfo;
 import org.apache.drill.exec.proto.UserBitShared.QueryProfile;
 import org.apache.drill.exec.proto.helper.QueryIdHelper;
 import org.apache.drill.exec.server.rest.DrillRestServer.UserAuthEnabled;
+import org.apache.drill.exec.server.QueryProfileStoreContext;
 import org.apache.drill.exec.server.rest.ViewableWithPermissions;
 import org.apache.drill.exec.server.rest.auth.DrillUserPrincipal;
 import org.apache.drill.exec.store.sys.PersistentStore;
@@ -180,8 +181,9 @@ public class ProfileResources {
   @Produces(MediaType.APPLICATION_JSON)
   public QProfiles getProfilesJSON(@Context UriInfo uriInfo) {
     try {
-      final PersistentStore<QueryProfile> completed = getProvider().getOrCreateStore(QueryManager.QUERY_PROFILE);
-      final TransientStore<QueryInfo> running = getCoordinator().getOrCreateTransientStore(QueryManager.RUNNING_QUERY_INFO);
+      final QueryProfileStoreContext profileStoreContext = work.getContext().getProfileStoreContext();
+      final PersistentStore<QueryProfile> completed = profileStoreContext.getCompletedProfileStore();
+      final TransientStore<QueryInfo> running = profileStoreContext.getRunningProfileStore();
 
       final List<String> errors = Lists.newArrayList();
 
@@ -258,7 +260,7 @@ public class ProfileResources {
 
     // then check remote running
     try {
-      final TransientStore<QueryInfo> running = getCoordinator().getOrCreateTransientStore(QueryManager.RUNNING_QUERY_INFO);
+      final TransientStore<QueryInfo> running = work.getContext().getProfileStoreContext().getRunningProfileStore();
       final QueryInfo info = running.get(queryId);
       if (info != null) {
         QueryProfile queryProfile = work.getContext()
@@ -275,7 +277,7 @@ public class ProfileResources {
 
     // then check blob store
     try {
-      final PersistentStore<QueryProfile> profiles = getProvider().getOrCreateStore(QueryManager.QUERY_PROFILE);
+      final PersistentStore<QueryProfile> profiles = work.getContext().getProfileStoreContext().getCompletedProfileStore();
       final QueryProfile queryProfile = profiles.get(queryId);
       if (queryProfile != null) {
         checkOrThrowProfileViewAuthorization(queryProfile);
@@ -296,7 +298,7 @@ public class ProfileResources {
   @Produces(MediaType.APPLICATION_JSON)
   public String getProfileJSON(@PathParam("queryid") String queryId) {
     try {
-      return new String(QueryManager.QUERY_PROFILE.getSerializer().serialize(getQueryProfile(queryId)));
+      return new String(work.getContext().getProfileStoreContext().getProfileStoreConfig().getSerializer().serialize(getQueryProfile(queryId)));
     } catch (Exception e) {
       logger.debug("Failed to serialize profile for: " + queryId);
       return ("{ 'message' : 'error (unable to serialize profile)' }");
@@ -329,7 +331,7 @@ public class ProfileResources {
 
     // then check remote running
     try {
-      final TransientStore<QueryInfo> running = getCoordinator().getOrCreateTransientStore(QueryManager.RUNNING_QUERY_INFO);
+      final TransientStore<QueryInfo> running = work.getContext().getProfileStoreContext().getRunningProfileStore();
       final QueryInfo info = running.get(queryId);
       checkOrThrowQueryCancelAuthorization(info.getUser(), queryId);
       Ack a = work.getContext().getController().getTunnel(info.getForeman()).requestCancelQuery(id).checkedGet(2, TimeUnit.SECONDS);

http://git-wip-us.apache.org/repos/asf/drill/blob/9ba4af86/exec/java-exec/src/main/java/org/apache/drill/exec/store/sys/PersistentStoreConfig.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/store/sys/PersistentStoreConfig.java b/exec/java-exec/src/main/java/org/apache/drill/exec/store/sys/PersistentStoreConfig.java
index 00a75a2..3b5e7ca 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/store/sys/PersistentStoreConfig.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/store/sys/PersistentStoreConfig.java
@@ -38,11 +38,17 @@ public class PersistentStoreConfig<V> {
   private final String name;
   private final InstanceSerializer<V> valueSerializer;
   private final PersistentStoreMode mode;
+  private final int capacity;
 
-  protected PersistentStoreConfig(String name, InstanceSerializer<V> valueSerializer, PersistentStoreMode mode) {
+  protected PersistentStoreConfig(String name, InstanceSerializer<V> valueSerializer, PersistentStoreMode mode, int capacity) {
     this.name = name;
     this.valueSerializer = valueSerializer;
     this.mode = mode;
+    this.capacity = capacity;
+  }
+
+  public int getCapacity() {
+    return capacity;
   }
 
   public PersistentStoreMode getMode() {
@@ -85,6 +91,7 @@ public class PersistentStoreConfig<V> {
     private String name;
     private InstanceSerializer<V> serializer;
     private PersistentStoreMode mode = PersistentStoreMode.PERSISTENT;
+    private int capacity;
 
     protected StoreConfigBuilder(InstanceSerializer<V> serializer) {
       super();
@@ -106,9 +113,14 @@ public class PersistentStoreConfig<V> {
       return this;
     }
 
+    public StoreConfigBuilder<V> setCapacity(int capacity) {
+      this.capacity = capacity;
+      return this;
+    }
+
     public PersistentStoreConfig<V> build(){
       Preconditions.checkNotNull(name);
-      return new PersistentStoreConfig<>(name, serializer, mode);
+      return new PersistentStoreConfig<>(name, serializer, mode, capacity);
     }
   }
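
A hedged sketch of the builder with the new setCapacity() hook, mirroring the profile-store config built in QueryProfileStoreContext above (the capacity value is illustrative; it only matters for capacity-aware stores such as the in-memory one):

  PersistentStoreConfig<QueryProfile> cfg = PersistentStoreConfig
      .newProtoBuilder(SchemaUserBitShared.QueryProfile.WRITE, SchemaUserBitShared.QueryProfile.MERGE)
      .name("profiles")
      .blob()
      .setCapacity(1000)  // illustrative; ignored by stores that do not bound size
      .build();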
 

http://git-wip-us.apache.org/repos/asf/drill/blob/9ba4af86/exec/java-exec/src/main/java/org/apache/drill/exec/store/sys/store/InMemoryStore.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/store/sys/store/InMemoryStore.java b/exec/java-exec/src/main/java/org/apache/drill/exec/store/sys/store/InMemoryStore.java
new file mode 100644
index 0000000..10da92d
--- /dev/null
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/store/sys/store/InMemoryStore.java
@@ -0,0 +1,143 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p/>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p/>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.drill.exec.store.sys.store;
+
+import java.util.Iterator;
+import java.util.Map;
+import java.util.concurrent.ConcurrentSkipListMap;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.concurrent.locks.ReadWriteLock;
+import java.util.concurrent.locks.ReentrantReadWriteLock;
+
+import org.apache.drill.common.concurrent.AutoCloseableLock;
+import org.apache.drill.exec.exception.VersionMismatchException;
+import org.apache.drill.exec.store.sys.BasePersistentStore;
+import org.apache.drill.exec.store.sys.PersistentStoreConfig;
+import org.apache.drill.exec.store.sys.PersistentStoreMode;
+
+import com.google.common.collect.Iterables;
+
+public class InMemoryStore<V> extends BasePersistentStore<V> {
+  // private static final org.slf4j.Logger logger = org.slf4j.LoggerFactory.getLogger(InMemoryPersistentStore.class);
+
+  private final ReadWriteLock readWriteLock = new ReentrantReadWriteLock();
+  private final AutoCloseableLock readLock = new AutoCloseableLock(readWriteLock.readLock());
+  private final AutoCloseableLock writeLock = new AutoCloseableLock(readWriteLock.writeLock());
+  private final ConcurrentSkipListMap<String, V> store;
+  private int version = -1;
+  private final int capacity;
+  private final AtomicInteger currentSize = new AtomicInteger();
+
+  public InMemoryStore(int capacity) {
+    this.capacity = capacity;
+    //Allows us to trim out the oldest elements to maintain finite max size
+    this.store = new ConcurrentSkipListMap<String, V>();
+  }
+
+  @Override
+  public void delete(final String key) {
+    try (AutoCloseableLock lock = writeLock.open()) {
+      store.remove(key);
+      version++;
+    }
+  }
+
+  @Override
+  public PersistentStoreMode getMode() {
+    return PersistentStoreMode.BLOB_PERSISTENT;
+  }
+
+  @Override
+  public boolean contains(final String key) {
+    return contains(key, null);
+  }
+
+  @Override
+  public boolean contains(final String key, final DataChangeVersion dataChangeVersion) {
+    try (AutoCloseableLock lock = readLock.open()) {
+      if (dataChangeVersion != null) {
+        dataChangeVersion.setVersion(version);
+      }
+      return store.containsKey(key);
+    }
+  }
+
+  @Override
+  public V get(final String key) {
+    return get(key, null);
+  }
+
+  @Override
+  public V get(final String key, final DataChangeVersion dataChangeVersion) {
+    try (AutoCloseableLock lock = readLock.open()) {
+      if (dataChangeVersion != null) {
+        dataChangeVersion.setVersion(version);
+      }
+      return store.get(key);
+    }
+  }
+
+  @Override
+  public void put(final String key, final V value) {
+    put(key, value, null);
+  }
+
+  @Override
+  public void put(final String key, final V value, final DataChangeVersion dataChangeVersion) {
+    try (AutoCloseableLock lock = writeLock.open()) {
+      if (dataChangeVersion != null && dataChangeVersion.getVersion() != version) {
+        throw new VersionMismatchException("Version mismatch detected", dataChangeVersion.getVersion());
+      }
+      store.put(key, value);
+      if (currentSize.incrementAndGet() > capacity) {
+        //Pop Out Oldest
+        store.pollLastEntry();
+        currentSize.decrementAndGet();
+      }
+
+      version++;
+    }
+  }
+
+  @Override
+  public boolean putIfAbsent(final String key, final V value) {
+    try (AutoCloseableLock lock = writeLock.open()) {
+      final V old = store.putIfAbsent(key, value);
+      if (old == null) {
+        version++;
+        return true;
+      }
+      return false;
+    }
+  }
+
+  @Override
+  public Iterator<Map.Entry<String, V>> getRange(final int skip, final int take) {
+    try (AutoCloseableLock lock = readLock.open()) {
+      return Iterables.limit(Iterables.skip(store.entrySet(), skip), take).iterator();
+    }
+  }
+
+  @Override
+  public void close() throws Exception {
+    try (AutoCloseableLock lock = writeLock.open()) {
+      store.clear();
+      version = -1;
+    }
+  }
+}
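
A hedged usage sketch of the capacity bound enforced in put(); test-style scaffolding, outside Drill's real wiring:

  InMemoryStore<String> store = new InMemoryStore<>(2);  // capacity of two entries
  store.put("a", "profile-a");
  store.put("b", "profile-b");
  store.put("c", "profile-c");  // size would exceed 2: pollLastEntry() evicts an entry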

http://git-wip-us.apache.org/repos/asf/drill/blob/9ba4af86/exec/java-exec/src/main/java/org/apache/drill/exec/store/sys/store/provider/InMemoryStoreProvider.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/store/sys/store/provider/InMemoryStoreProvider.java b/exec/java-exec/src/main/java/org/apache/drill/exec/store/sys/store/provider/InMemoryStoreProvider.java
new file mode 100644
index 0000000..ffe7b18
--- /dev/null
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/store/sys/store/provider/InMemoryStoreProvider.java
@@ -0,0 +1,51 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.drill.exec.store.sys.store.provider;
+
+import org.apache.drill.exec.exception.StoreException;
+import org.apache.drill.exec.store.sys.PersistentStore;
+import org.apache.drill.exec.store.sys.PersistentStoreConfig;
+import org.apache.drill.exec.store.sys.PersistentStoreProvider;
+import org.apache.drill.exec.store.sys.store.InMemoryStore;
+
+public class InMemoryStoreProvider implements PersistentStoreProvider {
+
+  private int capacity;
+
+  public InMemoryStoreProvider(int capacity) {
+    this.capacity = capacity;
+  }
+
+  @Override
+  public void close() throws Exception {
+    // TODO Auto-generated method stub
+
+  }
+
+  @Override
+  public <V> PersistentStore<V> getOrCreateStore(PersistentStoreConfig<V> config) throws StoreException {
+    // note: despite the name, every call creates a fresh store;
+    // callers are expected to cache the returned instance
+    return new InMemoryStore<>(capacity);
+  }
+
+  @Override
+  public void start() throws Exception {
+    // nothing to start: stores are created on demand in getOrCreateStore()
+  }
+
+}

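A sketch of exercising the provider; the config builder chain mirrors the
QUERY_PROFILE constant that the QueryManager change below removes, while
queryIdString and profile are stand-ins, not names from the patch:

    import org.apache.drill.exec.proto.SchemaUserBitShared;
    import org.apache.drill.exec.proto.UserBitShared.QueryProfile;
    import org.apache.drill.exec.store.sys.PersistentStore;
    import org.apache.drill.exec.store.sys.PersistentStoreConfig;
    import org.apache.drill.exec.store.sys.store.provider.InMemoryStoreProvider;

    // ... inside a test or bootstrap method:
    InMemoryStoreProvider provider = new InMemoryStoreProvider(1000);
    provider.start();
    PersistentStore<QueryProfile> profiles = provider.getOrCreateStore(
        PersistentStoreConfig
            .newProtoBuilder(SchemaUserBitShared.QueryProfile.WRITE, SchemaUserBitShared.QueryProfile.MERGE)
            .name("profiles")
            .blob()
            .build());
    profiles.put(queryIdString, profile); // stand-in variables
    provider.close();
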
http://git-wip-us.apache.org/repos/asf/drill/blob/9ba4af86/exec/java-exec/src/main/java/org/apache/drill/exec/work/WorkManager.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/work/WorkManager.java b/exec/java-exec/src/main/java/org/apache/drill/exec/work/WorkManager.java
index c352861..2d37b8c 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/work/WorkManager.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/work/WorkManager.java
@@ -102,8 +102,9 @@ public class WorkManager implements AutoCloseable {
       final Controller controller,
       final DataConnectionCreator data,
       final ClusterCoordinator coord,
-      final PersistentStoreProvider provider) {
-    dContext = new DrillbitContext(endpoint, bContext, coord, controller, data, workBus, provider);
+      final PersistentStoreProvider provider,
+      final PersistentStoreProvider profilesProvider) {
+    dContext = new DrillbitContext(endpoint, bContext, coord, controller, data, workBus, provider, profilesProvider);
     statusThread.start();
 
     DrillMetrics.register("drill.fragments.running",

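The extra parameter threads a dedicated provider for profiles into
DrillbitContext; how the caller chooses that provider is not part of this
hunk. A hedged sketch of the selection, keyed off the new config options
added below (the method name and config lookups here are assumptions):

    // Illustrative only: the Drillbit bootstrap is not shown in this excerpt.
    private PersistentStoreProvider resolveProfilesProvider(final DrillConfig config,
        final PersistentStoreProvider fallback) {
      if (config.getBoolean("drill.exec.profiles.store.inmemory")) {
        return new InMemoryStoreProvider(config.getInt("drill.exec.profiles.store.capacity"));
      }
      return fallback; // profiles keep using the regular persistent store provider
    }
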
http://git-wip-us.apache.org/repos/asf/drill/blob/9ba4af86/exec/java-exec/src/main/java/org/apache/drill/exec/work/foreman/QueryManager.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/work/foreman/QueryManager.java b/exec/java-exec/src/main/java/org/apache/drill/exec/work/foreman/QueryManager.java
index 77c20a5..ecbccf3 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/work/foreman/QueryManager.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/work/foreman/QueryManager.java
@@ -31,7 +31,6 @@ import org.apache.drill.common.exceptions.UserRemoteException;
 import org.apache.drill.exec.ExecConstants;
 import org.apache.drill.exec.coord.ClusterCoordinator;
 import org.apache.drill.exec.coord.store.TransientStore;
-import org.apache.drill.exec.coord.store.TransientStoreConfig;
 import org.apache.drill.exec.proto.BitControl.FragmentStatus;
 import org.apache.drill.exec.proto.BitControl.PlanFragment;
 import org.apache.drill.exec.proto.CoordinationProtos.DrillbitEndpoint;
@@ -52,7 +51,6 @@ import org.apache.drill.exec.rpc.control.Controller;
 import org.apache.drill.exec.server.DrillbitContext;
 import org.apache.drill.exec.server.options.OptionList;
 import org.apache.drill.exec.store.sys.PersistentStore;
-import org.apache.drill.exec.store.sys.PersistentStoreConfig;
 import org.apache.drill.exec.store.sys.PersistentStoreProvider;
 import org.apache.drill.exec.work.EndpointListener;
 
@@ -68,17 +66,6 @@ import com.google.common.collect.Maps;
 public class QueryManager implements AutoCloseable {
   private static final org.slf4j.Logger logger = org.slf4j.LoggerFactory.getLogger(QueryManager.class);
 
-  public static final PersistentStoreConfig<QueryProfile> QUERY_PROFILE = PersistentStoreConfig.
-          newProtoBuilder(SchemaUserBitShared.QueryProfile.WRITE, SchemaUserBitShared.QueryProfile.MERGE)
-      .name("profiles")
-      .blob()
-      .build();
-
-  public static final TransientStoreConfig<QueryInfo> RUNNING_QUERY_INFO = TransientStoreConfig
-      .newProtoBuilder(SchemaUserBitShared.QueryInfo.WRITE, SchemaUserBitShared.QueryInfo.MERGE)
-      .name("running")
-      .build();
-
   private final Map<DrillbitEndpoint, NodeTracker> nodeMap = Maps.newHashMap();
   private final QueryId queryId;
   private final String stringQueryId;
@@ -93,8 +80,8 @@ public class QueryManager implements AutoCloseable {
       new IntObjectHashMap<>();
   private final List<FragmentData> fragmentDataSet = Lists.newArrayList();
 
-  private final PersistentStore<QueryProfile> profileStore;
-  private final TransientStore<QueryInfo> transientProfiles;
+  private final PersistentStore<QueryProfile> completedProfileStore;
+  private final TransientStore<QueryInfo> runningProfileStore;
 
   // the following mutable variables are used to capture ongoing query status
   private String planText;
@@ -119,12 +106,9 @@ public class QueryManager implements AutoCloseable {
     this.foreman = foreman;
 
     stringQueryId = QueryIdHelper.getQueryId(queryId);
-    try {
-      profileStore = storeProvider.getOrCreateStore(QUERY_PROFILE);
-    } catch (final Exception e) {
-      throw new DrillRuntimeException(e);
-    }
-    transientProfiles = coordinator.getOrCreateTransientStore(RUNNING_QUERY_INFO);
+
+    this.completedProfileStore = foreman.getQueryContext().getProfileStoreContext().getCompletedProfileStore();
+    this.runningProfileStore = foreman.getQueryContext().getProfileStoreContext().getRunningProfileStore();
   }
 
   private static boolean isTerminal(final FragmentState state) {
@@ -298,7 +282,7 @@ public class QueryManager implements AutoCloseable {
       case STARTING:
       case RUNNING:
       case CANCELLATION_REQUESTED:
-        transientProfiles.put(stringQueryId, getQueryInfo());  // store as ephemeral query profile.
+        runningProfileStore.put(stringQueryId, getQueryInfo());  // store as ephemeral query profile.
         inTransientStore = true;
         break;
 
@@ -306,7 +290,7 @@ public class QueryManager implements AutoCloseable {
       case CANCELED:
       case FAILED:
         try {
-          transientProfiles.remove(stringQueryId);
+          runningProfileStore.remove(stringQueryId);
           inTransientStore = false;
         } catch(final Exception e) {
           logger.warn("Failure while trying to delete the estore profile for this query.", e);
@@ -321,7 +305,7 @@ public class QueryManager implements AutoCloseable {
   void writeFinalProfile(UserException ex) {
     try {
       // TODO(DRILL-2362) when do these ever get deleted?
-      profileStore.put(stringQueryId, getQueryProfile(ex));
+      completedProfileStore.put(stringQueryId, getQueryProfile(ex));
     } catch (Exception e) {
       logger.error("Failure while storing Query Profile", e);
     }

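Net effect: QueryManager no longer builds its own store configs per query;
both stores come from a shared profile-store context, and the renames make
the running/completed split explicit. Condensed from the hunks above (the
context's concrete type is not visible in this excerpt):

    final TransientStore<QueryInfo> running =
        foreman.getQueryContext().getProfileStoreContext().getRunningProfileStore();
    final PersistentStore<QueryProfile> completed =
        foreman.getQueryContext().getProfileStoreContext().getCompletedProfileStore();

    running.put(stringQueryId, getQueryInfo());        // STARTING / RUNNING / CANCELLATION_REQUESTED
    running.remove(stringQueryId);                     // on reaching a terminal state
    completed.put(stringQueryId, getQueryProfile(ex)); // final profile, written once
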
http://git-wip-us.apache.org/repos/asf/drill/blob/9ba4af86/exec/java-exec/src/main/resources/drill-module.conf
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/resources/drill-module.conf b/exec/java-exec/src/main/resources/drill-module.conf
index 7c095ac..5ba4526 100644
--- a/exec/java-exec/src/main/resources/drill-module.conf
+++ b/exec/java-exec/src/main/resources/drill-module.conf
@@ -141,6 +141,10 @@ drill.exec: {
       write: true
     }
   },
+  profiles.store: {
+    inmemory: false,
+    capacity: 1000
+  },
   impersonation: {
     enabled: false,
     max_chained_user_hops: 3
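
The defaults above preserve existing behavior: profiles keep going through
the regular persistent store provider. A deployment that wants memory-only
retention would override the new keys, e.g. in drill-override.conf (values
illustrative):

    drill.exec: {
      profiles.store: {
        inmemory: true,  # hold query profiles only in the Drillbit's heap
        capacity: 500    # retain at most 500 profiles; older entries are evicted
      }
    }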