Posted to commits@hive.apache.org by se...@apache.org on 2016/10/25 21:43:37 UTC

[18/28] hive git commit: HIVE-14391. TestAccumuloCliDriver is not executed during precommit tests. (Peter Vary, reviewed by Siddharth Seth)

HIVE-14391. TestAccumuloCliDriver is not executed during precommit tests. (Peter Vary, reviewed by Siddharth Seth)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/ad5d8169
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/ad5d8169
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/ad5d8169

Branch: refs/heads/hive-14535
Commit: ad5d8169752489d27d33afe43d263dec09bb667d
Parents: 5c35d0c
Author: Siddharth Seth <ss...@apache.org>
Authored: Mon Oct 24 14:50:41 2016 -0700
Committer: Siddharth Seth <ss...@apache.org>
Committed: Mon Oct 24 14:50:41 2016 -0700

----------------------------------------------------------------------
 .../positive/accumulo_predicate_pushdown.q.out  | 131 ++++++-------------
 .../results/positive/accumulo_queries.q.out     | 121 +++++++++--------
 .../accumulo_single_sourced_multi_insert.q.out  |  39 +++---
 itests/qtest-accumulo/pom.xml                   |   7 +
 .../hadoop/hive/accumulo/AccumuloQTestUtil.java |  12 +-
 .../hadoop/hive/accumulo/AccumuloTestSetup.java |  19 ++-
 .../hive/cli/control/CoreAccumuloCliDriver.java |   3 +-
 .../ptest2/conf/deployed/master-mr2.properties  |   2 +-
 .../resources/test-configuration2.properties    |   2 +-
 9 files changed, 170 insertions(+), 166 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/ad5d8169/accumulo-handler/src/test/results/positive/accumulo_predicate_pushdown.q.out
----------------------------------------------------------------------
diff --git a/accumulo-handler/src/test/results/positive/accumulo_predicate_pushdown.q.out b/accumulo-handler/src/test/results/positive/accumulo_predicate_pushdown.q.out
index eb03041..fc4f9aa 100644
--- a/accumulo-handler/src/test/results/positive/accumulo_predicate_pushdown.q.out
+++ b/accumulo-handler/src/test/results/positive/accumulo_predicate_pushdown.q.out
@@ -29,37 +29,22 @@ POSTHOOK: query: -- with full pushdown
 explain select * from accumulo_pushdown where key>'90'
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
+  Stage-0 is a root stage
 
 STAGE PLANS:
-  Stage: Stage-1
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: accumulo_pushdown
-            filterExpr: (key > '90') (type: boolean)
-            Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
-            Filter Operator
-              predicate: (key > '90') (type: boolean)
-              Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
-              Select Operator
-                expressions: key (type: string), value (type: string)
-                outputColumnNames: _col0, _col1
-                Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
-                File Output Operator
-                  compressed: false
-                  Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
-                  table:
-                      input format: org.apache.hadoop.mapred.TextInputFormat
-                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
   Stage: Stage-0
     Fetch Operator
       limit: -1
       Processor Tree:
-        ListSink
+        TableScan
+          alias: accumulo_pushdown
+          filterExpr: (key > '90') (type: boolean)
+          Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+          Select Operator
+            expressions: key (type: string), value (type: string)
+            outputColumnNames: _col0, _col1
+            Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+            ListSink
 
 PREHOOK: query: select * from accumulo_pushdown where key>'90'
 PREHOOK: type: QUERY
@@ -185,37 +170,22 @@ POSTHOOK: query: -- with constant expression
 explain select * from accumulo_pushdown where key>=cast(40 + 50 as string)
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
+  Stage-0 is a root stage
 
 STAGE PLANS:
-  Stage: Stage-1
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: accumulo_pushdown
-            filterExpr: (key >= '90') (type: boolean)
-            Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
-            Filter Operator
-              predicate: (key >= '90') (type: boolean)
-              Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
-              Select Operator
-                expressions: key (type: string), value (type: string)
-                outputColumnNames: _col0, _col1
-                Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
-                File Output Operator
-                  compressed: false
-                  Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
-                  table:
-                      input format: org.apache.hadoop.mapred.TextInputFormat
-                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
   Stage: Stage-0
     Fetch Operator
       limit: -1
       Processor Tree:
-        ListSink
+        TableScan
+          alias: accumulo_pushdown
+          filterExpr: (key >= '90') (type: boolean)
+          Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+          Select Operator
+            expressions: key (type: string), value (type: string)
+            outputColumnNames: _col0, _col1
+            Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+            ListSink
 
 PREHOOK: query: select * from accumulo_pushdown where key>=cast(40 + 50 as string)
 PREHOOK: type: QUERY
@@ -262,8 +232,8 @@ STAGE PLANS:
                   compressed: false
                   Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
                   table:
-                      input format: org.apache.hadoop.mapred.TextInputFormat
-                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
                       serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
 
   Stage: Stage-0
@@ -318,8 +288,8 @@ STAGE PLANS:
                   compressed: false
                   Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
                   table:
-                      input format: org.apache.hadoop.mapred.TextInputFormat
-                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
                       serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
 
   Stage: Stage-0
@@ -371,8 +341,8 @@ STAGE PLANS:
                   compressed: false
                   Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
                   table:
-                      input format: org.apache.hadoop.mapred.TextInputFormat
-                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
                       serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
 
   Stage: Stage-0
@@ -440,7 +410,7 @@ STAGE PLANS:
             alias: accumulo_pushdown
             Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
             Filter Operator
-              predicate: (CASE WHEN ((key < '90')) THEN (2) ELSE (4) END > 3) (type: boolean)
+              predicate: (not NVL((key < '90'),false)) (type: boolean)
               Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
               Select Operator
                 expressions: key (type: string), value (type: string)
@@ -450,8 +420,8 @@ STAGE PLANS:
                   compressed: false
                   Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
                   table:
-                      input format: org.apache.hadoop.mapred.TextInputFormat
-                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
                       serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
 
   Stage: Stage-0
@@ -494,8 +464,8 @@ STAGE PLANS:
                   compressed: false
                   Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
                   table:
-                      input format: org.apache.hadoop.mapred.TextInputFormat
-                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
                       serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
 
   Stage: Stage-0
@@ -511,37 +481,22 @@ POSTHOOK: query: explain select * from accumulo_pushdown where key > '281'
 and key < '287'
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
+  Stage-0 is a root stage
 
 STAGE PLANS:
-  Stage: Stage-1
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: accumulo_pushdown
-            filterExpr: ((key > '281') and (key < '287')) (type: boolean)
-            Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
-            Filter Operator
-              predicate: ((key > '281') and (key < '287')) (type: boolean)
-              Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
-              Select Operator
-                expressions: key (type: string), value (type: string)
-                outputColumnNames: _col0, _col1
-                Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
-                File Output Operator
-                  compressed: false
-                  Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
-                  table:
-                      input format: org.apache.hadoop.mapred.TextInputFormat
-                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
   Stage: Stage-0
     Fetch Operator
       limit: -1
       Processor Tree:
-        ListSink
+        TableScan
+          alias: accumulo_pushdown
+          filterExpr: ((key > '281') and (key < '287')) (type: boolean)
+          Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+          Select Operator
+            expressions: key (type: string), value (type: string)
+            outputColumnNames: _col0, _col1
+            Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+            ListSink
 
 PREHOOK: query: select * from accumulo_pushdown where key > '281' 
 and key < '287'
@@ -588,8 +543,8 @@ STAGE PLANS:
                   compressed: false
                   Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
                   table:
-                      input format: org.apache.hadoop.mapred.TextInputFormat
-                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
                       serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
 
   Stage: Stage-0

http://git-wip-us.apache.org/repos/asf/hive/blob/ad5d8169/accumulo-handler/src/test/results/positive/accumulo_queries.q.out
----------------------------------------------------------------------
diff --git a/accumulo-handler/src/test/results/positive/accumulo_queries.q.out b/accumulo-handler/src/test/results/positive/accumulo_queries.q.out
index d3c717b..a6d2632 100644
--- a/accumulo-handler/src/test/results/positive/accumulo_queries.q.out
+++ b/accumulo-handler/src/test/results/positive/accumulo_queries.q.out
@@ -40,9 +40,18 @@ POSTHOOK: query: EXPLAIN FROM src INSERT OVERWRITE TABLE accumulo_table_1 SELECT
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-0 is a root stage
+  Stage-1 is a root stage
 
 STAGE PLANS:
   Stage: Stage-0
+      Alter Table Operator:
+        Alter Table
+          type: drop props
+          old name: default.accumulo_table_1
+          properties:
+            COLUMN_STATS_ACCURATE 
+
+  Stage: Stage-1
     Map Reduce
       Map Operator Tree:
           TableScan
@@ -121,7 +130,7 @@ STAGE PLANS:
             alias: accumulo_table_1
             Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
             Filter Operator
-              predicate: UDFToDouble(key) is not null (type: boolean)
+              predicate: key is not null (type: boolean)
               Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
               Select Operator
                 expressions: key (type: int)
@@ -136,17 +145,17 @@ STAGE PLANS:
             alias: src
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: UDFToDouble(key) is not null (type: boolean)
-              Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+              predicate: key is not null (type: boolean)
+              Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: key (type: string), value (type: string)
                 outputColumnNames: _col0, _col1
-                Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                 Reduce Output Operator
                   key expressions: UDFToDouble(_col0) (type: double)
                   sort order: +
                   Map-reduce partition columns: UDFToDouble(_col0) (type: double)
-                  Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   value expressions: _col0 (type: string), _col1 (type: string)
       Reduce Operator Tree:
         Join Operator
@@ -155,12 +164,12 @@ STAGE PLANS:
           keys:
             0 UDFToDouble(_col0) (type: double)
             1 UDFToDouble(_col0) (type: double)
-          outputColumnNames: _col2, _col3
-          Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
+          outputColumnNames: _col1, _col2
+          Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE
           Select Operator
-            expressions: _col2 (type: string), _col3 (type: string)
+            expressions: _col1 (type: string), _col2 (type: string)
             outputColumnNames: _col0, _col1
-            Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE
             File Output Operator
               compressed: false
               table:
@@ -175,12 +184,13 @@ STAGE PLANS:
             Reduce Output Operator
               key expressions: _col0 (type: string), _col1 (type: string)
               sort order: ++
-              Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE
+              TopN Hash Memory Usage: 0.1
       Reduce Operator Tree:
         Select Operator
           expressions: KEY.reducesinkkey0 (type: string), KEY.reducesinkkey1 (type: string)
           outputColumnNames: _col0, _col1
-          Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
+          Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE
           Limit
             Number of rows: 20
             Statistics: Num rows: 20 Data size: 200 Basic stats: COMPLETE Column stats: NONE
@@ -188,8 +198,8 @@ STAGE PLANS:
               compressed: false
               Statistics: Num rows: 20 Data size: 200 Basic stats: COMPLETE Column stats: NONE
               table:
-                  input format: org.apache.hadoop.mapred.TextInputFormat
-                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
                   serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
 
   Stage: Stage-0
@@ -269,37 +279,31 @@ STAGE PLANS:
       Map Operator Tree:
           TableScan
             alias: accumulo_table_1
-            filterExpr: (100 < key) (type: boolean)
+            filterExpr: ((100 < key) and (key < 120)) (type: boolean)
             Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
-            Filter Operator
-              predicate: key is not null (type: boolean)
+            Select Operator
+              expressions: key (type: int)
+              outputColumnNames: _col0
               Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
-              Select Operator
-                expressions: key (type: int)
-                outputColumnNames: _col0
+              Reduce Output Operator
+                key expressions: _col0 (type: int)
+                sort order: +
+                Map-reduce partition columns: _col0 (type: int)
                 Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
-                Reduce Output Operator
-                  key expressions: _col0 (type: int)
-                  sort order: +
-                  Map-reduce partition columns: _col0 (type: int)
-                  Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
           TableScan
             alias: accumulo_table_2
-            filterExpr: (key < 120) (type: boolean)
+            filterExpr: ((key < 120) and (100 < key)) (type: boolean)
             Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
-            Filter Operator
-              predicate: key is not null (type: boolean)
+            Select Operator
+              expressions: key (type: int), value (type: string)
+              outputColumnNames: _col0, _col1
               Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
-              Select Operator
-                expressions: key (type: int), value (type: string)
-                outputColumnNames: _col0, _col1
+              Reduce Output Operator
+                key expressions: _col0 (type: int)
+                sort order: +
+                Map-reduce partition columns: _col0 (type: int)
                 Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
-                Reduce Output Operator
-                  key expressions: _col0 (type: int)
-                  sort order: +
-                  Map-reduce partition columns: _col0 (type: int)
-                  Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
-                  value expressions: _col1 (type: string)
+                value expressions: _col1 (type: string)
       Reduce Operator Tree:
         Join Operator
           condition map:
@@ -307,10 +311,10 @@ STAGE PLANS:
           keys:
             0 _col0 (type: int)
             1 _col0 (type: int)
-          outputColumnNames: _col2, _col3
+          outputColumnNames: _col1, _col2
           Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
           Select Operator
-            expressions: _col2 (type: int), _col3 (type: string)
+            expressions: _col1 (type: int), _col2 (type: string)
             outputColumnNames: _col0, _col1
             Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
             File Output Operator
@@ -337,8 +341,8 @@ STAGE PLANS:
             compressed: false
             Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
             table:
-                input format: org.apache.hadoop.mapred.TextInputFormat
-                output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
                 serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
 
   Stage: Stage-0
@@ -485,30 +489,39 @@ JOIN
 ON (x.key = Y.key)
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
+  Stage-0 is a root stage
+  Stage-2 is a root stage
+  Stage-1 depends on stages: Stage-2
 
 STAGE PLANS:
-  Stage: Stage-1
+  Stage: Stage-0
+      Alter Table Operator:
+        Alter Table
+          type: drop props
+          old name: default.accumulo_table_3
+          properties:
+            COLUMN_STATS_ACCURATE 
+
+  Stage: Stage-2
     Map Reduce
       Map Operator Tree:
           TableScan
             alias: src
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: UDFToDouble(key) is not null (type: boolean)
-              Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+              predicate: key is not null (type: boolean)
+              Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
                 aggregations: count(key)
                 keys: key (type: string)
                 mode: hash
                 outputColumnNames: _col0, _col1
-                Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                 Reduce Output Operator
                   key expressions: _col0 (type: string)
                   sort order: +
                   Map-reduce partition columns: _col0 (type: string)
-                  Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   value expressions: _col1 (type: bigint)
       Reduce Operator Tree:
         Group By Operator
@@ -516,7 +529,7 @@ STAGE PLANS:
           keys: KEY._col0 (type: string)
           mode: mergepartial
           outputColumnNames: _col0, _col1
-          Statistics: Num rows: 125 Data size: 1328 Basic stats: COMPLETE Column stats: NONE
+          Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
           File Output Operator
             compressed: false
             table:
@@ -524,14 +537,14 @@ STAGE PLANS:
                 output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
                 serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
 
-  Stage: Stage-0
+  Stage: Stage-1
     Map Reduce
       Map Operator Tree:
           TableScan
             alias: accumulo_table_1
             Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
             Filter Operator
-              predicate: UDFToDouble(key) is not null (type: boolean)
+              predicate: key is not null (type: boolean)
               Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
               Select Operator
                 expressions: key (type: int), value (type: string)
@@ -548,7 +561,7 @@ STAGE PLANS:
               key expressions: UDFToDouble(_col0) (type: double)
               sort order: +
               Map-reduce partition columns: UDFToDouble(_col0) (type: double)
-              Statistics: Num rows: 125 Data size: 1328 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
               value expressions: _col1 (type: bigint)
       Reduce Operator Tree:
         Join Operator
@@ -558,14 +571,14 @@ STAGE PLANS:
             0 UDFToDouble(_col0) (type: double)
             1 UDFToDouble(_col0) (type: double)
           outputColumnNames: _col0, _col1, _col3
-          Statistics: Num rows: 137 Data size: 1460 Basic stats: COMPLETE Column stats: NONE
+          Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
           Select Operator
             expressions: _col0 (type: int), _col1 (type: string), UDFToInteger(_col3) (type: int)
             outputColumnNames: _col0, _col1, _col2
-            Statistics: Num rows: 137 Data size: 1460 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
             File Output Operator
               compressed: false
-              Statistics: Num rows: 137 Data size: 1460 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
               table:
                   input format: org.apache.hadoop.hive.accumulo.mr.HiveAccumuloTableInputFormat
                   output format: org.apache.hadoop.hive.accumulo.mr.HiveAccumuloTableOutputFormat

http://git-wip-us.apache.org/repos/asf/hive/blob/ad5d8169/accumulo-handler/src/test/results/positive/accumulo_single_sourced_multi_insert.q.out
----------------------------------------------------------------------
diff --git a/accumulo-handler/src/test/results/positive/accumulo_single_sourced_multi_insert.q.out b/accumulo-handler/src/test/results/positive/accumulo_single_sourced_multi_insert.q.out
index 07af080..5d0d788 100644
--- a/accumulo-handler/src/test/results/positive/accumulo_single_sourced_multi_insert.q.out
+++ b/accumulo-handler/src/test/results/positive/accumulo_single_sourced_multi_insert.q.out
@@ -35,17 +35,26 @@ insert overwrite table src_x2
 select value,"" where a.key > 50 AND a.key < 100
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-7 depends on stages: Stage-1 , consists of Stage-4, Stage-3, Stage-5
-  Stage-4
-  Stage-0 depends on stages: Stage-4, Stage-3, Stage-6
-  Stage-2 depends on stages: Stage-0
-  Stage-3
+  Stage-0 is a root stage
+  Stage-2 is a root stage
+  Stage-8 depends on stages: Stage-2 , consists of Stage-5, Stage-4, Stage-6
   Stage-5
-  Stage-6 depends on stages: Stage-5
+  Stage-1 depends on stages: Stage-5, Stage-4, Stage-7
+  Stage-3 depends on stages: Stage-1
+  Stage-4
+  Stage-6
+  Stage-7 depends on stages: Stage-6
 
 STAGE PLANS:
-  Stage: Stage-1
+  Stage: Stage-0
+      Alter Table Operator:
+        Alter Table
+          type: drop props
+          old name: default.src_x2
+          properties:
+            COLUMN_STATS_ACCURATE 
+
+  Stage: Stage-2
     Map Reduce
       Map Operator Tree:
           TableScan
@@ -82,16 +91,16 @@ STAGE PLANS:
                       serde: org.apache.hadoop.hive.accumulo.serde.AccumuloSerDe
                       name: default.src_x2
 
-  Stage: Stage-7
+  Stage: Stage-8
     Conditional Operator
 
-  Stage: Stage-4
+  Stage: Stage-5
     Move Operator
       files:
           hdfs directory: true
 #### A masked pattern was here ####
 
-  Stage: Stage-0
+  Stage: Stage-1
     Move Operator
       tables:
           replace: true
@@ -101,10 +110,10 @@ STAGE PLANS:
               serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
               name: default.src_x1
 
-  Stage: Stage-2
+  Stage: Stage-3
     Stats-Aggr Operator
 
-  Stage: Stage-3
+  Stage: Stage-4
     Map Reduce
       Map Operator Tree:
           TableScan
@@ -116,7 +125,7 @@ STAGE PLANS:
                   serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                   name: default.src_x1
 
-  Stage: Stage-5
+  Stage: Stage-6
     Map Reduce
       Map Operator Tree:
           TableScan
@@ -128,7 +137,7 @@ STAGE PLANS:
                   serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                   name: default.src_x1
 
-  Stage: Stage-6
+  Stage: Stage-7
     Move Operator
       files:
           hdfs directory: true

http://git-wip-us.apache.org/repos/asf/hive/blob/ad5d8169/itests/qtest-accumulo/pom.xml
----------------------------------------------------------------------
diff --git a/itests/qtest-accumulo/pom.xml b/itests/qtest-accumulo/pom.xml
index a4a5c75..e221347 100644
--- a/itests/qtest-accumulo/pom.xml
+++ b/itests/qtest-accumulo/pom.xml
@@ -113,6 +113,13 @@
       <scope>test</scope>
       <classifier>core</classifier>
     </dependency>
+    <dependency>
+      <groupId>org.apache.hive</groupId>
+      <artifactId>hive-exec</artifactId>
+      <version>${project.version}</version>
+      <scope>test</scope>
+      <classifier>tests</classifier>
+    </dependency>
     <!-- inter-project -->
     <dependency>
       <groupId>junit</groupId>

http://git-wip-us.apache.org/repos/asf/hive/blob/ad5d8169/itests/util/src/main/java/org/apache/hadoop/hive/accumulo/AccumuloQTestUtil.java
----------------------------------------------------------------------
diff --git a/itests/util/src/main/java/org/apache/hadoop/hive/accumulo/AccumuloQTestUtil.java b/itests/util/src/main/java/org/apache/hadoop/hive/accumulo/AccumuloQTestUtil.java
index 88bc0bc..749abb5 100644
--- a/itests/util/src/main/java/org/apache/hadoop/hive/accumulo/AccumuloQTestUtil.java
+++ b/itests/util/src/main/java/org/apache/hadoop/hive/accumulo/AccumuloQTestUtil.java
@@ -17,22 +17,28 @@
  */
 package org.apache.hadoop.hive.accumulo;
 
+import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.ql.QTestUtil;
 
 /**
  * AccumuloQTestUtil initializes Accumulo-specific test fixtures.
  */
 public class AccumuloQTestUtil extends QTestUtil {
+  AccumuloTestSetup setup = null;
+
   public AccumuloQTestUtil(String outDir, String logDir, MiniClusterType miniMr,
       AccumuloTestSetup setup, String initScript, String cleanupScript) throws Exception {
 
     super(outDir, logDir, miniMr, null, "0.20", initScript, cleanupScript, false, false);
     setup.setupWithHiveConf(conf);
-    super.init();
+    this.setup = setup;
   }
 
   @Override
-  public void init() throws Exception {
-    // defer
+  public void initConf() throws Exception {
+    if (setup != null) {
+      setup.updateConf(conf);
+    }
+    super.initConf();
   }
 }
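
The AccumuloQTestUtil change above replaces the old deferred init() override with an initConf() hook: the setup object is retained and the Accumulo connection settings are re-applied each time the configuration is rebuilt, before delegating to the superclass. A minimal, self-contained sketch of that pattern, using hypothetical stand-in classes and property names rather than the real QTestUtil/HiveConf/AccumuloTestSetup APIs:

    // Stand-in classes (hypothetical) showing why hooking initConf() keeps the
    // cluster settings applied across configuration rebuilds.
    import java.util.HashMap;
    import java.util.Map;

    class BaseTestUtil {
      protected Map<String, String> conf = new HashMap<>();

      public void initConf() throws Exception {
        // The base class derives its default properties here.
        conf.put("base.initialized", "true");
      }
    }

    class ClusterTestSetup {
      public void updateConf(Map<String, String> conf) {
        // Re-apply connection parameters every time the conf is rebuilt.
        conf.put("cluster.zookeepers", "localhost:2181");
        conf.put("cluster.instance", "mini");
      }
    }

    class ClusterTestUtil extends BaseTestUtil {
      private final ClusterTestSetup setup;

      ClusterTestUtil(ClusterTestSetup setup) {
        this.setup = setup;
      }

      @Override
      public void initConf() throws Exception {
        if (setup != null) {
          setup.updateConf(conf);   // settings survive conf re-initialization
        }
        super.initConf();
      }

      public static void main(String[] args) throws Exception {
        ClusterTestUtil util = new ClusterTestUtil(new ClusterTestSetup());
        util.initConf();            // first initialization
        util.conf.clear();          // simulate a conf rebuild between queries
        util.initConf();            // connection settings are applied again
        System.out.println(util.conf);
      }
    }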

http://git-wip-us.apache.org/repos/asf/hive/blob/ad5d8169/itests/util/src/main/java/org/apache/hadoop/hive/accumulo/AccumuloTestSetup.java
----------------------------------------------------------------------
diff --git a/itests/util/src/main/java/org/apache/hadoop/hive/accumulo/AccumuloTestSetup.java b/itests/util/src/main/java/org/apache/hadoop/hive/accumulo/AccumuloTestSetup.java
index 73d5f15..47cf7ac 100644
--- a/itests/util/src/main/java/org/apache/hadoop/hive/accumulo/AccumuloTestSetup.java
+++ b/itests/util/src/main/java/org/apache/hadoop/hive/accumulo/AccumuloTestSetup.java
@@ -34,6 +34,7 @@ import org.apache.accumulo.core.client.admin.TableOperations;
 import org.apache.accumulo.core.data.Mutation;
 import org.apache.accumulo.minicluster.MiniAccumuloCluster;
 import org.apache.accumulo.minicluster.MiniAccumuloConfig;
+import org.apache.commons.io.FileUtils;
 import org.apache.hadoop.hive.common.type.HiveDecimal;
 import org.apache.hadoop.hive.conf.HiveConf;
 
@@ -54,6 +55,10 @@ public class AccumuloTestSetup  {
       String testTmpDir = System.getProperty("test.tmp.dir");
       File tmpDir = new File(testTmpDir, "accumulo");
 
+      if (tmpDir.exists()) {
+        FileUtils.deleteDirectory(tmpDir);
+      }
+
       MiniAccumuloConfig cfg = new MiniAccumuloConfig(tmpDir, PASSWORD);
       cfg.setNumTservers(1);
 
@@ -64,11 +69,21 @@ public class AccumuloTestSetup  {
       createAccumuloTable(miniCluster.getConnector("root", PASSWORD));
     }
 
+    updateConf(conf);
+  }
+
+  /**
+   * Update hiveConf with the Accumulo specific parameters
+   * @param conf The hiveconf to update
+   */
+  public void updateConf(HiveConf conf) {
     // Setup connection information
     conf.set(AccumuloConnectionParameters.USER_NAME, "root");
     conf.set(AccumuloConnectionParameters.USER_PASS, PASSWORD);
-    conf.set(AccumuloConnectionParameters.ZOOKEEPERS, miniCluster.getZooKeepers());
-    conf.set(AccumuloConnectionParameters.INSTANCE_NAME, miniCluster.getInstanceName());
+    if (miniCluster != null) {
+      conf.set(AccumuloConnectionParameters.ZOOKEEPERS, miniCluster.getZooKeepers());
+      conf.set(AccumuloConnectionParameters.INSTANCE_NAME, miniCluster.getInstanceName());
+    }
   }
 
   protected void createAccumuloTable(Connector conn) throws TableExistsException,
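
The AccumuloTestSetup change above does two things: it wipes any stale MiniAccumuloCluster working directory before starting, and it pulls the connection parameters out into updateConf() behind a null guard, so they can be applied to a HiveConf even before the mini cluster exists. A rough, self-contained sketch of that shape, with illustrative names standing in for the Accumulo and Hive classes:

    // Illustrative setup class (not the Accumulo/Hive API): clear stale state
    // before starting, and keep "apply connection parameters" re-runnable.
    import java.io.File;
    import java.io.IOException;
    import java.nio.file.Files;
    import java.nio.file.Path;
    import java.util.Comparator;
    import java.util.HashMap;
    import java.util.Map;
    import java.util.stream.Stream;

    class MiniClusterSetup {
      private String zookeepers;    // known only once the cluster is running
      private String instanceName;

      void start(File tmpDir) throws IOException {
        if (tmpDir.exists()) {
          // Equivalent of FileUtils.deleteDirectory(tmpDir): remove leftovers
          // from a previous run before the cluster reuses the directory.
          try (Stream<Path> paths = Files.walk(tmpDir.toPath())) {
            paths.sorted(Comparator.reverseOrder()).forEach(p -> p.toFile().delete());
          }
        }
        tmpDir.mkdirs();
        // ... start the mini cluster here ...
        zookeepers = "localhost:2181";
        instanceName = "mini";
      }

      /** Re-applies connection parameters; safe to call before start(). */
      void updateConf(Map<String, String> conf) {
        conf.put("accumulo.user", "root");
        conf.put("accumulo.password", "secret");
        if (zookeepers != null) {
          conf.put("accumulo.zookeepers", zookeepers);
          conf.put("accumulo.instance", instanceName);
        }
      }

      public static void main(String[] args) throws IOException {
        MiniClusterSetup setup = new MiniClusterSetup();
        Map<String, String> conf = new HashMap<>();
        setup.updateConf(conf);   // before start(): only credentials are set
        setup.start(new File(System.getProperty("java.io.tmpdir"), "accumulo-sketch"));
        setup.updateConf(conf);   // after start(): ZooKeeper and instance are filled in
        System.out.println(conf);
      }
    }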

http://git-wip-us.apache.org/repos/asf/hive/blob/ad5d8169/itests/util/src/main/java/org/apache/hadoop/hive/cli/control/CoreAccumuloCliDriver.java
----------------------------------------------------------------------
diff --git a/itests/util/src/main/java/org/apache/hadoop/hive/cli/control/CoreAccumuloCliDriver.java b/itests/util/src/main/java/org/apache/hadoop/hive/cli/control/CoreAccumuloCliDriver.java
index a5d2711..3e4b373 100644
--- a/itests/util/src/main/java/org/apache/hadoop/hive/cli/control/CoreAccumuloCliDriver.java
+++ b/itests/util/src/main/java/org/apache/hadoop/hive/cli/control/CoreAccumuloCliDriver.java
@@ -43,8 +43,7 @@ public class CoreAccumuloCliDriver extends CliAdapter {
   @Override
   @AfterClass
   public void shutdown() throws Exception {
-    // FIXME: possibly missing
-    // setup.tearDown();
+    setup.tearDown();
   }
   @Override
   @Before
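
The CoreAccumuloCliDriver change above restores setup.tearDown() in the class-level shutdown, so the mini cluster started for the q-file run is stopped once the driver finishes. A plain JUnit 4 sketch of that lifecycle, with an illustrative fixture in place of the CliAdapter and AccumuloTestSetup classes used here:

    import org.junit.AfterClass;
    import org.junit.BeforeClass;
    import org.junit.Test;

    public class MiniClusterDriverSketch {

      /** Illustrative stand-in for the Accumulo test fixture. */
      static class Fixture {
        void start()    { /* start the mini cluster */ }
        void tearDown() { /* stop the mini cluster, release ports and temp dirs */ }
      }

      private static Fixture setup;

      @BeforeClass
      public static void startCluster() {
        setup = new Fixture();
        setup.start();
      }

      @AfterClass
      public static void stopCluster() {
        // The restored teardown: without it the cluster keeps running after
        // the driver finishes, which matters once the test is executed again.
        setup.tearDown();
      }

      @Test
      public void runsAgainstCluster() {
        // q-file execution would happen here
      }
    }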

http://git-wip-us.apache.org/repos/asf/hive/blob/ad5d8169/testutils/ptest2/conf/deployed/master-mr2.properties
----------------------------------------------------------------------
diff --git a/testutils/ptest2/conf/deployed/master-mr2.properties b/testutils/ptest2/conf/deployed/master-mr2.properties
index 02a560e..375d50d 100644
--- a/testutils/ptest2/conf/deployed/master-mr2.properties
+++ b/testutils/ptest2/conf/deployed/master-mr2.properties
@@ -23,7 +23,7 @@ additionalProfiles =
 unitTests.isolate =
 
 # comes from build-command.xml excludes
-unitTests.exclude = TestSerDe TestHiveMetaStore TestHiveServer2Concurrency TestAccumuloCliDriver TestMiniSparkOnYarnCliDriver TestSparkCliDriver TestMultiSessionsHS2WithLocalClusterSpark TestJdbcWithLocalClusterSpark TestEncryptedHDFSCliDriver
+unitTests.exclude = TestSerDe TestHiveMetaStore TestHiveServer2Concurrency TestMiniSparkOnYarnCliDriver TestSparkCliDriver TestMultiSessionsHS2WithLocalClusterSpark TestJdbcWithLocalClusterSpark TestEncryptedHDFSCliDriver
 
 unitTests.batchSize=20
 

http://git-wip-us.apache.org/repos/asf/hive/blob/ad5d8169/testutils/ptest2/src/test/resources/test-configuration2.properties
----------------------------------------------------------------------
diff --git a/testutils/ptest2/src/test/resources/test-configuration2.properties b/testutils/ptest2/src/test/resources/test-configuration2.properties
index bb4e77c..0ba2f3b 100644
--- a/testutils/ptest2/src/test/resources/test-configuration2.properties
+++ b/testutils/ptest2/src/test/resources/test-configuration2.properties
@@ -30,7 +30,7 @@ additionalProfiles =
 unitTests.isolate = TestAuthorizationPreEventListener TestDefaultHCatRecord TestDefaultHiveMetastoreAuthorizationProvider TestEmbeddedHiveMetaStore TestExecDriver TestHadoop20SAuthBridge TestHBaseBulkOutputFormat TestHBaseDirectOutputFormat TestHBaseInputFormat TestHBaseMinimrCliDriver TestHCatClient TestHCatDynamicPartitioned TestHCatExternalDynamicPartitioned TestHCatExternalPartitioned TestHCatHiveCompatibility TestHCatHiveThriftCompatibility TestHCatInputFormat TestHCatLoader TestHCatLoaderComplexSchema TestHCatLoaderStorer TestHCatMultiOutputFormat TestHCatNonPartitioned TestHCatOutputFormat TestHCatPartitioned TestHCatPartitionPublish TestHCatRecordSerDe TestHCatSchema TestHCatSchemaUtils TestHCatStorer TestHCatStorerMulti TestHCatStorerWrapper TestHCatUtil TestHdfsAuthorizationProvider TestHive TestHiveClientCache TestHiveMetaStoreWithEnvironmentContext TestHiveRemote TestIDGenerator TestInputJobInfo TestJsonSerDe TestLazyHCatRecord TestMetaStoreAuthorization TestMetaStoreEventListener TestMsgBusConnection TestMultiOutputFormat TestNotificationListener TestOrcDynamicPartitioned TestOrcHCatLoader TestOrcHCatLoaderComplexSchema TestOrcHCatStorer TestPassProperties TestPermsGrp TestPigHCatUtil TestRCFileMapReduceInputFormat TestReaderWriter TestRemoteHiveMetaStore TestRemoteHiveMetaStoreIpAddress TestRemoteUGIHiveMetaStoreIpAddress TestRevisionManager TestSemanticAnalysis TestSequenceFileReadWrite TestSetUGIOnBothClientServer TestSetUGIOnOnlyClient TestSetUGIOnOnlyServer TestSnapshots TestUseDatabase TestZNodeSetUp
 
 # comes from build-command.xml excludes
-unitTests.exclude = TestSerDe TestHiveMetaStore TestHiveServer2Concurrency TestAccumuloCliDriver
+unitTests.exclude = TestSerDe TestHiveMetaStore TestHiveServer2Concurrency
 
 # module include / exclude list
 # unitTests.modules.include