Posted to commits@hive.apache.org by se...@apache.org on 2015/08/18 00:00:31 UTC

[01/50] [abbrv] hive git commit: HIVE-11496: Better tests for evaluating ORC predicate pushdown (Prasanth Jayachandran reviewed by Sergey Shelukhin)

Repository: hive
Updated Branches:
  refs/heads/hbase-metastore 0fa45e4a5 -> 2fe60861d


HIVE-11496: Better tests for evaluating ORC predicate pushdown (Prasanth Jayachandran reviewed by Sergey Shelukhin)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/7536edec
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/7536edec
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/7536edec

Branch: refs/heads/hbase-metastore
Commit: 7536edec1dc39028fca7d53f1a09aa56f9531682
Parents: 3e68cdc
Author: Prasanth Jayachandran <j....@gmail.com>
Authored: Sun Aug 9 16:58:52 2015 -0700
Committer: Prasanth Jayachandran <j....@gmail.com>
Committed: Sun Aug 9 16:58:52 2015 -0700

----------------------------------------------------------------------
 .../test/resources/testconfiguration.properties |   1 +
 .../ql/hooks/PostExecTezSummaryPrinter.java     |  72 ++
 .../hadoop/hive/ql/io/orc/RecordReaderImpl.java |  18 +-
 .../hive/ql/io/sarg/ConvertAstToSearchArg.java  |  14 +-
 .../test/queries/clientpositive/orc_ppd_basic.q | 177 +++++
 .../clientpositive/tez/orc_ppd_basic.q.out      | 701 +++++++++++++++++++
 6 files changed, 975 insertions(+), 8 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/7536edec/itests/src/test/resources/testconfiguration.properties
----------------------------------------------------------------------
diff --git a/itests/src/test/resources/testconfiguration.properties b/itests/src/test/resources/testconfiguration.properties
index c710b0b..bed621d 100644
--- a/itests/src/test/resources/testconfiguration.properties
+++ b/itests/src/test/resources/testconfiguration.properties
@@ -331,6 +331,7 @@ minitez.query.files=bucket_map_join_tez1.q,\
   mapjoin_decimal.q,\
   lvj_mapjoin.q, \
   mrr.q,\
+  orc_ppd_basic.q,\
   tez_bmj_schema_evolution.q,\
   tez_dml.q,\
   tez_fsstat.q,\

http://git-wip-us.apache.org/repos/asf/hive/blob/7536edec/ql/src/java/org/apache/hadoop/hive/ql/hooks/PostExecTezSummaryPrinter.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/hooks/PostExecTezSummaryPrinter.java b/ql/src/java/org/apache/hadoop/hive/ql/hooks/PostExecTezSummaryPrinter.java
new file mode 100644
index 0000000..60c587f
--- /dev/null
+++ b/ql/src/java/org/apache/hadoop/hive/ql/hooks/PostExecTezSummaryPrinter.java
@@ -0,0 +1,72 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p/>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p/>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.ql.hooks;
+
+import java.util.List;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.ql.QueryPlan;
+import org.apache.hadoop.hive.ql.exec.Utilities;
+import org.apache.hadoop.hive.ql.exec.tez.TezTask;
+import org.apache.hadoop.hive.ql.session.SessionState;
+import org.apache.tez.common.counters.CounterGroup;
+import org.apache.tez.common.counters.TezCounter;
+import org.apache.tez.common.counters.TezCounters;
+
+/**
+ * Post execution hook to print hive tez counters to console error stream.
+ */
+public class PostExecTezSummaryPrinter implements ExecuteWithHookContext {
+  private static final Log LOG = LogFactory.getLog(PostExecTezSummaryPrinter.class.getName());
+
+  @Override
+  public void run(HookContext hookContext) throws Exception {
+    assert (hookContext.getHookType() == HookContext.HookType.POST_EXEC_HOOK);
+    HiveConf conf = hookContext.getConf();
+    if (!"tez".equals(HiveConf.getVar(conf, HiveConf.ConfVars.HIVE_EXECUTION_ENGINE))) {
+      return;
+    }
+
+    LOG.info("Executing post execution hook to print tez summary..");
+    SessionState ss = SessionState.get();
+    SessionState.LogHelper console = ss.getConsole();
+    QueryPlan plan = hookContext.getQueryPlan();
+    if (plan == null) {
+      return;
+    }
+
+    List<TezTask> rootTasks = Utilities.getTezTasks(plan.getRootTasks());
+    for (TezTask tezTask : rootTasks) {
+      LOG.info("Printing summary for tez task: " + tezTask.getName());
+      TezCounters counters = tezTask.getTezCounters();
+      if (counters != null) {
+        for (CounterGroup group : counters) {
+          if ("HIVE".equals(group.getDisplayName())) {
+            console.printError(tezTask.getId() + " HIVE COUNTERS:");
+            for (TezCounter counter : group) {
+              console.printError("   " + counter.getDisplayName() + ": " + counter.getValue());
+            }
+          }
+        }
+      }
+    }
+  }
+
+}
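
The hook above is wired up by the new qfile further down via
SET hive.exec.post.hooks=org.apache.hadoop.hive.ql.hooks.PostExecTezSummaryPrinter.
A minimal sketch of enabling it programmatically instead, assuming
HiveConf.ConfVars.POSTEXECHOOKS is the constant backing hive.exec.post.hooks:

    import org.apache.hadoop.hive.conf.HiveConf;

    // Sketch only, not part of this patch: register the summary printer as a
    // post-execution hook. The hook returns early unless the Tez engine is active.
    public class EnableTezSummaryHook {
      public static HiveConf configure() {
        HiveConf conf = new HiveConf();
        conf.setVar(HiveConf.ConfVars.HIVE_EXECUTION_ENGINE, "tez");
        // POSTEXECHOOKS is assumed to be the ConfVars constant for hive.exec.post.hooks
        conf.setVar(HiveConf.ConfVars.POSTEXECHOOKS,
            "org.apache.hadoop.hive.ql.hooks.PostExecTezSummaryPrinter");
        return conf;
      }
    }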

http://git-wip-us.apache.org/repos/asf/hive/blob/7536edec/ql/src/java/org/apache/hadoop/hive/ql/io/orc/RecordReaderImpl.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/RecordReaderImpl.java b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/RecordReaderImpl.java
index f85420d..0d765b1 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/RecordReaderImpl.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/RecordReaderImpl.java
@@ -376,7 +376,7 @@ class RecordReaderImpl implements RecordReader {
       Object predObj = getBaseObjectForComparison(predicate.getType(), baseObj);
 
       result = evaluatePredicateMinMax(predicate, predObj, minValue, maxValue, hasNull);
-      if (bloomFilter != null && result != TruthValue.NO_NULL && result != TruthValue.NO) {
+      if (shouldEvaluateBloomFilter(predicate, result, bloomFilter)) {
         result = evaluatePredicateBloomFilter(predicate, predObj, bloomFilter, hasNull);
       }
       // in case failed conversion, return the default YES_NO_NULL truth value
@@ -394,6 +394,22 @@ class RecordReaderImpl implements RecordReader {
     return result;
   }
 
+  private static boolean shouldEvaluateBloomFilter(PredicateLeaf predicate,
+      TruthValue result, BloomFilterIO bloomFilter) {
+    // evaluate bloom filter only when
+    // 1) Bloom filter is available
+    // 2) Min/Max evaluation yield YES or MAYBE
+    // 3) Predicate is EQUALS or IN list
+    if (bloomFilter != null
+        && result != TruthValue.NO_NULL && result != TruthValue.NO
+        && (predicate.getOperator().equals(PredicateLeaf.Operator.EQUALS)
+            || predicate.getOperator().equals(PredicateLeaf.Operator.NULL_SAFE_EQUALS)
+            || predicate.getOperator().equals(PredicateLeaf.Operator.IN))) {
+      return true;
+    }
+    return false;
+  }
+
   private static TruthValue evaluatePredicateMinMax(PredicateLeaf predicate, Object predObj,
       Object minValue,
       Object maxValue,
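
To make the new gate concrete: the bloom filter is consulted only for exact-match
operators (EQUALS, NULL_SAFE_EQUALS, IN), and only when min/max pruning has not
already eliminated the row group. A standalone sketch of that decision, using the
operator names from the hunk above and an illustrative boolean in place of the
TruthValue check:

    import org.apache.hadoop.hive.ql.io.sarg.PredicateLeaf;

    // Sketch only, not part of this patch. Range predicates such as "t < 100"
    // never reach the bloom filter; an equality such as "t = 125" does once
    // min/max statistics alone cannot rule the row group out.
    public class BloomFilterGateSketch {
      static boolean consultBloomFilter(PredicateLeaf.Operator op,
          boolean minMaxRuledOut, boolean bloomFilterPresent) {
        boolean exactMatch = op == PredicateLeaf.Operator.EQUALS
            || op == PredicateLeaf.Operator.NULL_SAFE_EQUALS
            || op == PredicateLeaf.Operator.IN;
        return bloomFilterPresent && !minMaxRuledOut && exactMatch;
      }
    }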

http://git-wip-us.apache.org/repos/asf/hive/blob/7536edec/ql/src/java/org/apache/hadoop/hive/ql/io/sarg/ConvertAstToSearchArg.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/sarg/ConvertAstToSearchArg.java b/ql/src/java/org/apache/hadoop/hive/ql/io/sarg/ConvertAstToSearchArg.java
index eb8c03f..5c4b7ea 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/sarg/ConvertAstToSearchArg.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/sarg/ConvertAstToSearchArg.java
@@ -18,14 +18,15 @@
 
 package org.apache.hadoop.hive.ql.io.sarg;
 
-import com.esotericsoftware.kryo.Kryo;
-import com.esotericsoftware.kryo.io.Input;
+import java.sql.Date;
+import java.sql.Timestamp;
+import java.util.List;
+
 import org.apache.commons.codec.binary.Base64;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hive.common.type.HiveChar;
-import org.apache.hadoop.hive.common.type.HiveVarchar;
 import org.apache.hadoop.hive.ql.exec.Utilities;
 import org.apache.hadoop.hive.ql.plan.ExprNodeColumnDesc;
 import org.apache.hadoop.hive.ql.plan.ExprNodeConstantDesc;
@@ -51,9 +52,8 @@ import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
 import org.apache.hadoop.hive.serde2.typeinfo.PrimitiveTypeInfo;
 import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo;
 
-import java.sql.Date;
-import java.sql.Timestamp;
-import java.util.List;
+import com.esotericsoftware.kryo.Kryo;
+import com.esotericsoftware.kryo.io.Input;
 
 public class ConvertAstToSearchArg {
   private static final Log LOG = LogFactory.getLog(ConvertAstToSearchArg.class);
@@ -145,7 +145,7 @@ public class ConvertAstToSearchArg {
         return ((Number) lit).longValue();
       case STRING:
         if (lit instanceof HiveChar) {
-          lit = ((HiveChar) lit).getPaddedValue();
+          return ((HiveChar) lit).getPaddedValue();
         } else if (lit instanceof String) {
           return lit;
         } else {

http://git-wip-us.apache.org/repos/asf/hive/blob/7536edec/ql/src/test/queries/clientpositive/orc_ppd_basic.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/orc_ppd_basic.q b/ql/src/test/queries/clientpositive/orc_ppd_basic.q
new file mode 100644
index 0000000..f9dafef
--- /dev/null
+++ b/ql/src/test/queries/clientpositive/orc_ppd_basic.q
@@ -0,0 +1,177 @@
+SET hive.fetch.task.conversion=none;
+SET hive.optimize.index.filter=true;
+SET hive.cbo.enable=false;
+
+CREATE TABLE staging(t tinyint,
+           si smallint,
+           i int,
+           b bigint,
+           f float,
+           d double,
+           bo boolean,
+           s string,
+           ts timestamp,
+           dec decimal(4,2),
+           bin binary)
+ROW FORMAT DELIMITED FIELDS TERMINATED BY '|'
+STORED AS TEXTFILE;
+
+LOAD DATA LOCAL INPATH '../../data/files/over1k' OVERWRITE INTO TABLE staging;
+LOAD DATA LOCAL INPATH '../../data/files/over1k' INTO TABLE staging;
+
+CREATE TABLE orc_ppd_staging(t tinyint,
+           si smallint,
+           i int,
+           b bigint,
+           f float,
+           d double,
+           bo boolean,
+           s string,
+           c char(50),
+           v varchar(50),
+           da date,
+           ts timestamp,
+           dec decimal(4,2),
+           bin binary)
+STORED AS ORC tblproperties("orc.row.index.stride" = "1000", "orc.bloom.filter.columns"="*");
+
+insert overwrite table orc_ppd_staging select t, si, i, b, f, d, bo, s, cast(s as char(50)), cast(s as varchar(50)), cast(ts as date), ts, dec, bin from staging order by t, s;
+
+-- just to introduce a gap in min/max range for bloom filters. The dataset has contiguous values
+-- which makes it hard to test bloom filters
+insert into orc_ppd_staging select -10,-321,-65680,-4294967430,-97.94,-13.07,true,"aaa","aaa","aaa","1990-03-11","1990-03-11 10:11:58.703308",-71.54,"aaa" from staging limit 1;
+insert into orc_ppd_staging select 127,331,65690,4294967440,107.94,23.07,true,"zzz","zzz","zzz","2023-03-11","2023-03-11 10:11:58.703308",71.54,"zzz" from staging limit 1;
+
+CREATE TABLE orc_ppd(t tinyint,
+           si smallint,
+           i int,
+           b bigint,
+           f float,
+           d double,
+           bo boolean,
+           s string,
+           c char(50),
+           v varchar(50),
+           da date,
+           ts timestamp,
+           dec decimal(4,2),
+           bin binary)
+STORED AS ORC tblproperties("orc.row.index.stride" = "1000", "orc.bloom.filter.columns"="*");
+
+insert overwrite table orc_ppd select t, si, i, b, f, d, bo, s, cast(s as char(50)), cast(s as varchar(50)), cast(ts as date), ts, dec, bin from orc_ppd_staging order by t, s;
+
+SET hive.exec.post.hooks=org.apache.hadoop.hive.ql.hooks.PostExecTezSummaryPrinter;
+
+-- Row group statistics for column t:
+-- Entry 0: count: 994 hasNull: true min: -10 max: 54 sum: 26014 positions: 0,0,0,0,0,0,0
+-- Entry 1: count: 1000 hasNull: false min: 54 max: 118 sum: 86812 positions: 0,2,124,0,0,116,11
+-- Entry 2: count: 100 hasNull: false min: 118 max: 127 sum: 12151 positions: 0,4,119,0,0,244,19
+
+-- INPUT_RECORDS: 2100 (all row groups)
+select count(*) from orc_ppd;
+
+-- INPUT_RECORDS: 0 (no row groups)
+select count(*) from orc_ppd where t > 127;
+
+-- INPUT_RECORDS: 1000 (1 row group)
+select count(*) from orc_ppd where t = 55;
+select count(*) from orc_ppd where t <=> 50;
+select count(*) from orc_ppd where t <=> 100;
+
+-- INPUT_RECORDS: 2000 (2 row groups)
+select count(*) from orc_ppd where t = "54";
+
+-- INPUT_RECORDS: 1000 (1 row group)
+select count(*) from orc_ppd where t = -10.0;
+
+-- INPUT_RECORDS: 1000 (1 row group)
+select count(*) from orc_ppd where t = cast(53 as float);
+select count(*) from orc_ppd where t = cast(53 as double);
+
+-- INPUT_RECORDS: 2000 (2 row groups)
+select count(*) from orc_ppd where t < 100;
+
+-- INPUT_RECORDS: 1000 (1 row group)
+select count(*) from orc_ppd where t < 100 and t > 98;
+
+-- INPUT_RECORDS: 2000 (2 row groups)
+select count(*) from orc_ppd where t <= 100;
+
+-- INPUT_RECORDS: 1000 (1 row group)
+select count(*) from orc_ppd where t is null;
+
+-- INPUT_RECORDS: 1100 (2 row groups)
+select count(*) from orc_ppd where t in (5, 120);
+
+-- INPUT_RECORDS: 1000 (1 row group)
+select count(*) from orc_ppd where t between 60 and 80;
+
+-- bloom filter tests
+-- INPUT_RECORDS: 0
+select count(*) from orc_ppd where t = -100;
+select count(*) from orc_ppd where t <=> -100;
+select count(*) from orc_ppd where t = 125;
+select count(*) from orc_ppd where t IN (-100, 125, 200);
+
+-- Row group statistics for column s:
+-- Entry 0: count: 1000 hasNull: false min:  max: zach young sum: 12907 positions: 0,0,0
+-- Entry 1: count: 1000 hasNull: false min: alice allen max: zach zipper sum: 12704 positions: 0,1611,191
+-- Entry 2: count: 100 hasNull: false min: bob davidson max: zzz sum: 1281 positions: 0,3246,373
+
+-- INPUT_RECORDS: 0 (no row groups)
+select count(*) from orc_ppd where s > "zzz";
+
+-- INPUT_RECORDS: 1000 (1 row group)
+select count(*) from orc_ppd where s = "zach young";
+select count(*) from orc_ppd where s <=> "zach zipper";
+select count(*) from orc_ppd where s <=> "";
+
+-- INPUT_RECORDS: 0
+select count(*) from orc_ppd where s is null;
+
+-- INPUT_RECORDS: 2100
+select count(*) from orc_ppd where s is not null;
+
+-- INPUT_RECORDS: 0
+select count(*) from orc_ppd where s = cast("zach young" as char(50));
+
+-- INPUT_RECORDS: 1000 (1 row group)
+select count(*) from orc_ppd where s = cast("zach young" as char(10));
+select count(*) from orc_ppd where s = cast("zach young" as varchar(10));
+select count(*) from orc_ppd where s = cast("zach young" as varchar(50));
+
+-- INPUT_RECORDS: 2000 (2 row groups)
+select count(*) from orc_ppd where s < "b";
+
+-- INPUT_RECORDS: 2000 (2 row groups)
+select count(*) from orc_ppd where s > "alice" and s < "bob";
+
+-- INPUT_RECORDS: 2000 (2 row groups)
+select count(*) from orc_ppd where s in ("alice allen", "");
+
+-- INPUT_RECORDS: 2000 (2 row groups)
+select count(*) from orc_ppd where s between "" and "alice allen";
+
+-- INPUT_RECORDS: 100 (1 row group)
+select count(*) from orc_ppd where s between "zz" and "zzz";
+
+-- INPUT_RECORDS: 1100 (2 row groups)
+select count(*) from orc_ppd where s between "zach zipper" and "zzz";
+
+-- bloom filter tests
+-- INPUT_RECORDS: 0
+select count(*) from orc_ppd where s = "hello world";
+select count(*) from orc_ppd where s <=> "apache hive";
+select count(*) from orc_ppd where s IN ("a", "z");
+
+-- INPUT_RECORDS: 100
+select count(*) from orc_ppd where s = "sarah ovid";
+
+-- INPUT_RECORDS: 1100
+select count(*) from orc_ppd where s = "wendy king";
+
+-- INPUT_RECORDS: 1000
+select count(*) from orc_ppd where s = "wendy king" and t < 0;
+
+-- INPUT_RECORDS: 100
+select count(*) from orc_ppd where s = "wendy king" and t > 100;
\ No newline at end of file
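
As a worked example of how the INPUT_RECORDS comments above follow from the quoted
row group statistics: for "t = 55", group 0 (min -10, max 54) and group 2 (min 118,
max 127) can be ruled out by min/max alone, so only group 1's 1000 rows are read.
A small sketch of that selection, with the stride and group sizes taken from the
stats quoted in the qfile:

    // Sketch only, not part of this patch: min/max pruning for "t = 55" against the
    // three row groups of column t (stride 1000, 2100 rows total).
    public class RowGroupPruningSketch {
      public static void main(String[] args) {
        int[] mins = {-10, 54, 118};
        int[] maxs = { 54, 118, 127};
        int[] rows = {1000, 1000, 100};
        int value = 55, inputRecords = 0;
        for (int g = 0; g < mins.length; g++) {
          if (value >= mins[g] && value <= maxs[g]) {
            inputRecords += rows[g];   // the group may contain the value, so it is read
          }
        }
        System.out.println(inputRecords);  // 1000, matching "INPUT_RECORDS: 1000 (1 row group)"
      }
    }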

http://git-wip-us.apache.org/repos/asf/hive/blob/7536edec/ql/src/test/results/clientpositive/tez/orc_ppd_basic.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/orc_ppd_basic.q.out b/ql/src/test/results/clientpositive/tez/orc_ppd_basic.q.out
new file mode 100644
index 0000000..2d0984b
--- /dev/null
+++ b/ql/src/test/results/clientpositive/tez/orc_ppd_basic.q.out
@@ -0,0 +1,701 @@
+PREHOOK: query: CREATE TABLE staging(t tinyint,
+           si smallint,
+           i int,
+           b bigint,
+           f float,
+           d double,
+           bo boolean,
+           s string,
+           ts timestamp,
+           dec decimal(4,2),
+           bin binary)
+ROW FORMAT DELIMITED FIELDS TERMINATED BY '|'
+STORED AS TEXTFILE
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@staging
+POSTHOOK: query: CREATE TABLE staging(t tinyint,
+           si smallint,
+           i int,
+           b bigint,
+           f float,
+           d double,
+           bo boolean,
+           s string,
+           ts timestamp,
+           dec decimal(4,2),
+           bin binary)
+ROW FORMAT DELIMITED FIELDS TERMINATED BY '|'
+STORED AS TEXTFILE
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@staging
+PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/over1k' OVERWRITE INTO TABLE staging
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@staging
+POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/over1k' OVERWRITE INTO TABLE staging
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@staging
+PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/over1k' INTO TABLE staging
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@staging
+POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/over1k' INTO TABLE staging
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@staging
+PREHOOK: query: CREATE TABLE orc_ppd_staging(t tinyint,
+           si smallint,
+           i int,
+           b bigint,
+           f float,
+           d double,
+           bo boolean,
+           s string,
+           c char(50),
+           v varchar(50),
+           da date,
+           ts timestamp,
+           dec decimal(4,2),
+           bin binary)
+STORED AS ORC tblproperties("orc.row.index.stride" = "1000", "orc.bloom.filter.columns"="*")
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@orc_ppd_staging
+POSTHOOK: query: CREATE TABLE orc_ppd_staging(t tinyint,
+           si smallint,
+           i int,
+           b bigint,
+           f float,
+           d double,
+           bo boolean,
+           s string,
+           c char(50),
+           v varchar(50),
+           da date,
+           ts timestamp,
+           dec decimal(4,2),
+           bin binary)
+STORED AS ORC tblproperties("orc.row.index.stride" = "1000", "orc.bloom.filter.columns"="*")
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@orc_ppd_staging
+PREHOOK: query: insert overwrite table orc_ppd_staging select t, si, i, b, f, d, bo, s, cast(s as char(50)), cast(s as varchar(50)), cast(ts as date), ts, dec, bin from staging order by t, s
+PREHOOK: type: QUERY
+PREHOOK: Input: default@staging
+PREHOOK: Output: default@orc_ppd_staging
+POSTHOOK: query: insert overwrite table orc_ppd_staging select t, si, i, b, f, d, bo, s, cast(s as char(50)), cast(s as varchar(50)), cast(ts as date), ts, dec, bin from staging order by t, s
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@staging
+POSTHOOK: Output: default@orc_ppd_staging
+POSTHOOK: Lineage: orc_ppd_staging.b SIMPLE [(staging)staging.FieldSchema(name:b, type:bigint, comment:null), ]
+POSTHOOK: Lineage: orc_ppd_staging.bin SIMPLE [(staging)staging.FieldSchema(name:bin, type:binary, comment:null), ]
+POSTHOOK: Lineage: orc_ppd_staging.bo SIMPLE [(staging)staging.FieldSchema(name:bo, type:boolean, comment:null), ]
+POSTHOOK: Lineage: orc_ppd_staging.c EXPRESSION [(staging)staging.FieldSchema(name:s, type:string, comment:null), ]
+POSTHOOK: Lineage: orc_ppd_staging.d SIMPLE [(staging)staging.FieldSchema(name:d, type:double, comment:null), ]
+POSTHOOK: Lineage: orc_ppd_staging.da EXPRESSION [(staging)staging.FieldSchema(name:ts, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: orc_ppd_staging.dec SIMPLE [(staging)staging.FieldSchema(name:dec, type:decimal(4,2), comment:null), ]
+POSTHOOK: Lineage: orc_ppd_staging.f SIMPLE [(staging)staging.FieldSchema(name:f, type:float, comment:null), ]
+POSTHOOK: Lineage: orc_ppd_staging.i SIMPLE [(staging)staging.FieldSchema(name:i, type:int, comment:null), ]
+POSTHOOK: Lineage: orc_ppd_staging.s SIMPLE [(staging)staging.FieldSchema(name:s, type:string, comment:null), ]
+POSTHOOK: Lineage: orc_ppd_staging.si SIMPLE [(staging)staging.FieldSchema(name:si, type:smallint, comment:null), ]
+POSTHOOK: Lineage: orc_ppd_staging.t SIMPLE [(staging)staging.FieldSchema(name:t, type:tinyint, comment:null), ]
+POSTHOOK: Lineage: orc_ppd_staging.ts SIMPLE [(staging)staging.FieldSchema(name:ts, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: orc_ppd_staging.v EXPRESSION [(staging)staging.FieldSchema(name:s, type:string, comment:null), ]
+PREHOOK: query: -- just to introduce a gap in min/max range for bloom filters. The dataset has contiguous values
+-- which makes it hard to test bloom filters
+insert into orc_ppd_staging select -10,-321,-65680,-4294967430,-97.94,-13.07,true,"aaa","aaa","aaa","1990-03-11","1990-03-11 10:11:58.703308",-71.54,"aaa" from staging limit 1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@staging
+PREHOOK: Output: default@orc_ppd_staging
+POSTHOOK: query: -- just to introduce a gap in min/max range for bloom filters. The dataset has contiguous values
+-- which makes it hard to test bloom filters
+insert into orc_ppd_staging select -10,-321,-65680,-4294967430,-97.94,-13.07,true,"aaa","aaa","aaa","1990-03-11","1990-03-11 10:11:58.703308",-71.54,"aaa" from staging limit 1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@staging
+POSTHOOK: Output: default@orc_ppd_staging
+POSTHOOK: Lineage: orc_ppd_staging.b EXPRESSION []
+POSTHOOK: Lineage: orc_ppd_staging.bin EXPRESSION []
+POSTHOOK: Lineage: orc_ppd_staging.bo SIMPLE []
+POSTHOOK: Lineage: orc_ppd_staging.c EXPRESSION []
+POSTHOOK: Lineage: orc_ppd_staging.d EXPRESSION []
+POSTHOOK: Lineage: orc_ppd_staging.da EXPRESSION []
+POSTHOOK: Lineage: orc_ppd_staging.dec EXPRESSION []
+POSTHOOK: Lineage: orc_ppd_staging.f EXPRESSION []
+POSTHOOK: Lineage: orc_ppd_staging.i EXPRESSION []
+POSTHOOK: Lineage: orc_ppd_staging.s SIMPLE []
+POSTHOOK: Lineage: orc_ppd_staging.si EXPRESSION []
+POSTHOOK: Lineage: orc_ppd_staging.t EXPRESSION []
+POSTHOOK: Lineage: orc_ppd_staging.ts EXPRESSION []
+POSTHOOK: Lineage: orc_ppd_staging.v EXPRESSION []
+PREHOOK: query: insert into orc_ppd_staging select 127,331,65690,4294967440,107.94,23.07,true,"zzz","zzz","zzz","2023-03-11","2023-03-11 10:11:58.703308",71.54,"zzz" from staging limit 1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@staging
+PREHOOK: Output: default@orc_ppd_staging
+POSTHOOK: query: insert into orc_ppd_staging select 127,331,65690,4294967440,107.94,23.07,true,"zzz","zzz","zzz","2023-03-11","2023-03-11 10:11:58.703308",71.54,"zzz" from staging limit 1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@staging
+POSTHOOK: Output: default@orc_ppd_staging
+POSTHOOK: Lineage: orc_ppd_staging.b SIMPLE []
+POSTHOOK: Lineage: orc_ppd_staging.bin EXPRESSION []
+POSTHOOK: Lineage: orc_ppd_staging.bo SIMPLE []
+POSTHOOK: Lineage: orc_ppd_staging.c EXPRESSION []
+POSTHOOK: Lineage: orc_ppd_staging.d SIMPLE []
+POSTHOOK: Lineage: orc_ppd_staging.da EXPRESSION []
+POSTHOOK: Lineage: orc_ppd_staging.dec EXPRESSION []
+POSTHOOK: Lineage: orc_ppd_staging.f EXPRESSION []
+POSTHOOK: Lineage: orc_ppd_staging.i SIMPLE []
+POSTHOOK: Lineage: orc_ppd_staging.s SIMPLE []
+POSTHOOK: Lineage: orc_ppd_staging.si EXPRESSION []
+POSTHOOK: Lineage: orc_ppd_staging.t EXPRESSION []
+POSTHOOK: Lineage: orc_ppd_staging.ts EXPRESSION []
+POSTHOOK: Lineage: orc_ppd_staging.v EXPRESSION []
+PREHOOK: query: CREATE TABLE orc_ppd(t tinyint,
+           si smallint,
+           i int,
+           b bigint,
+           f float,
+           d double,
+           bo boolean,
+           s string,
+           c char(50),
+           v varchar(50),
+           da date,
+           ts timestamp,
+           dec decimal(4,2),
+           bin binary)
+STORED AS ORC tblproperties("orc.row.index.stride" = "1000", "orc.bloom.filter.columns"="*")
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@orc_ppd
+POSTHOOK: query: CREATE TABLE orc_ppd(t tinyint,
+           si smallint,
+           i int,
+           b bigint,
+           f float,
+           d double,
+           bo boolean,
+           s string,
+           c char(50),
+           v varchar(50),
+           da date,
+           ts timestamp,
+           dec decimal(4,2),
+           bin binary)
+STORED AS ORC tblproperties("orc.row.index.stride" = "1000", "orc.bloom.filter.columns"="*")
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@orc_ppd
+PREHOOK: query: insert overwrite table orc_ppd select t, si, i, b, f, d, bo, s, cast(s as char(50)), cast(s as varchar(50)), cast(ts as date), ts, dec, bin from orc_ppd_staging order by t, s
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_ppd_staging
+PREHOOK: Output: default@orc_ppd
+POSTHOOK: query: insert overwrite table orc_ppd select t, si, i, b, f, d, bo, s, cast(s as char(50)), cast(s as varchar(50)), cast(ts as date), ts, dec, bin from orc_ppd_staging order by t, s
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_ppd_staging
+POSTHOOK: Output: default@orc_ppd
+POSTHOOK: Lineage: orc_ppd.b SIMPLE [(orc_ppd_staging)orc_ppd_staging.FieldSchema(name:b, type:bigint, comment:null), ]
+POSTHOOK: Lineage: orc_ppd.bin SIMPLE [(orc_ppd_staging)orc_ppd_staging.FieldSchema(name:bin, type:binary, comment:null), ]
+POSTHOOK: Lineage: orc_ppd.bo SIMPLE [(orc_ppd_staging)orc_ppd_staging.FieldSchema(name:bo, type:boolean, comment:null), ]
+POSTHOOK: Lineage: orc_ppd.c EXPRESSION [(orc_ppd_staging)orc_ppd_staging.FieldSchema(name:s, type:string, comment:null), ]
+POSTHOOK: Lineage: orc_ppd.d SIMPLE [(orc_ppd_staging)orc_ppd_staging.FieldSchema(name:d, type:double, comment:null), ]
+POSTHOOK: Lineage: orc_ppd.da EXPRESSION [(orc_ppd_staging)orc_ppd_staging.FieldSchema(name:ts, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: orc_ppd.dec SIMPLE [(orc_ppd_staging)orc_ppd_staging.FieldSchema(name:dec, type:decimal(4,2), comment:null), ]
+POSTHOOK: Lineage: orc_ppd.f SIMPLE [(orc_ppd_staging)orc_ppd_staging.FieldSchema(name:f, type:float, comment:null), ]
+POSTHOOK: Lineage: orc_ppd.i SIMPLE [(orc_ppd_staging)orc_ppd_staging.FieldSchema(name:i, type:int, comment:null), ]
+POSTHOOK: Lineage: orc_ppd.s SIMPLE [(orc_ppd_staging)orc_ppd_staging.FieldSchema(name:s, type:string, comment:null), ]
+POSTHOOK: Lineage: orc_ppd.si SIMPLE [(orc_ppd_staging)orc_ppd_staging.FieldSchema(name:si, type:smallint, comment:null), ]
+POSTHOOK: Lineage: orc_ppd.t SIMPLE [(orc_ppd_staging)orc_ppd_staging.FieldSchema(name:t, type:tinyint, comment:null), ]
+POSTHOOK: Lineage: orc_ppd.ts SIMPLE [(orc_ppd_staging)orc_ppd_staging.FieldSchema(name:ts, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: orc_ppd.v EXPRESSION [(orc_ppd_staging)orc_ppd_staging.FieldSchema(name:s, type:string, comment:null), ]
+PREHOOK: query: -- Row group statistics for column t:
+-- Entry 0: count: 994 hasNull: true min: -10 max: 54 sum: 26014 positions: 0,0,0,0,0,0,0
+-- Entry 1: count: 1000 hasNull: false min: 54 max: 118 sum: 86812 positions: 0,2,124,0,0,116,11
+-- Entry 2: count: 100 hasNull: false min: 118 max: 127 sum: 12151 positions: 0,4,119,0,0,244,19
+
+-- INPUT_RECORDS: 2100 (all row groups)
+select count(*) from orc_ppd
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_ppd
+#### A masked pattern was here ####
+Stage-1 HIVE COUNTERS:
+   CREATED_FILES: 1
+   DESERIALIZE_ERRORS: 0
+   RECORDS_IN_Map_1: 2100
+   RECORDS_OUT_0: 1
+   RECORDS_OUT_INTERMEDIATE_Map_1: 1
+2100
+PREHOOK: query: -- INPUT_RECORDS: 0 (no row groups)
+select count(*) from orc_ppd where t > 127
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_ppd
+#### A masked pattern was here ####
+Stage-1 HIVE COUNTERS:
+   CREATED_FILES: 1
+   RECORDS_OUT_0: 1
+0
+PREHOOK: query: -- INPUT_RECORDS: 1000 (1 row group)
+select count(*) from orc_ppd where t = 55
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_ppd
+#### A masked pattern was here ####
+Stage-1 HIVE COUNTERS:
+   CREATED_FILES: 1
+   DESERIALIZE_ERRORS: 0
+   RECORDS_IN_Map_1: 1000
+   RECORDS_OUT_0: 1
+   RECORDS_OUT_INTERMEDIATE_Map_1: 1
+8
+PREHOOK: query: select count(*) from orc_ppd where t <=> 50
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_ppd
+#### A masked pattern was here ####
+Stage-1 HIVE COUNTERS:
+   CREATED_FILES: 1
+   DESERIALIZE_ERRORS: 0
+   RECORDS_IN_Map_1: 1000
+   RECORDS_OUT_0: 1
+   RECORDS_OUT_INTERMEDIATE_Map_1: 1
+22
+PREHOOK: query: select count(*) from orc_ppd where t <=> 100
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_ppd
+#### A masked pattern was here ####
+Stage-1 HIVE COUNTERS:
+   CREATED_FILES: 1
+   DESERIALIZE_ERRORS: 0
+   RECORDS_IN_Map_1: 1000
+   RECORDS_OUT_0: 1
+   RECORDS_OUT_INTERMEDIATE_Map_1: 1
+16
+PREHOOK: query: -- INPUT_RECORDS: 2000 (2 row groups)
+select count(*) from orc_ppd where t = "54"
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_ppd
+#### A masked pattern was here ####
+Stage-1 HIVE COUNTERS:
+   CREATED_FILES: 1
+   DESERIALIZE_ERRORS: 0
+   RECORDS_IN_Map_1: 2000
+   RECORDS_OUT_0: 1
+   RECORDS_OUT_INTERMEDIATE_Map_1: 1
+18
+PREHOOK: query: -- INPUT_RECORDS: 1000 (1 row group)
+select count(*) from orc_ppd where t = -10.0
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_ppd
+#### A masked pattern was here ####
+Stage-1 HIVE COUNTERS:
+   CREATED_FILES: 1
+   DESERIALIZE_ERRORS: 0
+   RECORDS_IN_Map_1: 1000
+   RECORDS_OUT_0: 1
+   RECORDS_OUT_INTERMEDIATE_Map_1: 1
+1
+PREHOOK: query: -- INPUT_RECORDS: 1000 (1 row group)
+select count(*) from orc_ppd where t = cast(53 as float)
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_ppd
+#### A masked pattern was here ####
+Stage-1 HIVE COUNTERS:
+   CREATED_FILES: 1
+   DESERIALIZE_ERRORS: 0
+   RECORDS_IN_Map_1: 1000
+   RECORDS_OUT_0: 1
+   RECORDS_OUT_INTERMEDIATE_Map_1: 1
+32
+PREHOOK: query: select count(*) from orc_ppd where t = cast(53 as double)
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_ppd
+#### A masked pattern was here ####
+Stage-1 HIVE COUNTERS:
+   CREATED_FILES: 1
+   DESERIALIZE_ERRORS: 0
+   RECORDS_IN_Map_1: 1000
+   RECORDS_OUT_0: 1
+   RECORDS_OUT_INTERMEDIATE_Map_1: 1
+32
+PREHOOK: query: -- INPUT_RECORDS: 2000 (2 row groups)
+select count(*) from orc_ppd where t < 100
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_ppd
+#### A masked pattern was here ####
+Stage-1 HIVE COUNTERS:
+   CREATED_FILES: 1
+   DESERIALIZE_ERRORS: 0
+   RECORDS_IN_Map_1: 2000
+   RECORDS_OUT_0: 1
+   RECORDS_OUT_INTERMEDIATE_Map_1: 1
+1697
+PREHOOK: query: -- INPUT_RECORDS: 1000 (1 row group)
+select count(*) from orc_ppd where t < 100 and t > 98
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_ppd
+#### A masked pattern was here ####
+Stage-1 HIVE COUNTERS:
+   CREATED_FILES: 1
+   DESERIALIZE_ERRORS: 0
+   RECORDS_IN_Map_1: 1000
+   RECORDS_OUT_0: 1
+   RECORDS_OUT_INTERMEDIATE_Map_1: 1
+12
+PREHOOK: query: -- INPUT_RECORDS: 2000 (2 row groups)
+select count(*) from orc_ppd where t <= 100
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_ppd
+#### A masked pattern was here ####
+Stage-1 HIVE COUNTERS:
+   CREATED_FILES: 1
+   DESERIALIZE_ERRORS: 0
+   RECORDS_IN_Map_1: 2000
+   RECORDS_OUT_0: 1
+   RECORDS_OUT_INTERMEDIATE_Map_1: 1
+1713
+PREHOOK: query: -- INPUT_RECORDS: 1000 (1 row group)
+select count(*) from orc_ppd where t is null
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_ppd
+#### A masked pattern was here ####
+Stage-1 HIVE COUNTERS:
+   CREATED_FILES: 1
+   DESERIALIZE_ERRORS: 0
+   RECORDS_IN_Map_1: 1000
+   RECORDS_OUT_0: 1
+   RECORDS_OUT_INTERMEDIATE_Map_1: 1
+6
+PREHOOK: query: -- INPUT_RECORDS: 1100 (2 row groups)
+select count(*) from orc_ppd where t in (5, 120)
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_ppd
+#### A masked pattern was here ####
+Stage-1 HIVE COUNTERS:
+   CREATED_FILES: 1
+   DESERIALIZE_ERRORS: 0
+   RECORDS_IN_Map_1: 1100
+   RECORDS_OUT_0: 1
+   RECORDS_OUT_INTERMEDIATE_Map_1: 1
+50
+PREHOOK: query: -- INPUT_RECORDS: 1000 (1 row group)
+select count(*) from orc_ppd where t between 60 and 80
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_ppd
+#### A masked pattern was here ####
+Stage-1 HIVE COUNTERS:
+   CREATED_FILES: 1
+   DESERIALIZE_ERRORS: 0
+   RECORDS_IN_Map_1: 1000
+   RECORDS_OUT_0: 1
+   RECORDS_OUT_INTERMEDIATE_Map_1: 1
+318
+PREHOOK: query: -- bloom filter tests
+-- INPUT_RECORDS: 0
+select count(*) from orc_ppd where t = -100
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_ppd
+#### A masked pattern was here ####
+Stage-1 HIVE COUNTERS:
+   CREATED_FILES: 1
+   RECORDS_OUT_0: 1
+0
+PREHOOK: query: select count(*) from orc_ppd where t <=> -100
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_ppd
+#### A masked pattern was here ####
+Stage-1 HIVE COUNTERS:
+   CREATED_FILES: 1
+   RECORDS_OUT_0: 1
+0
+PREHOOK: query: select count(*) from orc_ppd where t = 125
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_ppd
+#### A masked pattern was here ####
+Stage-1 HIVE COUNTERS:
+   CREATED_FILES: 1
+   DESERIALIZE_ERRORS: 0
+   RECORDS_IN_Map_1: 0
+   RECORDS_OUT_0: 1
+   RECORDS_OUT_INTERMEDIATE_Map_1: 1
+0
+PREHOOK: query: select count(*) from orc_ppd where t IN (-100, 125, 200)
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_ppd
+#### A masked pattern was here ####
+Stage-1 HIVE COUNTERS:
+   CREATED_FILES: 1
+   DESERIALIZE_ERRORS: 0
+   RECORDS_IN_Map_1: 0
+   RECORDS_OUT_0: 1
+   RECORDS_OUT_INTERMEDIATE_Map_1: 1
+0
+PREHOOK: query: -- Row group statistics for column s:
+-- Entry 0: count: 1000 hasNull: false min:  max: zach young sum: 12907 positions: 0,0,0
+-- Entry 1: count: 1000 hasNull: false min: alice allen max: zach zipper sum: 12704 positions: 0,1611,191
+-- Entry 2: count: 100 hasNull: false min: bob davidson max: zzz sum: 1281 positions: 0,3246,373
+
+-- INPUT_RECORDS: 0 (no row groups)
+select count(*) from orc_ppd where s > "zzz"
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_ppd
+#### A masked pattern was here ####
+Stage-1 HIVE COUNTERS:
+   CREATED_FILES: 1
+   RECORDS_OUT_0: 1
+0
+PREHOOK: query: -- INPUT_RECORDS: 1000 (1 row group)
+select count(*) from orc_ppd where s = "zach young"
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_ppd
+#### A masked pattern was here ####
+Stage-1 HIVE COUNTERS:
+   CREATED_FILES: 1
+   DESERIALIZE_ERRORS: 0
+   RECORDS_IN_Map_1: 1000
+   RECORDS_OUT_0: 1
+   RECORDS_OUT_INTERMEDIATE_Map_1: 1
+2
+PREHOOK: query: select count(*) from orc_ppd where s <=> "zach zipper"
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_ppd
+#### A masked pattern was here ####
+Stage-1 HIVE COUNTERS:
+   CREATED_FILES: 1
+   DESERIALIZE_ERRORS: 0
+   RECORDS_IN_Map_1: 1000
+   RECORDS_OUT_0: 1
+   RECORDS_OUT_INTERMEDIATE_Map_1: 1
+6
+PREHOOK: query: select count(*) from orc_ppd where s <=> ""
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_ppd
+#### A masked pattern was here ####
+Stage-1 HIVE COUNTERS:
+   CREATED_FILES: 1
+   DESERIALIZE_ERRORS: 0
+   RECORDS_IN_Map_1: 1000
+   RECORDS_OUT_0: 1
+   RECORDS_OUT_INTERMEDIATE_Map_1: 1
+6
+PREHOOK: query: -- INPUT_RECORDS: 0
+select count(*) from orc_ppd where s is null
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_ppd
+#### A masked pattern was here ####
+Stage-1 HIVE COUNTERS:
+   CREATED_FILES: 1
+   RECORDS_OUT_0: 1
+0
+PREHOOK: query: -- INPUT_RECORDS: 2100
+select count(*) from orc_ppd where s is not null
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_ppd
+#### A masked pattern was here ####
+Stage-1 HIVE COUNTERS:
+   CREATED_FILES: 1
+   DESERIALIZE_ERRORS: 0
+   RECORDS_IN_Map_1: 2100
+   RECORDS_OUT_0: 1
+   RECORDS_OUT_INTERMEDIATE_Map_1: 1
+2100
+PREHOOK: query: -- INPUT_RECORDS: 0
+select count(*) from orc_ppd where s = cast("zach young" as char(50))
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_ppd
+#### A masked pattern was here ####
+Stage-1 HIVE COUNTERS:
+   CREATED_FILES: 1
+   DESERIALIZE_ERRORS: 0
+   RECORDS_IN_Map_1: 0
+   RECORDS_OUT_0: 1
+   RECORDS_OUT_INTERMEDIATE_Map_1: 1
+0
+PREHOOK: query: -- INPUT_RECORDS: 1000 (1 row group)
+select count(*) from orc_ppd where s = cast("zach young" as char(10))
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_ppd
+#### A masked pattern was here ####
+Stage-1 HIVE COUNTERS:
+   CREATED_FILES: 1
+   DESERIALIZE_ERRORS: 0
+   RECORDS_IN_Map_1: 1000
+   RECORDS_OUT_0: 1
+   RECORDS_OUT_INTERMEDIATE_Map_1: 1
+2
+PREHOOK: query: select count(*) from orc_ppd where s = cast("zach young" as varchar(10))
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_ppd
+#### A masked pattern was here ####
+Stage-1 HIVE COUNTERS:
+   CREATED_FILES: 1
+   DESERIALIZE_ERRORS: 0
+   RECORDS_IN_Map_1: 1000
+   RECORDS_OUT_0: 1
+   RECORDS_OUT_INTERMEDIATE_Map_1: 1
+2
+PREHOOK: query: select count(*) from orc_ppd where s = cast("zach young" as varchar(50))
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_ppd
+#### A masked pattern was here ####
+Stage-1 HIVE COUNTERS:
+   CREATED_FILES: 1
+   DESERIALIZE_ERRORS: 0
+   RECORDS_IN_Map_1: 1000
+   RECORDS_OUT_0: 1
+   RECORDS_OUT_INTERMEDIATE_Map_1: 1
+2
+PREHOOK: query: -- INPUT_RECORDS: 2000 (2 row groups)
+select count(*) from orc_ppd where s < "b"
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_ppd
+#### A masked pattern was here ####
+Stage-1 HIVE COUNTERS:
+   CREATED_FILES: 1
+   DESERIALIZE_ERRORS: 0
+   RECORDS_IN_Map_1: 2000
+   RECORDS_OUT_0: 1
+   RECORDS_OUT_INTERMEDIATE_Map_1: 1
+81
+PREHOOK: query: -- INPUT_RECORDS: 2000 (2 row groups)
+select count(*) from orc_ppd where s > "alice" and s < "bob"
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_ppd
+#### A masked pattern was here ####
+Stage-1 HIVE COUNTERS:
+   CREATED_FILES: 1
+   DESERIALIZE_ERRORS: 0
+   RECORDS_IN_Map_1: 2000
+   RECORDS_OUT_0: 1
+   RECORDS_OUT_INTERMEDIATE_Map_1: 1
+74
+PREHOOK: query: -- INPUT_RECORDS: 2000 (2 row groups)
+select count(*) from orc_ppd where s in ("alice allen", "")
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_ppd
+#### A masked pattern was here ####
+Stage-1 HIVE COUNTERS:
+   CREATED_FILES: 1
+   DESERIALIZE_ERRORS: 0
+   RECORDS_IN_Map_1: 2000
+   RECORDS_OUT_0: 1
+   RECORDS_OUT_INTERMEDIATE_Map_1: 1
+12
+PREHOOK: query: -- INPUT_RECORDS: 2000 (2 row groups)
+select count(*) from orc_ppd where s between "" and "alice allen"
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_ppd
+#### A masked pattern was here ####
+Stage-1 HIVE COUNTERS:
+   CREATED_FILES: 1
+   DESERIALIZE_ERRORS: 0
+   RECORDS_IN_Map_1: 2000
+   RECORDS_OUT_0: 1
+   RECORDS_OUT_INTERMEDIATE_Map_1: 1
+13
+PREHOOK: query: -- INPUT_RECORDS: 100 (1 row group)
+select count(*) from orc_ppd where s between "zz" and "zzz"
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_ppd
+#### A masked pattern was here ####
+Stage-1 HIVE COUNTERS:
+   CREATED_FILES: 1
+   DESERIALIZE_ERRORS: 0
+   RECORDS_IN_Map_1: 100
+   RECORDS_OUT_0: 1
+   RECORDS_OUT_INTERMEDIATE_Map_1: 1
+1
+PREHOOK: query: -- INPUT_RECORDS: 1100 (2 row groups)
+select count(*) from orc_ppd where s between "zach zipper" and "zzz"
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_ppd
+#### A masked pattern was here ####
+Stage-1 HIVE COUNTERS:
+   CREATED_FILES: 1
+   DESERIALIZE_ERRORS: 0
+   RECORDS_IN_Map_1: 1100
+   RECORDS_OUT_0: 1
+   RECORDS_OUT_INTERMEDIATE_Map_1: 1
+7
+PREHOOK: query: -- bloom filter tests
+-- INPUT_RECORDS: 0
+select count(*) from orc_ppd where s = "hello world"
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_ppd
+#### A masked pattern was here ####
+Stage-1 HIVE COUNTERS:
+   CREATED_FILES: 1
+   DESERIALIZE_ERRORS: 0
+   RECORDS_IN_Map_1: 0
+   RECORDS_OUT_0: 1
+   RECORDS_OUT_INTERMEDIATE_Map_1: 1
+0
+PREHOOK: query: select count(*) from orc_ppd where s <=> "apache hive"
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_ppd
+#### A masked pattern was here ####
+Stage-1 HIVE COUNTERS:
+   CREATED_FILES: 1
+   DESERIALIZE_ERRORS: 0
+   RECORDS_IN_Map_1: 0
+   RECORDS_OUT_0: 1
+   RECORDS_OUT_INTERMEDIATE_Map_1: 1
+0
+PREHOOK: query: select count(*) from orc_ppd where s IN ("a", "z")
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_ppd
+#### A masked pattern was here ####
+Stage-1 HIVE COUNTERS:
+   CREATED_FILES: 1
+   DESERIALIZE_ERRORS: 0
+   RECORDS_IN_Map_1: 0
+   RECORDS_OUT_0: 1
+   RECORDS_OUT_INTERMEDIATE_Map_1: 1
+0
+PREHOOK: query: -- INPUT_RECORDS: 100
+select count(*) from orc_ppd where s = "sarah ovid"
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_ppd
+#### A masked pattern was here ####
+Stage-1 HIVE COUNTERS:
+   CREATED_FILES: 1
+   DESERIALIZE_ERRORS: 0
+   RECORDS_IN_Map_1: 100
+   RECORDS_OUT_0: 1
+   RECORDS_OUT_INTERMEDIATE_Map_1: 1
+2
+PREHOOK: query: -- INPUT_RECORDS: 1100
+select count(*) from orc_ppd where s = "wendy king"
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_ppd
+#### A masked pattern was here ####
+Stage-1 HIVE COUNTERS:
+   CREATED_FILES: 1
+   DESERIALIZE_ERRORS: 0
+   RECORDS_IN_Map_1: 1100
+   RECORDS_OUT_0: 1
+   RECORDS_OUT_INTERMEDIATE_Map_1: 1
+6
+PREHOOK: query: -- INPUT_RECORDS: 1000
+select count(*) from orc_ppd where s = "wendy king" and t < 0
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_ppd
+#### A masked pattern was here ####
+Stage-1 HIVE COUNTERS:
+   CREATED_FILES: 1
+   DESERIALIZE_ERRORS: 0
+   RECORDS_IN_Map_1: 1000
+   RECORDS_OUT_0: 1
+   RECORDS_OUT_INTERMEDIATE_Map_1: 1
+2
+PREHOOK: query: -- INPUT_RECORDS: 100
+select count(*) from orc_ppd where s = "wendy king" and t > 100
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_ppd
+#### A masked pattern was here ####
+Stage-1 HIVE COUNTERS:
+   CREATED_FILES: 1
+   DESERIALIZE_ERRORS: 0
+   RECORDS_IN_Map_1: 100
+   RECORDS_OUT_0: 1
+   RECORDS_OUT_INTERMEDIATE_Map_1: 1
+2


[47/50] [abbrv] hive git commit: HIVE-11568 : merge master into branch (Sergey Shelukhin)

Posted by se...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/c528294b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/NoSuchObjectException.java
----------------------------------------------------------------------
diff --cc metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/NoSuchObjectException.java
index 305e979,1292a64..e8cb821
--- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/NoSuchObjectException.java
+++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/NoSuchObjectException.java
@@@ -31,7 -33,9 +33,9 @@@ import javax.annotation.Generated
  import org.slf4j.Logger;
  import org.slf4j.LoggerFactory;
  
- public class NoSuchObjectException extends TException implements org.apache.thrift.TBase<NoSuchObjectException, NoSuchObjectException._Fields>, java.io.Serializable, Cloneable {
+ @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
 -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3")
++@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-14")
+ public class NoSuchObjectException extends TException implements org.apache.thrift.TBase<NoSuchObjectException, NoSuchObjectException._Fields>, java.io.Serializable, Cloneable, Comparable<NoSuchObjectException> {
    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("NoSuchObjectException");
  
    private static final org.apache.thrift.protocol.TField MESSAGE_FIELD_DESC = new org.apache.thrift.protocol.TField("message", org.apache.thrift.protocol.TType.STRING, (short)1);

http://git-wip-us.apache.org/repos/asf/hive/blob/c528294b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/NoSuchTxnException.java
----------------------------------------------------------------------
diff --cc metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/NoSuchTxnException.java
index 92dbb7f,d1c430d..9997b93
--- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/NoSuchTxnException.java
+++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/NoSuchTxnException.java
@@@ -31,7 -33,9 +33,9 @@@ import javax.annotation.Generated
  import org.slf4j.Logger;
  import org.slf4j.LoggerFactory;
  
- public class NoSuchTxnException extends TException implements org.apache.thrift.TBase<NoSuchTxnException, NoSuchTxnException._Fields>, java.io.Serializable, Cloneable {
+ @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
 -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3")
++@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-14")
+ public class NoSuchTxnException extends TException implements org.apache.thrift.TBase<NoSuchTxnException, NoSuchTxnException._Fields>, java.io.Serializable, Cloneable, Comparable<NoSuchTxnException> {
    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("NoSuchTxnException");
  
    private static final org.apache.thrift.protocol.TField MESSAGE_FIELD_DESC = new org.apache.thrift.protocol.TField("message", org.apache.thrift.protocol.TType.STRING, (short)1);

http://git-wip-us.apache.org/repos/asf/hive/blob/c528294b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/NotificationEvent.java
----------------------------------------------------------------------
diff --cc metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/NotificationEvent.java
index f196c1c,bcf4f51..6f594c5
--- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/NotificationEvent.java
+++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/NotificationEvent.java
@@@ -31,7 -33,9 +33,9 @@@ import javax.annotation.Generated
  import org.slf4j.Logger;
  import org.slf4j.LoggerFactory;
  
- public class NotificationEvent implements org.apache.thrift.TBase<NotificationEvent, NotificationEvent._Fields>, java.io.Serializable, Cloneable {
+ @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
 -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3")
++@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-14")
+ public class NotificationEvent implements org.apache.thrift.TBase<NotificationEvent, NotificationEvent._Fields>, java.io.Serializable, Cloneable, Comparable<NotificationEvent> {
    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("NotificationEvent");
  
    private static final org.apache.thrift.protocol.TField EVENT_ID_FIELD_DESC = new org.apache.thrift.protocol.TField("eventId", org.apache.thrift.protocol.TType.I64, (short)1);

http://git-wip-us.apache.org/repos/asf/hive/blob/c528294b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/NotificationEventRequest.java
----------------------------------------------------------------------
diff --cc metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/NotificationEventRequest.java
index 6a8c8ab,c2bc4e8..0c6dc01
--- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/NotificationEventRequest.java
+++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/NotificationEventRequest.java
@@@ -31,7 -33,9 +33,9 @@@ import javax.annotation.Generated
  import org.slf4j.Logger;
  import org.slf4j.LoggerFactory;
  
- public class NotificationEventRequest implements org.apache.thrift.TBase<NotificationEventRequest, NotificationEventRequest._Fields>, java.io.Serializable, Cloneable {
+ @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
 -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3")
++@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-14")
+ public class NotificationEventRequest implements org.apache.thrift.TBase<NotificationEventRequest, NotificationEventRequest._Fields>, java.io.Serializable, Cloneable, Comparable<NotificationEventRequest> {
    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("NotificationEventRequest");
  
    private static final org.apache.thrift.protocol.TField LAST_EVENT_FIELD_DESC = new org.apache.thrift.protocol.TField("lastEvent", org.apache.thrift.protocol.TType.I64, (short)1);

http://git-wip-us.apache.org/repos/asf/hive/blob/c528294b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/NotificationEventResponse.java
----------------------------------------------------------------------
diff --cc metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/NotificationEventResponse.java
index 0123e87,24f9ce4..3295c3c
--- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/NotificationEventResponse.java
+++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/NotificationEventResponse.java
@@@ -31,7 -33,9 +33,9 @@@ import javax.annotation.Generated
  import org.slf4j.Logger;
  import org.slf4j.LoggerFactory;
  
- public class NotificationEventResponse implements org.apache.thrift.TBase<NotificationEventResponse, NotificationEventResponse._Fields>, java.io.Serializable, Cloneable {
+ @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
 -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3")
++@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-14")
+ public class NotificationEventResponse implements org.apache.thrift.TBase<NotificationEventResponse, NotificationEventResponse._Fields>, java.io.Serializable, Cloneable, Comparable<NotificationEventResponse> {
    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("NotificationEventResponse");
  
    private static final org.apache.thrift.protocol.TField EVENTS_FIELD_DESC = new org.apache.thrift.protocol.TField("events", org.apache.thrift.protocol.TType.LIST, (short)1);

http://git-wip-us.apache.org/repos/asf/hive/blob/c528294b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/OpenTxnRequest.java
----------------------------------------------------------------------
diff --cc metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/OpenTxnRequest.java
index 0d05378,c5f9ccf..a09575d
--- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/OpenTxnRequest.java
+++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/OpenTxnRequest.java
@@@ -31,7 -33,9 +33,9 @@@ import javax.annotation.Generated
  import org.slf4j.Logger;
  import org.slf4j.LoggerFactory;
  
- public class OpenTxnRequest implements org.apache.thrift.TBase<OpenTxnRequest, OpenTxnRequest._Fields>, java.io.Serializable, Cloneable {
+ @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
 -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3")
++@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-14")
+ public class OpenTxnRequest implements org.apache.thrift.TBase<OpenTxnRequest, OpenTxnRequest._Fields>, java.io.Serializable, Cloneable, Comparable<OpenTxnRequest> {
    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("OpenTxnRequest");
  
    private static final org.apache.thrift.protocol.TField NUM_TXNS_FIELD_DESC = new org.apache.thrift.protocol.TField("num_txns", org.apache.thrift.protocol.TType.I32, (short)1);

http://git-wip-us.apache.org/repos/asf/hive/blob/c528294b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/OpenTxnsResponse.java
----------------------------------------------------------------------
diff --cc metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/OpenTxnsResponse.java
index f5efbe3,c233422..d874654
--- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/OpenTxnsResponse.java
+++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/OpenTxnsResponse.java
@@@ -31,7 -33,9 +33,9 @@@ import javax.annotation.Generated
  import org.slf4j.Logger;
  import org.slf4j.LoggerFactory;
  
- public class OpenTxnsResponse implements org.apache.thrift.TBase<OpenTxnsResponse, OpenTxnsResponse._Fields>, java.io.Serializable, Cloneable {
+ @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
 -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3")
++@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-14")
+ public class OpenTxnsResponse implements org.apache.thrift.TBase<OpenTxnsResponse, OpenTxnsResponse._Fields>, java.io.Serializable, Cloneable, Comparable<OpenTxnsResponse> {
    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("OpenTxnsResponse");
  
    private static final org.apache.thrift.protocol.TField TXN_IDS_FIELD_DESC = new org.apache.thrift.protocol.TField("txn_ids", org.apache.thrift.protocol.TType.LIST, (short)1);

http://git-wip-us.apache.org/repos/asf/hive/blob/c528294b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Order.java
----------------------------------------------------------------------
diff --cc metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Order.java
index 55d5174,da7bd55..d83b83d
--- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Order.java
+++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Order.java
@@@ -31,7 -33,9 +33,9 @@@ import javax.annotation.Generated
  import org.slf4j.Logger;
  import org.slf4j.LoggerFactory;
  
- public class Order implements org.apache.thrift.TBase<Order, Order._Fields>, java.io.Serializable, Cloneable {
+ @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
 -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3")
++@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-14")
+ public class Order implements org.apache.thrift.TBase<Order, Order._Fields>, java.io.Serializable, Cloneable, Comparable<Order> {
    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("Order");
  
    private static final org.apache.thrift.protocol.TField COL_FIELD_DESC = new org.apache.thrift.protocol.TField("col", org.apache.thrift.protocol.TType.STRING, (short)1);

http://git-wip-us.apache.org/repos/asf/hive/blob/c528294b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Partition.java
----------------------------------------------------------------------
diff --cc metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Partition.java
index 7d29d09,224d28e..e38798a
--- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Partition.java
+++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Partition.java
@@@ -31,7 -33,9 +33,9 @@@ import javax.annotation.Generated
  import org.slf4j.Logger;
  import org.slf4j.LoggerFactory;
  
- public class Partition implements org.apache.thrift.TBase<Partition, Partition._Fields>, java.io.Serializable, Cloneable {
+ @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
 -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3")
++@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-14")
+ public class Partition implements org.apache.thrift.TBase<Partition, Partition._Fields>, java.io.Serializable, Cloneable, Comparable<Partition> {
    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("Partition");
  
    private static final org.apache.thrift.protocol.TField VALUES_FIELD_DESC = new org.apache.thrift.protocol.TField("values", org.apache.thrift.protocol.TType.LIST, (short)1);

http://git-wip-us.apache.org/repos/asf/hive/blob/c528294b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionListComposingSpec.java
----------------------------------------------------------------------
diff --cc metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionListComposingSpec.java
index bcd92b8,c50a100..6ccebb3
--- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionListComposingSpec.java
+++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionListComposingSpec.java
@@@ -31,7 -33,9 +33,9 @@@ import javax.annotation.Generated
  import org.slf4j.Logger;
  import org.slf4j.LoggerFactory;
  
- public class PartitionListComposingSpec implements org.apache.thrift.TBase<PartitionListComposingSpec, PartitionListComposingSpec._Fields>, java.io.Serializable, Cloneable {
+ @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
 -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3")
++@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-14")
+ public class PartitionListComposingSpec implements org.apache.thrift.TBase<PartitionListComposingSpec, PartitionListComposingSpec._Fields>, java.io.Serializable, Cloneable, Comparable<PartitionListComposingSpec> {
    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("PartitionListComposingSpec");
  
    private static final org.apache.thrift.protocol.TField PARTITIONS_FIELD_DESC = new org.apache.thrift.protocol.TField("partitions", org.apache.thrift.protocol.TType.LIST, (short)1);

http://git-wip-us.apache.org/repos/asf/hive/blob/c528294b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionSpec.java
----------------------------------------------------------------------
diff --cc metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionSpec.java
index 06cc8fb,b5251af..8d2f1b4
--- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionSpec.java
+++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionSpec.java
@@@ -31,7 -33,9 +33,9 @@@ import javax.annotation.Generated
  import org.slf4j.Logger;
  import org.slf4j.LoggerFactory;
  
- public class PartitionSpec implements org.apache.thrift.TBase<PartitionSpec, PartitionSpec._Fields>, java.io.Serializable, Cloneable {
+ @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
 -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3")
++@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-14")
+ public class PartitionSpec implements org.apache.thrift.TBase<PartitionSpec, PartitionSpec._Fields>, java.io.Serializable, Cloneable, Comparable<PartitionSpec> {
    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("PartitionSpec");
  
    private static final org.apache.thrift.protocol.TField DB_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("dbName", org.apache.thrift.protocol.TType.STRING, (short)1);

http://git-wip-us.apache.org/repos/asf/hive/blob/c528294b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionSpecWithSharedSD.java
----------------------------------------------------------------------
diff --cc metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionSpecWithSharedSD.java
index 18ab134,5574e0b..08d8548
--- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionSpecWithSharedSD.java
+++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionSpecWithSharedSD.java
@@@ -31,7 -33,9 +33,9 @@@ import javax.annotation.Generated
  import org.slf4j.Logger;
  import org.slf4j.LoggerFactory;
  
- public class PartitionSpecWithSharedSD implements org.apache.thrift.TBase<PartitionSpecWithSharedSD, PartitionSpecWithSharedSD._Fields>, java.io.Serializable, Cloneable {
+ @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
 -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3")
++@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-14")
+ public class PartitionSpecWithSharedSD implements org.apache.thrift.TBase<PartitionSpecWithSharedSD, PartitionSpecWithSharedSD._Fields>, java.io.Serializable, Cloneable, Comparable<PartitionSpecWithSharedSD> {
    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("PartitionSpecWithSharedSD");
  
    private static final org.apache.thrift.protocol.TField PARTITIONS_FIELD_DESC = new org.apache.thrift.protocol.TField("partitions", org.apache.thrift.protocol.TType.LIST, (short)1);

http://git-wip-us.apache.org/repos/asf/hive/blob/c528294b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionWithoutSD.java
----------------------------------------------------------------------
diff --cc metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionWithoutSD.java
index 193d9e9,e1ec73e..57ff72e
--- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionWithoutSD.java
+++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionWithoutSD.java
@@@ -31,7 -33,9 +33,9 @@@ import javax.annotation.Generated
  import org.slf4j.Logger;
  import org.slf4j.LoggerFactory;
  
- public class PartitionWithoutSD implements org.apache.thrift.TBase<PartitionWithoutSD, PartitionWithoutSD._Fields>, java.io.Serializable, Cloneable {
+ @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
 -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3")
++@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-14")
+ public class PartitionWithoutSD implements org.apache.thrift.TBase<PartitionWithoutSD, PartitionWithoutSD._Fields>, java.io.Serializable, Cloneable, Comparable<PartitionWithoutSD> {
    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("PartitionWithoutSD");
  
    private static final org.apache.thrift.protocol.TField VALUES_FIELD_DESC = new org.apache.thrift.protocol.TField("values", org.apache.thrift.protocol.TType.LIST, (short)1);

http://git-wip-us.apache.org/repos/asf/hive/blob/c528294b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionsByExprRequest.java
----------------------------------------------------------------------
diff --cc metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionsByExprRequest.java
index c6fa8a2,6149c31..b10f3c8
--- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionsByExprRequest.java
+++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionsByExprRequest.java
@@@ -31,7 -33,9 +33,9 @@@ import javax.annotation.Generated
  import org.slf4j.Logger;
  import org.slf4j.LoggerFactory;
  
- public class PartitionsByExprRequest implements org.apache.thrift.TBase<PartitionsByExprRequest, PartitionsByExprRequest._Fields>, java.io.Serializable, Cloneable {
+ @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
 -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3")
++@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-14")
+ public class PartitionsByExprRequest implements org.apache.thrift.TBase<PartitionsByExprRequest, PartitionsByExprRequest._Fields>, java.io.Serializable, Cloneable, Comparable<PartitionsByExprRequest> {
    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("PartitionsByExprRequest");
  
    private static final org.apache.thrift.protocol.TField DB_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("dbName", org.apache.thrift.protocol.TType.STRING, (short)1);

http://git-wip-us.apache.org/repos/asf/hive/blob/c528294b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionsByExprResult.java
----------------------------------------------------------------------
diff --cc metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionsByExprResult.java
index 525ce0e,740f7bd..3a0376d
--- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionsByExprResult.java
+++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionsByExprResult.java
@@@ -31,7 -33,9 +33,9 @@@ import javax.annotation.Generated
  import org.slf4j.Logger;
  import org.slf4j.LoggerFactory;
  
- public class PartitionsByExprResult implements org.apache.thrift.TBase<PartitionsByExprResult, PartitionsByExprResult._Fields>, java.io.Serializable, Cloneable {
+ @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
 -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3")
++@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-14")
+ public class PartitionsByExprResult implements org.apache.thrift.TBase<PartitionsByExprResult, PartitionsByExprResult._Fields>, java.io.Serializable, Cloneable, Comparable<PartitionsByExprResult> {
    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("PartitionsByExprResult");
  
    private static final org.apache.thrift.protocol.TField PARTITIONS_FIELD_DESC = new org.apache.thrift.protocol.TField("partitions", org.apache.thrift.protocol.TType.LIST, (short)1);

http://git-wip-us.apache.org/repos/asf/hive/blob/c528294b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionsStatsRequest.java
----------------------------------------------------------------------
diff --cc metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionsStatsRequest.java
index d224453,5d1ee87..bfa77f6
--- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionsStatsRequest.java
+++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionsStatsRequest.java
@@@ -31,7 -33,9 +33,9 @@@ import javax.annotation.Generated
  import org.slf4j.Logger;
  import org.slf4j.LoggerFactory;
  
- public class PartitionsStatsRequest implements org.apache.thrift.TBase<PartitionsStatsRequest, PartitionsStatsRequest._Fields>, java.io.Serializable, Cloneable {
+ @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
 -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3")
++@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-14")
+ public class PartitionsStatsRequest implements org.apache.thrift.TBase<PartitionsStatsRequest, PartitionsStatsRequest._Fields>, java.io.Serializable, Cloneable, Comparable<PartitionsStatsRequest> {
    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("PartitionsStatsRequest");
  
    private static final org.apache.thrift.protocol.TField DB_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("dbName", org.apache.thrift.protocol.TType.STRING, (short)1);

http://git-wip-us.apache.org/repos/asf/hive/blob/c528294b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionsStatsResult.java
----------------------------------------------------------------------
diff --cc metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionsStatsResult.java
index c9ae14e,da33014..757f209
--- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionsStatsResult.java
+++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionsStatsResult.java
@@@ -31,7 -33,9 +33,9 @@@ import javax.annotation.Generated
  import org.slf4j.Logger;
  import org.slf4j.LoggerFactory;
  
- public class PartitionsStatsResult implements org.apache.thrift.TBase<PartitionsStatsResult, PartitionsStatsResult._Fields>, java.io.Serializable, Cloneable {
+ @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
 -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3")
++@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-14")
+ public class PartitionsStatsResult implements org.apache.thrift.TBase<PartitionsStatsResult, PartitionsStatsResult._Fields>, java.io.Serializable, Cloneable, Comparable<PartitionsStatsResult> {
    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("PartitionsStatsResult");
  
    private static final org.apache.thrift.protocol.TField PART_STATS_FIELD_DESC = new org.apache.thrift.protocol.TField("partStats", org.apache.thrift.protocol.TType.MAP, (short)1);

http://git-wip-us.apache.org/repos/asf/hive/blob/c528294b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PrincipalPrivilegeSet.java
----------------------------------------------------------------------
diff --cc metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PrincipalPrivilegeSet.java
index 0c9518a,8f29f50..889a41c
--- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PrincipalPrivilegeSet.java
+++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PrincipalPrivilegeSet.java
@@@ -31,7 -33,9 +33,9 @@@ import javax.annotation.Generated
  import org.slf4j.Logger;
  import org.slf4j.LoggerFactory;
  
- public class PrincipalPrivilegeSet implements org.apache.thrift.TBase<PrincipalPrivilegeSet, PrincipalPrivilegeSet._Fields>, java.io.Serializable, Cloneable {
+ @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
 -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3")
++@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-14")
+ public class PrincipalPrivilegeSet implements org.apache.thrift.TBase<PrincipalPrivilegeSet, PrincipalPrivilegeSet._Fields>, java.io.Serializable, Cloneable, Comparable<PrincipalPrivilegeSet> {
    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("PrincipalPrivilegeSet");
  
    private static final org.apache.thrift.protocol.TField USER_PRIVILEGES_FIELD_DESC = new org.apache.thrift.protocol.TField("userPrivileges", org.apache.thrift.protocol.TType.MAP, (short)1);

http://git-wip-us.apache.org/repos/asf/hive/blob/c528294b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PrivilegeBag.java
----------------------------------------------------------------------
diff --cc metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PrivilegeBag.java
index 4285ed8,2fd819c..741ace7
--- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PrivilegeBag.java
+++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PrivilegeBag.java
@@@ -31,7 -33,9 +33,9 @@@ import javax.annotation.Generated
  import org.slf4j.Logger;
  import org.slf4j.LoggerFactory;
  
- public class PrivilegeBag implements org.apache.thrift.TBase<PrivilegeBag, PrivilegeBag._Fields>, java.io.Serializable, Cloneable {
+ @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
 -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3")
++@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-14")
+ public class PrivilegeBag implements org.apache.thrift.TBase<PrivilegeBag, PrivilegeBag._Fields>, java.io.Serializable, Cloneable, Comparable<PrivilegeBag> {
    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("PrivilegeBag");
  
    private static final org.apache.thrift.protocol.TField PRIVILEGES_FIELD_DESC = new org.apache.thrift.protocol.TField("privileges", org.apache.thrift.protocol.TType.LIST, (short)1);

http://git-wip-us.apache.org/repos/asf/hive/blob/c528294b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PrivilegeGrantInfo.java
----------------------------------------------------------------------
diff --cc metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PrivilegeGrantInfo.java
index 5869457,c04e196..ba52582
--- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PrivilegeGrantInfo.java
+++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PrivilegeGrantInfo.java
@@@ -31,7 -33,9 +33,9 @@@ import javax.annotation.Generated
  import org.slf4j.Logger;
  import org.slf4j.LoggerFactory;
  
- public class PrivilegeGrantInfo implements org.apache.thrift.TBase<PrivilegeGrantInfo, PrivilegeGrantInfo._Fields>, java.io.Serializable, Cloneable {
+ @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
 -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3")
++@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-14")
+ public class PrivilegeGrantInfo implements org.apache.thrift.TBase<PrivilegeGrantInfo, PrivilegeGrantInfo._Fields>, java.io.Serializable, Cloneable, Comparable<PrivilegeGrantInfo> {
    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("PrivilegeGrantInfo");
  
    private static final org.apache.thrift.protocol.TField PRIVILEGE_FIELD_DESC = new org.apache.thrift.protocol.TField("privilege", org.apache.thrift.protocol.TType.STRING, (short)1);

http://git-wip-us.apache.org/repos/asf/hive/blob/c528294b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ResourceUri.java
----------------------------------------------------------------------
diff --cc metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ResourceUri.java
index c230eab,3b3df25..cffcf91
--- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ResourceUri.java
+++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ResourceUri.java
@@@ -31,7 -33,9 +33,9 @@@ import javax.annotation.Generated
  import org.slf4j.Logger;
  import org.slf4j.LoggerFactory;
  
- public class ResourceUri implements org.apache.thrift.TBase<ResourceUri, ResourceUri._Fields>, java.io.Serializable, Cloneable {
+ @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
 -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3")
++@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-14")
+ public class ResourceUri implements org.apache.thrift.TBase<ResourceUri, ResourceUri._Fields>, java.io.Serializable, Cloneable, Comparable<ResourceUri> {
    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("ResourceUri");
  
    private static final org.apache.thrift.protocol.TField RESOURCE_TYPE_FIELD_DESC = new org.apache.thrift.protocol.TField("resourceType", org.apache.thrift.protocol.TType.I32, (short)1);

http://git-wip-us.apache.org/repos/asf/hive/blob/c528294b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Role.java
----------------------------------------------------------------------
diff --cc metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Role.java
index 35fcf58,5c882d2..b9052a3
--- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Role.java
+++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Role.java
@@@ -31,7 -33,9 +33,9 @@@ import javax.annotation.Generated
  import org.slf4j.Logger;
  import org.slf4j.LoggerFactory;
  
- public class Role implements org.apache.thrift.TBase<Role, Role._Fields>, java.io.Serializable, Cloneable {
+ @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
 -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3")
++@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-14")
+ public class Role implements org.apache.thrift.TBase<Role, Role._Fields>, java.io.Serializable, Cloneable, Comparable<Role> {
    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("Role");
  
    private static final org.apache.thrift.protocol.TField ROLE_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("roleName", org.apache.thrift.protocol.TType.STRING, (short)1);

http://git-wip-us.apache.org/repos/asf/hive/blob/c528294b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/RolePrincipalGrant.java
----------------------------------------------------------------------
diff --cc metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/RolePrincipalGrant.java
index 8993268,c4beb08..6e3c200
--- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/RolePrincipalGrant.java
+++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/RolePrincipalGrant.java
@@@ -31,7 -33,9 +33,9 @@@ import javax.annotation.Generated
  import org.slf4j.Logger;
  import org.slf4j.LoggerFactory;
  
- public class RolePrincipalGrant implements org.apache.thrift.TBase<RolePrincipalGrant, RolePrincipalGrant._Fields>, java.io.Serializable, Cloneable {
+ @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
 -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3")
++@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-14")
+ public class RolePrincipalGrant implements org.apache.thrift.TBase<RolePrincipalGrant, RolePrincipalGrant._Fields>, java.io.Serializable, Cloneable, Comparable<RolePrincipalGrant> {
    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("RolePrincipalGrant");
  
    private static final org.apache.thrift.protocol.TField ROLE_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("roleName", org.apache.thrift.protocol.TType.STRING, (short)1);

http://git-wip-us.apache.org/repos/asf/hive/blob/c528294b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Schema.java
----------------------------------------------------------------------
diff --cc metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Schema.java
index 31ee943,8772180..85af5c5
--- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Schema.java
+++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Schema.java
@@@ -31,7 -33,9 +33,9 @@@ import javax.annotation.Generated
  import org.slf4j.Logger;
  import org.slf4j.LoggerFactory;
  
- public class Schema implements org.apache.thrift.TBase<Schema, Schema._Fields>, java.io.Serializable, Cloneable {
+ @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
 -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3")
++@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-14")
+ public class Schema implements org.apache.thrift.TBase<Schema, Schema._Fields>, java.io.Serializable, Cloneable, Comparable<Schema> {
    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("Schema");
  
    private static final org.apache.thrift.protocol.TField FIELD_SCHEMAS_FIELD_DESC = new org.apache.thrift.protocol.TField("fieldSchemas", org.apache.thrift.protocol.TType.LIST, (short)1);

http://git-wip-us.apache.org/repos/asf/hive/blob/c528294b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SerDeInfo.java
----------------------------------------------------------------------
diff --cc metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SerDeInfo.java
index 24d65bb,b30e698..73853a2
--- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SerDeInfo.java
+++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SerDeInfo.java
@@@ -31,7 -33,9 +33,9 @@@ import javax.annotation.Generated
  import org.slf4j.Logger;
  import org.slf4j.LoggerFactory;
  
- public class SerDeInfo implements org.apache.thrift.TBase<SerDeInfo, SerDeInfo._Fields>, java.io.Serializable, Cloneable {
+ @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
 -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3")
++@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-14")
+ public class SerDeInfo implements org.apache.thrift.TBase<SerDeInfo, SerDeInfo._Fields>, java.io.Serializable, Cloneable, Comparable<SerDeInfo> {
    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("SerDeInfo");
  
    private static final org.apache.thrift.protocol.TField NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("name", org.apache.thrift.protocol.TType.STRING, (short)1);

http://git-wip-us.apache.org/repos/asf/hive/blob/c528294b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SetPartitionsStatsRequest.java
----------------------------------------------------------------------
diff --cc metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SetPartitionsStatsRequest.java
index e62e410,7da298c..d09e413
--- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SetPartitionsStatsRequest.java
+++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SetPartitionsStatsRequest.java
@@@ -31,7 -33,9 +33,9 @@@ import javax.annotation.Generated
  import org.slf4j.Logger;
  import org.slf4j.LoggerFactory;
  
- public class SetPartitionsStatsRequest implements org.apache.thrift.TBase<SetPartitionsStatsRequest, SetPartitionsStatsRequest._Fields>, java.io.Serializable, Cloneable {
+ @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
 -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3")
++@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-14")
+ public class SetPartitionsStatsRequest implements org.apache.thrift.TBase<SetPartitionsStatsRequest, SetPartitionsStatsRequest._Fields>, java.io.Serializable, Cloneable, Comparable<SetPartitionsStatsRequest> {
    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("SetPartitionsStatsRequest");
  
    private static final org.apache.thrift.protocol.TField COL_STATS_FIELD_DESC = new org.apache.thrift.protocol.TField("colStats", org.apache.thrift.protocol.TType.LIST, (short)1);

http://git-wip-us.apache.org/repos/asf/hive/blob/c528294b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ShowCompactRequest.java
----------------------------------------------------------------------
diff --cc metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ShowCompactRequest.java
index 28ad1c9,7756384..7d4d12c
--- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ShowCompactRequest.java
+++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ShowCompactRequest.java
@@@ -31,7 -33,9 +33,9 @@@ import javax.annotation.Generated
  import org.slf4j.Logger;
  import org.slf4j.LoggerFactory;
  
- public class ShowCompactRequest implements org.apache.thrift.TBase<ShowCompactRequest, ShowCompactRequest._Fields>, java.io.Serializable, Cloneable {
+ @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
 -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3")
++@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-14")
+ public class ShowCompactRequest implements org.apache.thrift.TBase<ShowCompactRequest, ShowCompactRequest._Fields>, java.io.Serializable, Cloneable, Comparable<ShowCompactRequest> {
    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("ShowCompactRequest");
  
  

http://git-wip-us.apache.org/repos/asf/hive/blob/c528294b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ShowCompactResponse.java
----------------------------------------------------------------------
diff --cc metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ShowCompactResponse.java
index c882b7b,dd1e857..7112f26
--- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ShowCompactResponse.java
+++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ShowCompactResponse.java
@@@ -31,7 -33,9 +33,9 @@@ import javax.annotation.Generated
  import org.slf4j.Logger;
  import org.slf4j.LoggerFactory;
  
- public class ShowCompactResponse implements org.apache.thrift.TBase<ShowCompactResponse, ShowCompactResponse._Fields>, java.io.Serializable, Cloneable {
+ @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
 -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3")
++@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-14")
+ public class ShowCompactResponse implements org.apache.thrift.TBase<ShowCompactResponse, ShowCompactResponse._Fields>, java.io.Serializable, Cloneable, Comparable<ShowCompactResponse> {
    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("ShowCompactResponse");
  
    private static final org.apache.thrift.protocol.TField COMPACTS_FIELD_DESC = new org.apache.thrift.protocol.TField("compacts", org.apache.thrift.protocol.TType.LIST, (short)1);

http://git-wip-us.apache.org/repos/asf/hive/blob/c528294b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ShowCompactResponseElement.java
----------------------------------------------------------------------
diff --cc metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ShowCompactResponseElement.java
index 365a401,cd7e79e..810b140
--- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ShowCompactResponseElement.java
+++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ShowCompactResponseElement.java
@@@ -31,7 -33,9 +33,9 @@@ import javax.annotation.Generated
  import org.slf4j.Logger;
  import org.slf4j.LoggerFactory;
  
- public class ShowCompactResponseElement implements org.apache.thrift.TBase<ShowCompactResponseElement, ShowCompactResponseElement._Fields>, java.io.Serializable, Cloneable {
+ @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
 -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3")
++@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-14")
+ public class ShowCompactResponseElement implements org.apache.thrift.TBase<ShowCompactResponseElement, ShowCompactResponseElement._Fields>, java.io.Serializable, Cloneable, Comparable<ShowCompactResponseElement> {
    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("ShowCompactResponseElement");
  
    private static final org.apache.thrift.protocol.TField DBNAME_FIELD_DESC = new org.apache.thrift.protocol.TField("dbname", org.apache.thrift.protocol.TType.STRING, (short)1);

http://git-wip-us.apache.org/repos/asf/hive/blob/c528294b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ShowLocksRequest.java
----------------------------------------------------------------------
diff --cc metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ShowLocksRequest.java
index 7a1fd6f,122c070..c13fda4
--- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ShowLocksRequest.java
+++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ShowLocksRequest.java
@@@ -31,7 -33,9 +33,9 @@@ import javax.annotation.Generated
  import org.slf4j.Logger;
  import org.slf4j.LoggerFactory;
  
- public class ShowLocksRequest implements org.apache.thrift.TBase<ShowLocksRequest, ShowLocksRequest._Fields>, java.io.Serializable, Cloneable {
+ @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
 -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3")
++@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-14")
+ public class ShowLocksRequest implements org.apache.thrift.TBase<ShowLocksRequest, ShowLocksRequest._Fields>, java.io.Serializable, Cloneable, Comparable<ShowLocksRequest> {
    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("ShowLocksRequest");
  
  

http://git-wip-us.apache.org/repos/asf/hive/blob/c528294b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ShowLocksResponse.java
----------------------------------------------------------------------
diff --cc metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ShowLocksResponse.java
index 42cfe8c,52b0bbc..2289195
--- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ShowLocksResponse.java
+++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ShowLocksResponse.java
@@@ -31,7 -33,9 +33,9 @@@ import javax.annotation.Generated
  import org.slf4j.Logger;
  import org.slf4j.LoggerFactory;
  
- public class ShowLocksResponse implements org.apache.thrift.TBase<ShowLocksResponse, ShowLocksResponse._Fields>, java.io.Serializable, Cloneable {
+ @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
 -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3")
++@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-14")
+ public class ShowLocksResponse implements org.apache.thrift.TBase<ShowLocksResponse, ShowLocksResponse._Fields>, java.io.Serializable, Cloneable, Comparable<ShowLocksResponse> {
    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("ShowLocksResponse");
  
    private static final org.apache.thrift.protocol.TField LOCKS_FIELD_DESC = new org.apache.thrift.protocol.TField("locks", org.apache.thrift.protocol.TType.LIST, (short)1);

http://git-wip-us.apache.org/repos/asf/hive/blob/c528294b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ShowLocksResponseElement.java
----------------------------------------------------------------------
diff --cc metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ShowLocksResponseElement.java
index 2f7c24f,8be9b05..ba17ea7
--- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ShowLocksResponseElement.java
+++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ShowLocksResponseElement.java
@@@ -31,7 -33,9 +33,9 @@@ import javax.annotation.Generated
  import org.slf4j.Logger;
  import org.slf4j.LoggerFactory;
  
- public class ShowLocksResponseElement implements org.apache.thrift.TBase<ShowLocksResponseElement, ShowLocksResponseElement._Fields>, java.io.Serializable, Cloneable {
+ @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
 -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3")
++@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-14")
+ public class ShowLocksResponseElement implements org.apache.thrift.TBase<ShowLocksResponseElement, ShowLocksResponseElement._Fields>, java.io.Serializable, Cloneable, Comparable<ShowLocksResponseElement> {
    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("ShowLocksResponseElement");
  
    private static final org.apache.thrift.protocol.TField LOCKID_FIELD_DESC = new org.apache.thrift.protocol.TField("lockid", org.apache.thrift.protocol.TType.I64, (short)1);

http://git-wip-us.apache.org/repos/asf/hive/blob/c528294b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SkewedInfo.java
----------------------------------------------------------------------
diff --cc metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SkewedInfo.java
index ab5c0ed,bc64495..5cb5e2b
--- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SkewedInfo.java
+++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SkewedInfo.java
@@@ -31,7 -33,9 +33,9 @@@ import javax.annotation.Generated
  import org.slf4j.Logger;
  import org.slf4j.LoggerFactory;
  
- public class SkewedInfo implements org.apache.thrift.TBase<SkewedInfo, SkewedInfo._Fields>, java.io.Serializable, Cloneable {
+ @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
 -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3")
++@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-14")
+ public class SkewedInfo implements org.apache.thrift.TBase<SkewedInfo, SkewedInfo._Fields>, java.io.Serializable, Cloneable, Comparable<SkewedInfo> {
    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("SkewedInfo");
  
    private static final org.apache.thrift.protocol.TField SKEWED_COL_NAMES_FIELD_DESC = new org.apache.thrift.protocol.TField("skewedColNames", org.apache.thrift.protocol.TType.LIST, (short)1);

http://git-wip-us.apache.org/repos/asf/hive/blob/c528294b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/StorageDescriptor.java
----------------------------------------------------------------------
diff --cc metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/StorageDescriptor.java
index 813b4f0,165a879..6b5cf87
--- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/StorageDescriptor.java
+++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/StorageDescriptor.java
@@@ -31,7 -33,9 +33,9 @@@ import javax.annotation.Generated
  import org.slf4j.Logger;
  import org.slf4j.LoggerFactory;
  
- public class StorageDescriptor implements org.apache.thrift.TBase<StorageDescriptor, StorageDescriptor._Fields>, java.io.Serializable, Cloneable {
+ @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
 -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3")
++@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-14")
+ public class StorageDescriptor implements org.apache.thrift.TBase<StorageDescriptor, StorageDescriptor._Fields>, java.io.Serializable, Cloneable, Comparable<StorageDescriptor> {
    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("StorageDescriptor");
  
    private static final org.apache.thrift.protocol.TField COLS_FIELD_DESC = new org.apache.thrift.protocol.TField("cols", org.apache.thrift.protocol.TType.LIST, (short)1);

http://git-wip-us.apache.org/repos/asf/hive/blob/c528294b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/StringColumnStatsData.java
----------------------------------------------------------------------
diff --cc metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/StringColumnStatsData.java
index db3274a,9906ff3..2160bc8
--- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/StringColumnStatsData.java
+++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/StringColumnStatsData.java
@@@ -31,7 -33,9 +33,9 @@@ import javax.annotation.Generated
  import org.slf4j.Logger;
  import org.slf4j.LoggerFactory;
  
- public class StringColumnStatsData implements org.apache.thrift.TBase<StringColumnStatsData, StringColumnStatsData._Fields>, java.io.Serializable, Cloneable {
+ @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
 -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3")
++@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-14")
+ public class StringColumnStatsData implements org.apache.thrift.TBase<StringColumnStatsData, StringColumnStatsData._Fields>, java.io.Serializable, Cloneable, Comparable<StringColumnStatsData> {
    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("StringColumnStatsData");
  
    private static final org.apache.thrift.protocol.TField MAX_COL_LEN_FIELD_DESC = new org.apache.thrift.protocol.TField("maxColLen", org.apache.thrift.protocol.TType.I64, (short)1);

http://git-wip-us.apache.org/repos/asf/hive/blob/c528294b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Table.java
----------------------------------------------------------------------
diff --cc metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Table.java
index 484bd6a,51b9e38..ca16924
--- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Table.java
+++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Table.java
@@@ -31,7 -33,9 +33,9 @@@ import javax.annotation.Generated
  import org.slf4j.Logger;
  import org.slf4j.LoggerFactory;
  
- public class Table implements org.apache.thrift.TBase<Table, Table._Fields>, java.io.Serializable, Cloneable {
+ @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
 -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3")
++@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-14")
+ public class Table implements org.apache.thrift.TBase<Table, Table._Fields>, java.io.Serializable, Cloneable, Comparable<Table> {
    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("Table");
  
    private static final org.apache.thrift.protocol.TField TABLE_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("tableName", org.apache.thrift.protocol.TType.STRING, (short)1);

http://git-wip-us.apache.org/repos/asf/hive/blob/c528294b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/TableStatsRequest.java
----------------------------------------------------------------------
diff --cc metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/TableStatsRequest.java
index 2073829,1edcaf9..11d3b03
--- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/TableStatsRequest.java
+++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/TableStatsRequest.java
@@@ -31,7 -33,9 +33,9 @@@ import javax.annotation.Generated
  import org.slf4j.Logger;
  import org.slf4j.LoggerFactory;
  
- public class TableStatsRequest implements org.apache.thrift.TBase<TableStatsRequest, TableStatsRequest._Fields>, java.io.Serializable, Cloneable {
+ @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
 -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3")
++@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-14")
+ public class TableStatsRequest implements org.apache.thrift.TBase<TableStatsRequest, TableStatsRequest._Fields>, java.io.Serializable, Cloneable, Comparable<TableStatsRequest> {
    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TableStatsRequest");
  
    private static final org.apache.thrift.protocol.TField DB_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("dbName", org.apache.thrift.protocol.TType.STRING, (short)1);

http://git-wip-us.apache.org/repos/asf/hive/blob/c528294b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/TableStatsResult.java
----------------------------------------------------------------------
diff --cc metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/TableStatsResult.java
index 541370f,25a1f25..f1104e1
--- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/TableStatsResult.java
+++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/TableStatsResult.java
@@@ -31,7 -33,9 +33,9 @@@ import javax.annotation.Generated
  import org.slf4j.Logger;
  import org.slf4j.LoggerFactory;
  
- public class TableStatsResult implements org.apache.thrift.TBase<TableStatsResult, TableStatsResult._Fields>, java.io.Serializable, Cloneable {
+ @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
 -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3")
++@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-14")
+ public class TableStatsResult implements org.apache.thrift.TBase<TableStatsResult, TableStatsResult._Fields>, java.io.Serializable, Cloneable, Comparable<TableStatsResult> {
    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TableStatsResult");
  
    private static final org.apache.thrift.protocol.TField TABLE_STATS_FIELD_DESC = new org.apache.thrift.protocol.TField("tableStats", org.apache.thrift.protocol.TType.LIST, (short)1);


[06/50] [abbrv] hive git commit: HIVE-11457: Vectorization: Improve GenVectorCode string equals intrinsic (Gopal V, reviewed by Matt McCline)

Posted by se...@apache.org.
HIVE-11457: Vectorization: Improve GenVectorCode string equals intrinsic (Gopal V, reviewed by Matt McCline)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/bddbd1da
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/bddbd1da
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/bddbd1da

Branch: refs/heads/hbase-metastore
Commit: bddbd1da0c570a4c03f80b695b940a181787c5ca
Parents: cfda570
Author: Gopal V <go...@apache.org>
Authored: Mon Aug 10 13:55:29 2015 -0700
Committer: Gopal V <go...@apache.org>
Committed: Mon Aug 10 13:56:00 2015 -0700

----------------------------------------------------------------------
 .../ql/exec/vector/expressions/StringExpr.java  | 49 +++++++++++++++-----
 1 file changed, 38 insertions(+), 11 deletions(-)
----------------------------------------------------------------------
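Editor's note: the patch below rewrites StringExpr.equal() so the hot loop compares bytes in fixed chunks of 8 with no branch inside the inner loop, which is what lets the JIT auto-vectorize it. A minimal standalone sketch of the same idea is given here; the class and method names are illustrative only and are not part of the Hive code.

// Standalone sketch of the chunked, branch-free comparison used in the patch below.
// chunkedEquals is an illustrative name, not a Hive API.
public class ChunkedEqualsSketch {
  static boolean chunkedEquals(byte[] a, int offA, byte[] b, int offB, int len) {
    final int step = 8;
    final int wlen = len - (len % step);
    // compare the tail (len % step bytes) one byte at a time
    for (int i = wlen; i < len; i++) {
      if (a[offA + i] != b[offB + i]) {
        return false;
      }
    }
    // main loop: accumulate mismatches over 8 bytes, test once per chunk;
    // keeping the inner loop branch-free is what makes it SIMD-friendly
    for (int i = 0; i < wlen; i += step) {
      boolean neq = false;
      for (int j = 0; j < step; j++) {
        neq |= a[offA + i + j] != b[offB + i + j];
      }
      if (neq) {
        return false;
      }
    }
    return true;
  }

  public static void main(String[] args) {
    byte[] x = "hello, vectorized world".getBytes();
    byte[] y = "hello, vectorized world".getBytes();
    System.out.println(chunkedEquals(x, 0, y, 0, x.length)); // prints true
  }
}

The committed version additionally short-circuits on a length mismatch and on the first and last bytes before entering the chunked loop, as shown in the diff below.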


http://git-wip-us.apache.org/repos/asf/hive/blob/bddbd1da/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/StringExpr.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/StringExpr.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/StringExpr.java
index ebeb642..90817a5 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/StringExpr.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/StringExpr.java
@@ -51,22 +51,49 @@ public class StringExpr {
    * Use lexicographic unsigned byte value order.
    * This is what's used for UTF-8 sort order.
    */
-  public static boolean equal(byte[] arg1, int start1, int len1, byte[] arg2, int start2, int len2) {
+  public static boolean equal(byte[] arg1, final int start1, final int len1,
+      byte[] arg2, final int start2, final int len2) {
     if (len1 != len2) {
       return false;
     }
-    for (int index1 = start1,
-             index2 = start2;
-         len1 > 0;
-         len1--,
-         index1++,
-         index2++) {
-      // Note the "& 0xff" is just a way to convert unsigned bytes to signed integer.
-      if ((arg1[index1] & 0xff) != (arg2[index2] & 0xff)) {
-        return false;
-      }
+    if (len1 == 0) {
+      return true;
+    }
+
+    // do bounds check for OOB exception
+    if (arg1[start1] != arg2[start2]
+        || arg1[start1 + len1 - 1] != arg2[start2 + len2 - 1]) {
+      return false;
+    }
 
+    if (len1 == len2) {
+      // prove invariant to the compiler: len1 = len2
+      // all array access between (start1, start1+len1) 
+      // and (start2, start2+len2) are valid
+      // no more OOB exceptions are possible
+      final int step = 8;
+      final int remainder = len1 % step;
+      final int wlen = len1 - remainder;
+      // suffix first
+      for (int i = wlen; i < len1; i++) {
+        if (arg1[start1 + i] != arg2[start2 + i]) {
+          return false;
+        }
+      }
+      // SIMD loop
+      for (int i = 0; i < wlen; i += step) {
+        final int s1 = start1 + i;
+        final int s2 = start2 + i;
+        boolean neq = false;
+        for (int j = 0; j < step; j++) {
+          neq = (arg1[s1 + j] != arg2[s2 + j]) || neq;
+        }
+        if (neq) {
+          return false;
+        }
+      }
     }
+
     return true;
   }
 


[13/50] [abbrv] hive git commit: HIVE-11278 : Fix Partition.setOutputFormatClass to set class name properly (Rajat Khandelwal, reviewed by Amareshwari)

Posted by se...@apache.org.
HIVE-11278 : Fix Partition.setOutputFormatClass to set class name properly (Rajat Khandelwal, reviewed by Amareshwari)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/7e536853
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/7e536853
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/7e536853

Branch: refs/heads/hbase-metastore
Commit: 7e53685310fa22abd12307e141511184fa6ede3a
Parents: 70631bb
Author: Rajat Khandelwal <pr...@apache.org>
Authored: Tue Aug 11 17:28:07 2015 +0530
Committer: Amareshwari Sriramadasu <am...@apache.org>
Committed: Tue Aug 11 17:28:07 2015 +0530

----------------------------------------------------------------------
 ql/src/java/org/apache/hadoop/hive/ql/metadata/Partition.java | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------
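Editor's note: the one-line fix below swaps Class.toString() for Class.getName() when recording the output format in the partition's storage descriptor. The difference matters because toString() prefixes the result with "class ", which is not a loadable class name. A JDK-only illustration (java.util.ArrayList is just a stand-in):

// JDK-only illustration of why getName() is the right call here.
public class ClassNameSketch {
  public static void main(String[] args) throws Exception {
    Class<?> c = java.util.ArrayList.class;
    System.out.println(c.toString()); // "class java.util.ArrayList" -- Class.forName() rejects this
    System.out.println(c.getName());  // "java.util.ArrayList"       -- round-trips through Class.forName()
    System.out.println(Class.forName(c.getName()) == c); // true
  }
}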


http://git-wip-us.apache.org/repos/asf/hive/blob/7e536853/ql/src/java/org/apache/hadoop/hive/ql/metadata/Partition.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Partition.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Partition.java
index 2e77bc4..9546191 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Partition.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Partition.java
@@ -283,7 +283,7 @@ public class Partition implements Serializable {
   public void setOutputFormatClass(Class<? extends HiveOutputFormat> outputFormatClass) {
     this.outputFormatClass = outputFormatClass;
     tPartition.getSd().setOutputFormat(HiveFileFormatUtils
-        .getOutputFormatSubstitute(outputFormatClass).toString());
+        .getOutputFormatSubstitute(outputFormatClass).getName());
   }
 
   final public Class<? extends InputFormat> getInputFormatClass()


[35/50] [abbrv] hive git commit: HIVE-11562: Typo in hive-log4j2.xml throws unknown level exception (Prasanth Jayachandran reviewed by Sergey Shelukhin)

Posted by se...@apache.org.
HIVE-11562: Typo in hive-log4j2.xml throws unknown level exception (Prasanth Jayachandran reviewed by Sergey Shelukhin)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/d307abbf
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/d307abbf
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/d307abbf

Branch: refs/heads/hbase-metastore
Commit: d307abbf1093ffdc2599489bd6f67bdb8dcb3e14
Parents: c93d6c7
Author: Prasanth Jayachandran <j....@gmail.com>
Authored: Fri Aug 14 14:01:57 2015 -0700
Committer: Prasanth Jayachandran <j....@gmail.com>
Committed: Fri Aug 14 14:01:57 2015 -0700

----------------------------------------------------------------------
 data/conf/hive-log4j2.xml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/d307abbf/data/conf/hive-log4j2.xml
----------------------------------------------------------------------
diff --git a/data/conf/hive-log4j2.xml b/data/conf/hive-log4j2.xml
index c9adfa2..a40be2c 100644
--- a/data/conf/hive-log4j2.xml
+++ b/data/conf/hive-log4j2.xml
@@ -70,7 +70,7 @@
 
   <Loggers>
     <Root level="${sys:hive.log.threshold}">
-      <AppenderRef ref="${sys:hive.root.logger}" level="{sys:hive.log.level}"/>
+      <AppenderRef ref="${sys:hive.root.logger}" level="${sys:hive.log.level}"/>
       <AppenderRef ref="EventCounter" />
     </Root>
 


[24/50] [abbrv] hive git commit: HIVE-11103 Add banker's rounding BROUND UDF (Alexander Pivovarov, reviewed by Jason Dere)

Posted by se...@apache.org.
HIVE-11103 Add banker's rounding BROUND UDF (Alexander Pivovarov, reviewed by Jason Dere)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/bd90fc34
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/bd90fc34
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/bd90fc34

Branch: refs/heads/hbase-metastore
Commit: bd90fc349fcd171a58f583928408d029af9dbf4e
Parents: 139101d
Author: Alexander Pivovarov <ap...@gmail.com>
Authored: Sun Jul 26 21:43:31 2015 -0700
Committer: Alexander Pivovarov <ap...@gmail.com>
Committed: Thu Aug 13 10:59:01 2015 -0700

----------------------------------------------------------------------
 .../apache/hadoop/hive/ant/GenVectorCode.java   |   2 +
 .../hadoop/hive/ql/exec/FunctionRegistry.java   |   1 +
 .../BRoundWithNumDigitsDoubleToDouble.java      |  42 ++++
 .../ql/exec/vector/expressions/DecimalUtil.java |  18 ++
 ...FuncBRoundWithNumDigitsDecimalToDecimal.java |  40 ++++
 .../FuncRoundWithNumDigitsDecimalToDecimal.java |  14 +-
 .../ql/exec/vector/expressions/MathExpr.java    |  22 ++
 .../hive/ql/optimizer/physical/Vectorizer.java  |   1 +
 .../hive/ql/udf/generic/GenericUDFBRound.java   |  68 +++++++
 .../hive/ql/udf/generic/GenericUDFRound.java    |  41 ++--
 .../hadoop/hive/ql/udf/generic/RoundUtils.java  |  14 ++
 .../exec/vector/TestVectorizationContext.java   |  15 ++
 .../ql/udf/generic/TestGenericUDFBRound.java    | 202 +++++++++++++++++++
 ql/src/test/queries/clientpositive/udf_bround.q |  44 ++++
 .../test/queries/clientpositive/vector_bround.q |  14 ++
 .../results/clientpositive/show_functions.q.out |   1 +
 .../results/clientpositive/udf_bround.q.out     | 119 +++++++++++
 .../results/clientpositive/vector_bround.q.out  |  86 ++++++++
 .../hadoop/hive/common/type/HiveDecimal.java    |   1 +
 19 files changed, 727 insertions(+), 18 deletions(-)
----------------------------------------------------------------------
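Editor's note: for context on what the new UDF computes, bround() uses half-even ("banker's") rounding, which differs from the usual half-up rounding only at the .5 midpoint, where it rounds toward the nearest even digit. A JDK-only sketch of the two modes follows; it is not the UDF implementation itself, which delegates to RoundUtils.bround as shown further down.

import java.math.BigDecimal;
import java.math.RoundingMode;

// JDK-only comparison of half-up vs. half-even ("banker's") rounding.
public class BroundSketch {
  public static void main(String[] args) {
    double[] vals = {2.5, 3.5, 2.49, -1.5};
    for (double v : vals) {
      BigDecimal d = BigDecimal.valueOf(v);
      System.out.println(v + "  half_up=" + d.setScale(0, RoundingMode.HALF_UP)
          + "  half_even=" + d.setScale(0, RoundingMode.HALF_EVEN));
    }
  }
}

In HiveQL terms, per the udf_bround.q tests added here, bround(2.5) evaluates to 2 whereas round(2.5) rounds up to 3; values away from the midpoint round identically under both functions.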


http://git-wip-us.apache.org/repos/asf/hive/blob/bd90fc34/ant/src/org/apache/hadoop/hive/ant/GenVectorCode.java
----------------------------------------------------------------------
diff --git a/ant/src/org/apache/hadoop/hive/ant/GenVectorCode.java b/ant/src/org/apache/hadoop/hive/ant/GenVectorCode.java
index 54f3783..fede273 100644
--- a/ant/src/org/apache/hadoop/hive/ant/GenVectorCode.java
+++ b/ant/src/org/apache/hadoop/hive/ant/GenVectorCode.java
@@ -696,6 +696,7 @@ public class GenVectorCode extends Task {
       // template, <ClassNamePrefix>, <ReturnType>, <OperandType>, <FuncName>, <OperandCast>,
       //   <ResultCast>, <Cleanup> <VectorExprArgType>
       {"ColumnUnaryFunc", "FuncRound", "double", "double", "MathExpr.round", "", "", "", ""},
+      {"ColumnUnaryFunc", "FuncBRound", "double", "double", "MathExpr.bround", "", "", "", ""},
       // round(longCol) returns a long and is a no-op. So it will not be implemented here.
       // round(Col, N) is a special case and will be implemented separately from this template
       {"ColumnUnaryFunc", "FuncFloor", "long", "double", "Math.floor", "", "(long)", "", ""},
@@ -752,6 +753,7 @@ public class GenVectorCode extends Task {
       {"DecimalColumnUnaryFunc", "FuncAbs", "decimal", "DecimalUtil.abs"},
       {"DecimalColumnUnaryFunc", "FuncSign", "long", "DecimalUtil.sign"},
       {"DecimalColumnUnaryFunc", "FuncRound", "decimal", "DecimalUtil.round"},
+      {"DecimalColumnUnaryFunc", "FuncBRound", "decimal", "DecimalUtil.bround"},
       {"DecimalColumnUnaryFunc", "FuncNegate", "decimal", "DecimalUtil.negate"},
 
       // Casts

http://git-wip-us.apache.org/repos/asf/hive/blob/bd90fc34/ql/src/java/org/apache/hadoop/hive/ql/exec/FunctionRegistry.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/FunctionRegistry.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/FunctionRegistry.java
index fb06d44..9edcc4d 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/FunctionRegistry.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/FunctionRegistry.java
@@ -192,6 +192,7 @@ public final class FunctionRegistry {
     system.registerGenericUDF("size", GenericUDFSize.class);
 
     system.registerGenericUDF("round", GenericUDFRound.class);
+    system.registerGenericUDF("bround", GenericUDFBRound.class);
     system.registerGenericUDF("floor", GenericUDFFloor.class);
     system.registerUDF("sqrt", UDFSqrt.class, false);
     system.registerGenericUDF("cbrt", GenericUDFCbrt.class);

http://git-wip-us.apache.org/repos/asf/hive/blob/bd90fc34/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/BRoundWithNumDigitsDoubleToDouble.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/BRoundWithNumDigitsDoubleToDouble.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/BRoundWithNumDigitsDoubleToDouble.java
new file mode 100644
index 0000000..0a49e45
--- /dev/null
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/BRoundWithNumDigitsDoubleToDouble.java
@@ -0,0 +1,42 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.ql.exec.vector.expressions;
+
+import org.apache.hadoop.hive.ql.udf.generic.RoundUtils;
+
+// Vectorized implementation of BROUND(Col, N) function
+public class BRoundWithNumDigitsDoubleToDouble extends RoundWithNumDigitsDoubleToDouble
+    implements ISetLongArg {
+  private static final long serialVersionUID = 18493485928L;
+
+  public BRoundWithNumDigitsDoubleToDouble(int colNum, long scalarVal, int outputColumn) {
+    super(colNum, scalarVal, outputColumn);
+  }
+
+  public BRoundWithNumDigitsDoubleToDouble() {
+    super();
+  }
+
+  // Round to the specified number of decimal places using the half-even rounding mode.
+  @Override
+  public double func(double d) {
+    return RoundUtils.bround(d, getDecimalPlaces().get());
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/hive/blob/bd90fc34/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/DecimalUtil.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/DecimalUtil.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/DecimalUtil.java
index ef80059..a01f7a2 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/DecimalUtil.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/DecimalUtil.java
@@ -314,6 +314,24 @@ public class DecimalUtil {
     }
   }
 
+  public static void bround(int i, HiveDecimalWritable input, int decimalPlaces, DecimalColumnVector outputColVector) {
+    try {
+      outputColVector.set(i, RoundUtils.bround(input.getHiveDecimal(), decimalPlaces));
+    } catch (ArithmeticException e) {
+      outputColVector.noNulls = false;
+      outputColVector.isNull[i] = true;
+    }
+  }
+
+  public static void bround(int i, HiveDecimalWritable input, DecimalColumnVector outputColVector) {
+    try {
+      outputColVector.set(i, RoundUtils.bround(input.getHiveDecimal(), outputColVector.scale));
+    } catch (ArithmeticException e) {
+      outputColVector.noNulls = false;
+      outputColVector.isNull[i] = true;
+    }
+  }
+
   public static void sign(int i, HiveDecimal input, LongColumnVector outputColVector) {
     outputColVector.vector[i] = input.signum();
   }

http://git-wip-us.apache.org/repos/asf/hive/blob/bd90fc34/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/FuncBRoundWithNumDigitsDecimalToDecimal.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/FuncBRoundWithNumDigitsDecimalToDecimal.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/FuncBRoundWithNumDigitsDecimalToDecimal.java
new file mode 100644
index 0000000..e174575
--- /dev/null
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/FuncBRoundWithNumDigitsDecimalToDecimal.java
@@ -0,0 +1,40 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.ql.exec.vector.expressions;
+
+import org.apache.hadoop.hive.ql.exec.vector.DecimalColumnVector;
+import org.apache.hadoop.hive.serde2.io.HiveDecimalWritable;
+
+public class FuncBRoundWithNumDigitsDecimalToDecimal extends FuncRoundWithNumDigitsDecimalToDecimal {
+  private static final long serialVersionUID = 1865384957262L;
+
+  public FuncBRoundWithNumDigitsDecimalToDecimal(int colNum, int scalarValue, int outputColumn) {
+    super(colNum, scalarValue, outputColumn);
+  }
+
+  public FuncBRoundWithNumDigitsDecimalToDecimal() {
+    super();
+  }
+
+  @Override
+  protected void round(int i, HiveDecimalWritable input, int decimalPlaces,
+      DecimalColumnVector outputColVector) {
+    DecimalUtil.bround(i, input, decimalPlaces, outputColVector);
+  }
+}

http://git-wip-us.apache.org/repos/asf/hive/blob/bd90fc34/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/FuncRoundWithNumDigitsDecimalToDecimal.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/FuncRoundWithNumDigitsDecimalToDecimal.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/FuncRoundWithNumDigitsDecimalToDecimal.java
index 9f3e8a3..a18bb55 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/FuncRoundWithNumDigitsDecimalToDecimal.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/FuncRoundWithNumDigitsDecimalToDecimal.java
@@ -73,7 +73,7 @@ public class FuncRoundWithNumDigitsDecimalToDecimal extends VectorExpression {
       // All must be selected otherwise size would be zero
       // Repeating property will not change.
       outputIsNull[0] = inputIsNull[0];
-      DecimalUtil.round(0, vector[0], decimalPlaces, outputColVector);
+      round(0, vector[0], decimalPlaces, outputColVector);
       outputColVector.isRepeating = true;
     } else if (inputColVector.noNulls) {
       if (batch.selectedInUse) {
@@ -82,14 +82,14 @@ public class FuncRoundWithNumDigitsDecimalToDecimal extends VectorExpression {
 
           // Set isNull because decimal operation can yield a null.
           outputIsNull[i] = false;
-          DecimalUtil.round(i, vector[i], decimalPlaces, outputColVector);
+          round(i, vector[i], decimalPlaces, outputColVector);
         }
       } else {
 
         // Set isNull because decimal operation can yield a null.
         Arrays.fill(outputIsNull, 0, n, false);
         for(int i = 0; i != n; i++) {
-          DecimalUtil.round(i, vector[i], decimalPlaces, outputColVector);
+          round(i, vector[i], decimalPlaces, outputColVector);
         }
       }
       outputColVector.isRepeating = false;
@@ -98,12 +98,12 @@ public class FuncRoundWithNumDigitsDecimalToDecimal extends VectorExpression {
         for(int j = 0; j != n; j++) {
           int i = sel[j];
           outputIsNull[i] = inputIsNull[i];
-          DecimalUtil.round(i, vector[i], decimalPlaces, outputColVector);
+          round(i, vector[i], decimalPlaces, outputColVector);
         }
       } else {
         System.arraycopy(inputIsNull, 0, outputIsNull, 0, n);
         for(int i = 0; i != n; i++) {
-          DecimalUtil.round(i, vector[i], decimalPlaces, outputColVector);
+          round(i, vector[i], decimalPlaces, outputColVector);
         }
       }
       outputColVector.isRepeating = false;
@@ -133,4 +133,8 @@ public class FuncRoundWithNumDigitsDecimalToDecimal extends VectorExpression {
             VectorExpressionDescriptor.InputExpressionType.COLUMN,
             VectorExpressionDescriptor.InputExpressionType.SCALAR).build();
   }
+
+  protected void round(int i, HiveDecimalWritable input, int decimalPlaces, DecimalColumnVector outputColVector) {
+    DecimalUtil.round(i, input, decimalPlaces, outputColVector);
+  }
 }
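
The refactoring above turns the per-element DecimalUtil.round call into a protected round() hook so that FuncBRoundWithNumDigitsDecimalToDecimal only overrides the rounding step while the base class keeps the selection and null handling. A minimal standalone sketch of that override pattern, with invented class names and deliberately simplified rounding (Math.rint is half-even on ties), might look like:

    // Sketch only: the base class owns the traversal, the subclass swaps the rounding call.
    public class RoundHookExample {
      static class RoundExpr {
        double apply(double v) { return round(v); }
        protected double round(double v) { return Math.floor(v + 0.5d); }   // half-up style
      }
      static class BRoundExpr extends RoundExpr {
        @Override protected double round(double v) { return Math.rint(v); } // half-even on ties
      }

      public static void main(String[] args) {
        System.out.println(new RoundExpr().apply(2.5));   // 3.0
        System.out.println(new BRoundExpr().apply(2.5));  // 2.0
      }
    }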

http://git-wip-us.apache.org/repos/asf/hive/blob/bd90fc34/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/MathExpr.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/MathExpr.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/MathExpr.java
index aef923e..67bf567 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/MathExpr.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/MathExpr.java
@@ -35,6 +35,28 @@ public class MathExpr {
     }
   }
 
+  // Round to the nearest integer using the half-even ("banker's") rounding method.
+  public static double bround(double d) {
+    long intPart = (long) (d);
+    if (d > 0.0) {
+      if (d - intPart == 0.5d) {
+        if (intPart % 2 == 0) {
+          return intPart;
+        }
+        return intPart + 1;
+      }
+      return (double) ((long) (d + 0.5d));
+    } else {
+      if (intPart - d == 0.5d) {
+        if (intPart % 2 == 0) {
+          return intPart;
+        }
+        return intPart - 1;
+      }
+      return (double) ((long) (d - 0.5d));
+    }
+  }
+
   public static double log2(double d) {
     return Math.log(d) / Math.log(2);
   }
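
For reference, the hand-rolled half-even logic in MathExpr.bround(double) above can be cross-checked against BigDecimal's HALF_EVEN rounding at scale 0. This is a standalone sketch, not part of the commit; the class name is invented:

    import java.math.BigDecimal;
    import java.math.RoundingMode;

    public class HalfEvenCheck {
      // Same idea as the bround(double) added above: ties (x.5) go to the nearest even integer.
      static double bround(double d) {
        long intPart = (long) d;
        if (d > 0.0) {
          if (d - intPart == 0.5d) return (intPart % 2 == 0) ? intPart : intPart + 1;
          return (long) (d + 0.5d);
        } else {
          if (intPart - d == 0.5d) return (intPart % 2 == 0) ? intPart : intPart - 1;
          return (long) (d - 0.5d);
        }
      }

      public static void main(String[] args) {
        double[] vals = {1.5, 2.5, -1.5, -2.5, 1.49, 1.51};
        for (double v : vals) {
          double expected = BigDecimal.valueOf(v).setScale(0, RoundingMode.HALF_EVEN).doubleValue();
          System.out.printf("%6.2f -> %4.1f (BigDecimal HALF_EVEN: %4.1f)%n", v, bround(v), expected);
        }
      }
    }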

http://git-wip-us.apache.org/repos/asf/hive/blob/bd90fc34/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/Vectorizer.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/Vectorizer.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/Vectorizer.java
index 82c3e50..7ecd50a 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/Vectorizer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/Vectorizer.java
@@ -250,6 +250,7 @@ public class Vectorizer implements PhysicalPlanResolver {
     supportedGenericUDFs.add(UDFLog.class);
     supportedGenericUDFs.add(GenericUDFPower.class);
     supportedGenericUDFs.add(GenericUDFRound.class);
+    supportedGenericUDFs.add(GenericUDFBRound.class);
     supportedGenericUDFs.add(GenericUDFPosMod.class);
     supportedGenericUDFs.add(UDFSqrt.class);
     supportedGenericUDFs.add(UDFSign.class);

http://git-wip-us.apache.org/repos/asf/hive/blob/bd90fc34/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFBRound.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFBRound.java b/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFBRound.java
new file mode 100644
index 0000000..4a59eb3
--- /dev/null
+++ b/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFBRound.java
@@ -0,0 +1,68 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.ql.udf.generic;
+
+import org.apache.hadoop.hive.common.type.HiveDecimal;
+import org.apache.hadoop.hive.ql.exec.Description;
+import org.apache.hadoop.hive.ql.exec.vector.VectorizedExpressions;
+import org.apache.hadoop.hive.ql.exec.vector.expressions.BRoundWithNumDigitsDoubleToDouble;
+import org.apache.hadoop.hive.ql.exec.vector.expressions.FuncBRoundWithNumDigitsDecimalToDecimal;
+import org.apache.hadoop.hive.ql.exec.vector.expressions.gen.FuncBRoundDecimalToDecimal;
+import org.apache.hadoop.hive.ql.exec.vector.expressions.gen.FuncBRoundDoubleToDouble;
+import org.apache.hadoop.hive.serde2.io.DoubleWritable;
+
+@Description(name = "bround",
+value = "_FUNC_(x[, d]) - round x to d decimal places using HALF_EVEN rounding mode.",
+extended = "Banker's rounding. The value is rounded to the nearest even number. Also known as Gaussian rounding.\n"
+  + "Example:\n"
+  + "  > SELECT _FUNC_(12.25, 1);\n  12.2")
+@VectorizedExpressions({ FuncBRoundDoubleToDouble.class, BRoundWithNumDigitsDoubleToDouble.class,
+    FuncBRoundWithNumDigitsDecimalToDecimal.class, FuncBRoundDecimalToDecimal.class })
+public class GenericUDFBRound extends GenericUDFRound {
+
+  @Override
+  protected HiveDecimal round(HiveDecimal input, int scale) {
+    return RoundUtils.bround(input, scale);
+  }
+
+  @Override
+  protected long round(long input, int scale) {
+    return RoundUtils.bround(input, scale);
+  }
+
+  @Override
+  protected double round(double input, int scale) {
+    return RoundUtils.bround(input, scale);
+  }
+
+  @Override
+  protected DoubleWritable round(DoubleWritable input, int scale) {
+    double d = input.get();
+    if (Double.isNaN(d) || Double.isInfinite(d)) {
+      return new DoubleWritable(d);
+    } else {
+      return new DoubleWritable(RoundUtils.bround(d, scale));
+    }
+  }
+
+  @Override
+  public String getDisplayString(String[] children) {
+    return getStandardDisplayString("bround", children);
+  }
+}

http://git-wip-us.apache.org/repos/asf/hive/blob/bd90fc34/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFRound.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFRound.java b/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFRound.java
index 963e4a8..ae81fe3 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFRound.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFRound.java
@@ -101,34 +101,37 @@ public class GenericUDFRound extends GenericUDF {
         break;
       case BYTE:
         if (!(scaleOI instanceof WritableConstantByteObjectInspector)) {
-          throw new UDFArgumentTypeException(1, "ROUND second argument only takes constant");
+          throw new UDFArgumentTypeException(1, getFuncName().toUpperCase() + " second argument only takes constant");
         }
         scale = ((WritableConstantByteObjectInspector)scaleOI).getWritableConstantValue().get();
         break;
       case SHORT:
         if (!(scaleOI instanceof WritableConstantShortObjectInspector)) {
-          throw new UDFArgumentTypeException(1, "ROUND second argument only takes constant");
+          throw new UDFArgumentTypeException(1, getFuncName().toUpperCase() + " second argument only takes constant");
         }
         scale = ((WritableConstantShortObjectInspector)scaleOI).getWritableConstantValue().get();
         break;
       case INT:
         if (!(scaleOI instanceof WritableConstantIntObjectInspector)) {
-          throw new UDFArgumentTypeException(1, "ROUND second argument only takes constant");
+          throw new UDFArgumentTypeException(1, getFuncName().toUpperCase() + " second argument only takes constant");
         }
         scale = ((WritableConstantIntObjectInspector)scaleOI).getWritableConstantValue().get();
         break;
       case LONG:
         if (!(scaleOI instanceof WritableConstantLongObjectInspector)) {
-          throw new UDFArgumentTypeException(1, "ROUND second argument only takes constant");
+          throw new UDFArgumentTypeException(1, getFuncName().toUpperCase()
+              + " second argument only takes constant");
         }
         long l = ((WritableConstantLongObjectInspector)scaleOI).getWritableConstantValue().get();
         if (l < Integer.MIN_VALUE || l > Integer.MAX_VALUE) {
-          throw new UDFArgumentException("ROUND scale argument out of allowed range");
+          throw new UDFArgumentException(getFuncName().toUpperCase()
+              + " scale argument out of allowed range");
         }
         scale = (int)l;
         break;
       default:
-        throw new UDFArgumentTypeException(1, "ROUND second argument only takes integer constant");
+        throw new UDFArgumentTypeException(1, getFuncName().toUpperCase()
+            + " second argument only takes integer constant");
       }
     }
 
@@ -199,7 +202,7 @@ public class GenericUDFRound extends GenericUDF {
       return null;
     case DECIMAL:
       HiveDecimalWritable decimalWritable = (HiveDecimalWritable) inputOI.getPrimitiveWritableObject(input);
-      HiveDecimal dec = RoundUtils.round(decimalWritable.getHiveDecimal(), scale);
+      HiveDecimal dec = round(decimalWritable.getHiveDecimal(), scale);
       if (dec == null) {
         return null;
       }
@@ -209,32 +212,32 @@ public class GenericUDFRound extends GenericUDF {
       if (scale >= 0) {
         return byteWritable;
       } else {
-        return new ByteWritable((byte)RoundUtils.round(byteWritable.get(), scale));
+        return new ByteWritable((byte)round(byteWritable.get(), scale));
       }
     case SHORT:
       ShortWritable shortWritable = (ShortWritable)inputOI.getPrimitiveWritableObject(input);
       if (scale >= 0) {
         return shortWritable;
       } else {
-        return new ShortWritable((short)RoundUtils.round(shortWritable.get(), scale));
+        return new ShortWritable((short)round(shortWritable.get(), scale));
       }
     case INT:
       IntWritable intWritable = (IntWritable)inputOI.getPrimitiveWritableObject(input);
       if (scale >= 0) {
         return intWritable;
       } else {
-        return new IntWritable((int)RoundUtils.round(intWritable.get(), scale));
+        return new IntWritable((int)round(intWritable.get(), scale));
       }
     case LONG:
       LongWritable longWritable = (LongWritable)inputOI.getPrimitiveWritableObject(input);
       if (scale >= 0) {
         return longWritable;
       } else {
-        return new LongWritable(RoundUtils.round(longWritable.get(), scale));
+        return new LongWritable(round(longWritable.get(), scale));
       }
     case FLOAT:
       float f = ((FloatWritable)inputOI.getPrimitiveWritableObject(input)).get();
-      return new FloatWritable((float)RoundUtils.round(f, scale));
+      return new FloatWritable((float)round(f, scale));
      case DOUBLE:
        return round(((DoubleWritable)inputOI.getPrimitiveWritableObject(input)), scale);
     case STRING:
@@ -252,7 +255,19 @@ public class GenericUDFRound extends GenericUDF {
     }
   }
 
-  private static DoubleWritable round(DoubleWritable input, int scale) {
+  protected HiveDecimal round(HiveDecimal input, int scale) {
+    return RoundUtils.round(input, scale);
+  }
+
+  protected long round(long input, int scale) {
+    return RoundUtils.round(input, scale);
+  }
+
+  protected double round(double input, int scale) {
+    return RoundUtils.round(input, scale);
+  }
+
+  protected DoubleWritable round(DoubleWritable input, int scale) {
     double d = input.get();
     if (Double.isNaN(d) || Double.isInfinite(d)) {
       return new DoubleWritable(d);

http://git-wip-us.apache.org/repos/asf/hive/blob/bd90fc34/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/RoundUtils.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/RoundUtils.java b/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/RoundUtils.java
index 0b389a5..7fd1641 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/RoundUtils.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/RoundUtils.java
@@ -49,12 +49,26 @@ public class RoundUtils {
     return BigDecimal.valueOf(input).setScale(scale, RoundingMode.HALF_UP).doubleValue();
   }
 
+  public static double bround(double input, int scale) {
+    if (Double.isNaN(input) || Double.isInfinite(input)) {
+      return input;
+    }
+    return BigDecimal.valueOf(input).setScale(scale, RoundingMode.HALF_EVEN).doubleValue();
+  }
+
   public static long round(long input, int scale) {
     return BigDecimal.valueOf(input).setScale(scale, RoundingMode.HALF_UP).longValue();
   }
 
+  public static long bround(long input, int scale) {
+    return BigDecimal.valueOf(input).setScale(scale, RoundingMode.HALF_EVEN).longValue();
+  }
+
   public static HiveDecimal round(HiveDecimal input, int scale) {
     return input.setScale(scale, HiveDecimal.ROUND_HALF_UP);
   }
 
+  public static HiveDecimal bround(HiveDecimal input, int scale) {
+    return input.setScale(scale, HiveDecimal.ROUND_HALF_EVEN);
+  }
 }
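
As a quick sanity check (not part of the commit), the scaled HALF_EVEN behaviour that RoundUtils.bround(double, int) delegates to BigDecimal can be exercised on its own; the values chosen here mirror the udf_bround.q expectations further below:

    import java.math.BigDecimal;
    import java.math.RoundingMode;

    public class BroundScaleCheck {
      static double bround(double input, int scale) {
        if (Double.isNaN(input) || Double.isInfinite(input)) {
          return input;
        }
        return BigDecimal.valueOf(input).setScale(scale, RoundingMode.HALF_EVEN).doubleValue();
      }

      public static void main(String[] args) {
        System.out.println(bround(1.25, 1));   // 1.2  -- the tie rounds toward the even digit 2
        System.out.println(bround(1.35, 1));   // 1.4
        System.out.println(bround(55.0, -1));  // 60.0 -- a negative scale rounds to tens
        System.out.println(bround(45.0, -1));  // 40.0
      }
    }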

http://git-wip-us.apache.org/repos/asf/hive/blob/bd90fc34/ql/src/test/org/apache/hadoop/hive/ql/exec/vector/TestVectorizationContext.java
----------------------------------------------------------------------
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/exec/vector/TestVectorizationContext.java b/ql/src/test/org/apache/hadoop/hive/ql/exec/vector/TestVectorizationContext.java
index 98a8c3e..8470c47 100644
--- a/ql/src/test/org/apache/hadoop/hive/ql/exec/vector/TestVectorizationContext.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/exec/vector/TestVectorizationContext.java
@@ -32,6 +32,7 @@ import junit.framework.Assert;
 import org.apache.hadoop.hive.common.type.HiveChar;
 import org.apache.hadoop.hive.common.type.HiveDecimal;
 import org.apache.hadoop.hive.common.type.HiveVarchar;
+import org.apache.hadoop.hive.ql.exec.vector.expressions.BRoundWithNumDigitsDoubleToDouble;
 import org.apache.hadoop.hive.ql.exec.vector.expressions.ColAndCol;
 import org.apache.hadoop.hive.ql.exec.vector.expressions.ColOrCol;
 import org.apache.hadoop.hive.ql.exec.vector.expressions.DoubleColumnInList;
@@ -94,6 +95,7 @@ import org.apache.hadoop.hive.ql.exec.vector.expressions.gen.FilterVarCharColumn
 import org.apache.hadoop.hive.ql.exec.vector.expressions.gen.FilterVarCharColumnNotBetween;
 import org.apache.hadoop.hive.ql.exec.vector.expressions.gen.FilterCharColumnBetween;
 import org.apache.hadoop.hive.ql.exec.vector.expressions.gen.FilterCharColumnNotBetween;
+import org.apache.hadoop.hive.ql.exec.vector.expressions.gen.FuncBRoundDoubleToDouble;
 import org.apache.hadoop.hive.ql.exec.vector.expressions.gen.FuncLnDoubleToDouble;
 import org.apache.hadoop.hive.ql.exec.vector.expressions.gen.FuncRoundDoubleToDouble;
 import org.apache.hadoop.hive.ql.exec.vector.expressions.gen.FuncSinDoubleToDouble;
@@ -114,6 +116,7 @@ import org.apache.hadoop.hive.ql.udf.UDFLog;
 import org.apache.hadoop.hive.ql.udf.UDFSin;
 import org.apache.hadoop.hive.ql.udf.UDFYear;
 import org.apache.hadoop.hive.ql.udf.generic.GenericUDF;
+import org.apache.hadoop.hive.ql.udf.generic.GenericUDFBRound;
 import org.apache.hadoop.hive.ql.udf.generic.GenericUDFBetween;
 import org.apache.hadoop.hive.ql.udf.generic.GenericUDFBridge;
 import org.apache.hadoop.hive.ql.udf.generic.GenericUDFIf;
@@ -989,6 +992,12 @@ public class TestVectorizationContext {
     ve = vc.getVectorExpression(mathFuncExpr);
     Assert.assertEquals(FuncRoundDoubleToDouble.class, ve.getClass());
 
+    // BRound without digits
+    GenericUDFBRound udfBRound = new GenericUDFBRound();
+    mathFuncExpr.setGenericUDF(udfBRound);
+    ve = vc.getVectorExpression(mathFuncExpr);
+    Assert.assertEquals(FuncBRoundDoubleToDouble.class, ve.getClass());
+
     // Round with digits
     mathFuncExpr.setGenericUDF(udfRound);
     children2.add(new ExprNodeConstantDesc(4));
@@ -997,6 +1006,12 @@ public class TestVectorizationContext {
     Assert.assertEquals(RoundWithNumDigitsDoubleToDouble.class, ve.getClass());
     Assert.assertEquals(4, ((RoundWithNumDigitsDoubleToDouble) ve).getDecimalPlaces().get());
 
+    // BRound with digits
+    mathFuncExpr.setGenericUDF(udfBRound);
+    ve = vc.getVectorExpression(mathFuncExpr);
+    Assert.assertEquals(BRoundWithNumDigitsDoubleToDouble.class, ve.getClass());
+    Assert.assertEquals(4, ((BRoundWithNumDigitsDoubleToDouble) ve).getDecimalPlaces().get());
+
     // Log with int base
     gudfBridge = new GenericUDFBridge("log", false, UDFLog.class.getName());
     mathFuncExpr.setGenericUDF(gudfBridge);

http://git-wip-us.apache.org/repos/asf/hive/blob/bd90fc34/ql/src/test/org/apache/hadoop/hive/ql/udf/generic/TestGenericUDFBRound.java
----------------------------------------------------------------------
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/udf/generic/TestGenericUDFBRound.java b/ql/src/test/org/apache/hadoop/hive/ql/udf/generic/TestGenericUDFBRound.java
new file mode 100644
index 0000000..d526dd8
--- /dev/null
+++ b/ql/src/test/org/apache/hadoop/hive/ql/udf/generic/TestGenericUDFBRound.java
@@ -0,0 +1,202 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.ql.udf.generic;
+
+import static java.math.BigDecimal.ROUND_HALF_EVEN;
+
+import java.math.BigDecimal;
+
+import org.apache.hadoop.hive.common.type.HiveDecimal;
+import org.apache.hadoop.hive.ql.exec.vector.expressions.MathExpr;
+import org.apache.hadoop.hive.ql.metadata.HiveException;
+import org.apache.hadoop.hive.ql.udf.generic.GenericUDF.DeferredJavaObject;
+import org.apache.hadoop.hive.ql.udf.generic.GenericUDF.DeferredObject;
+import org.apache.hadoop.hive.serde2.io.DoubleWritable;
+import org.apache.hadoop.hive.serde2.io.HiveDecimalWritable;
+import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
+import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorFactory;
+import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory;
+import org.apache.hadoop.io.FloatWritable;
+import org.apache.hadoop.io.IntWritable;
+import org.junit.Assert;
+import org.junit.Test;
+
+public class TestGenericUDFBRound {
+
+  @Test
+  public void testDouble() throws HiveException {
+    GenericUDFBRound udf = new GenericUDFBRound();
+    ObjectInspector valueOI0 = PrimitiveObjectInspectorFactory.writableDoubleObjectInspector;
+
+    IntWritable scale = new IntWritable(0);
+    ObjectInspector valueOI1 = PrimitiveObjectInspectorFactory
+        .getPrimitiveWritableConstantObjectInspector(TypeInfoFactory.intTypeInfo, scale);
+
+    ObjectInspector[] arguments = { valueOI0, valueOI1 };
+
+    udf.initialize(arguments);
+
+    runDouble(2.5, scale, 2.0, udf);
+    runDouble(3.5, scale, 4.0, udf);
+
+    runDouble(2.49, scale, 2.0, udf);
+    runDouble(3.49, scale, 3.0, udf);
+
+    runDouble(2.51, scale, 3.0, udf);
+    runDouble(3.51, scale, 4.0, udf);
+
+    runDouble(2.4, scale, 2.0, udf);
+    runDouble(3.4, scale, 3.0, udf);
+
+    runDouble(2.6, scale, 3.0, udf);
+    runDouble(3.6, scale, 4.0, udf);
+  }
+
+  @Test
+  public void testDoubleScaleMinus1() throws HiveException {
+    GenericUDFBRound udf = new GenericUDFBRound();
+    ObjectInspector valueOI0 = PrimitiveObjectInspectorFactory.writableDoubleObjectInspector;
+
+    IntWritable scale = new IntWritable(-1);
+    ObjectInspector valueOI1 = PrimitiveObjectInspectorFactory
+        .getPrimitiveWritableConstantObjectInspector(TypeInfoFactory.intTypeInfo, scale);
+
+    ObjectInspector[] arguments = { valueOI0, valueOI1 };
+
+    udf.initialize(arguments);
+
+    runDouble(55.0, scale, 60.0, udf);
+    runDouble(45.0, scale, 40.0, udf);
+
+    runDouble(54.9, scale, 50.0, udf);
+    runDouble(44.9, scale, 40.0, udf);
+
+    runDouble(55.1, scale, 60.0, udf);
+    runDouble(45.1, scale, 50.0, udf);
+
+    runDouble(-55.0, scale, -60.0, udf);
+    runDouble(-45.0, scale, -40.0, udf);
+
+    runDouble(-54.9, scale, -50.0, udf);
+    runDouble(-44.9, scale, -40.0, udf);
+
+    runDouble(-55.1, scale, -60.0, udf);
+    runDouble(-45.1, scale, -50.0, udf);
+  }
+
+  @Test
+  public void testFloat() throws HiveException {
+    GenericUDFBRound udf = new GenericUDFBRound();
+    ObjectInspector valueOI0 = PrimitiveObjectInspectorFactory.writableFloatObjectInspector;
+
+    IntWritable scale = new IntWritable(0);
+    ObjectInspector valueOI1 = PrimitiveObjectInspectorFactory
+        .getPrimitiveWritableConstantObjectInspector(TypeInfoFactory.intTypeInfo, scale);
+
+    ObjectInspector[] arguments = { valueOI0, valueOI1 };
+
+    udf.initialize(arguments);
+
+    runFloat(2.5f, scale, 2.0f, udf);
+    runFloat(3.5f, scale, 4.0f, udf);
+
+    runFloat(2.49f, scale, 2.0f, udf);
+    runFloat(3.49f, scale, 3.0f, udf);
+
+    runFloat(2.51f, scale, 3.0f, udf);
+    runFloat(3.51f, scale, 4.0f, udf);
+
+    runFloat(2.4f, scale, 2.0f, udf);
+    runFloat(3.4f, scale, 3.0f, udf);
+
+    runFloat(2.6f, scale, 3.0f, udf);
+    runFloat(3.6f, scale, 4.0f, udf);
+  }
+
+  @Test
+  public void testDecimal() throws HiveException {
+    GenericUDFBRound udf = new GenericUDFBRound();
+    ObjectInspector valueOI0 = PrimitiveObjectInspectorFactory.writableHiveDecimalObjectInspector;
+
+    IntWritable scale = new IntWritable(0);
+    ObjectInspector valueOI1 = PrimitiveObjectInspectorFactory
+        .getPrimitiveWritableConstantObjectInspector(TypeInfoFactory.intTypeInfo, scale);
+
+    ObjectInspector[] arguments = { valueOI0, valueOI1 };
+
+    udf.initialize(arguments);
+
+    runDecimal(2.5, scale, 2.0, udf);
+    runDecimal(3.5, scale, 4.0, udf);
+
+    runDecimal(2.49, scale, 2.0, udf);
+    runDecimal(3.49, scale, 3.0, udf);
+
+    runDecimal(2.51, scale, 3.0, udf);
+    runDecimal(3.51, scale, 4.0, udf);
+
+    runDecimal(2.4, scale, 2.0, udf);
+    runDecimal(3.4, scale, 3.0, udf);
+
+    runDecimal(2.6, scale, 3.0, udf);
+    runDecimal(3.6, scale, 4.0, udf);
+  }
+
+  @Test
+  public void testMathExprBround() throws HiveException {
+    double[] vArr = { 1.5, 2.5, -1.5, -2.5, 1.49, 1.51 };
+    for (double v : vArr) {
+      double v1 = RoundUtils.bround(v, 0);
+      double v2 = MathExpr.bround(v);
+      Assert.assertEquals(v1, v2, 0.00001);
+
+      double v3 = BigDecimal.valueOf(v).setScale(0, ROUND_HALF_EVEN).doubleValue();
+      Assert.assertEquals(v3, v2, 0.00001);
+    }
+  }
+
+  private void runDouble(double v, IntWritable scale, Double expV, GenericUDF udf)
+      throws HiveException {
+    DeferredObject valueObj0 = new DeferredJavaObject(new DoubleWritable(v));
+    DeferredObject valueObj1 = new DeferredJavaObject(scale);
+    DeferredObject[] args = { valueObj0, valueObj1 };
+    DoubleWritable output = (DoubleWritable) udf.evaluate(args);
+    Assert.assertEquals("bround() test ", expV.doubleValue(), output.get(), 0.00001);
+  }
+
+  private void runFloat(float v, IntWritable scale, Float expV, GenericUDF udf)
+      throws HiveException {
+    DeferredObject valueObj0 = new DeferredJavaObject(new FloatWritable(v));
+    DeferredObject valueObj1 = new DeferredJavaObject(scale);
+    DeferredObject[] args = { valueObj0, valueObj1 };
+    FloatWritable output = (FloatWritable) udf.evaluate(args);
+    Assert.assertEquals("bround() test ", expV.floatValue(), output.get(), 0.001f);
+  }
+
+  private void runDecimal(double v, IntWritable scale, Double expV, GenericUDF udf)
+      throws HiveException {
+    HiveDecimal hd = HiveDecimal.create(BigDecimal.valueOf(v));
+    DeferredObject valueObj0 = new DeferredJavaObject(new HiveDecimalWritable(hd));
+    DeferredObject valueObj1 = new DeferredJavaObject(scale);
+    DeferredObject[] args = { valueObj0, valueObj1 };
+    HiveDecimalWritable output = (HiveDecimalWritable) udf.evaluate(args);
+    Assert.assertEquals("bround() test ", expV.doubleValue(),
+        output.getHiveDecimal().doubleValue(), 0.00001);
+  }
+}

http://git-wip-us.apache.org/repos/asf/hive/blob/bd90fc34/ql/src/test/queries/clientpositive/udf_bround.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/udf_bround.q b/ql/src/test/queries/clientpositive/udf_bround.q
new file mode 100644
index 0000000..ef2c33a
--- /dev/null
+++ b/ql/src/test/queries/clientpositive/udf_bround.q
@@ -0,0 +1,44 @@
+set hive.fetch.task.conversion=more;
+
+DESCRIBE FUNCTION bround;
+DESC FUNCTION EXTENDED bround;
+
+select
+bround(2.5),
+bround(3.5),
+bround(2.49),
+bround(3.49),
+bround(2.51),
+bround(3.51);
+
+select
+bround(1.25, 1),
+bround(1.35, 1),
+bround(1.249, 1),
+bround(1.349, 1),
+bround(1.251, 1),
+bround(1.351, 1);
+
+select
+bround(-1.25, 1),
+bround(-1.35, 1),
+bround(-1.249, 1),
+bround(-1.349, 1),
+bround(-1.251, 1),
+bround(-1.351, 1);
+
+select
+bround(55.0, -1),
+bround(45.0, -1),
+bround(54.9, -1),
+bround(44.9, -1),
+bround(55.1, -1),
+bround(45.1, -1);
+
+select
+bround(-55.0, -1),
+bround(-45.0, -1),
+bround(-54.9, -1),
+bround(-44.9, -1),
+bround(-55.1, -1),
+bround(-45.1, -1);

http://git-wip-us.apache.org/repos/asf/hive/blob/bd90fc34/ql/src/test/queries/clientpositive/vector_bround.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/vector_bround.q b/ql/src/test/queries/clientpositive/vector_bround.q
new file mode 100644
index 0000000..380d51c
--- /dev/null
+++ b/ql/src/test/queries/clientpositive/vector_bround.q
@@ -0,0 +1,14 @@
+create table test_vector_bround(v0 double, v1 double) stored as orc;
+insert into table test_vector_bround
+values
+(2.5, 1.25),
+(3.5, 1.35),
+(-2.5, -1.25),
+(-3.5, -1.35),
+(2.49, 1.249),
+(3.49, 1.349),
+(2.51, 1.251),
+(3.51, 1.351);
+set hive.vectorized.execution.enabled=true;
+explain select bround(v0), bround(v1, 1) from test_vector_bround;
+select bround(v0), bround(v1, 1) from test_vector_bround;
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hive/blob/bd90fc34/ql/src/test/results/clientpositive/show_functions.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/show_functions.q.out b/ql/src/test/results/clientpositive/show_functions.q.out
index 5de4ffc..540079b 100644
--- a/ql/src/test/results/clientpositive/show_functions.q.out
+++ b/ql/src/test/results/clientpositive/show_functions.q.out
@@ -33,6 +33,7 @@ avg
 base64
 between
 bin
+bround
 case
 cbrt
 ceil

http://git-wip-us.apache.org/repos/asf/hive/blob/bd90fc34/ql/src/test/results/clientpositive/udf_bround.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/udf_bround.q.out b/ql/src/test/results/clientpositive/udf_bround.q.out
new file mode 100644
index 0000000..4dcea6d
--- /dev/null
+++ b/ql/src/test/results/clientpositive/udf_bround.q.out
@@ -0,0 +1,119 @@
+PREHOOK: query: DESCRIBE FUNCTION bround
+PREHOOK: type: DESCFUNCTION
+POSTHOOK: query: DESCRIBE FUNCTION bround
+POSTHOOK: type: DESCFUNCTION
+bround(x[, d]) - round x to d decimal places using HALF_EVEN rounding mode.
+PREHOOK: query: DESC FUNCTION EXTENDED bround
+PREHOOK: type: DESCFUNCTION
+POSTHOOK: query: DESC FUNCTION EXTENDED bround
+POSTHOOK: type: DESCFUNCTION
+bround(x[, d]) - round x to d decimal places using HALF_EVEN rounding mode.
+Banker's rounding. The value is rounded to the nearest even number. Also known as Gaussian rounding.
+Example:
+  > SELECT bround(12.25, 1);
+  12.2
+PREHOOK: query: select
+bround(2.5),
+bround(3.5),
+bround(2.49),
+bround(3.49),
+bround(2.51),
+bround(3.51)
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+#### A masked pattern was here ####
+POSTHOOK: query: select
+bround(2.5),
+bround(3.5),
+bround(2.49),
+bround(3.49),
+bround(2.51),
+bround(3.51)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+#### A masked pattern was here ####
+2.0	4.0	2.0	3.0	3.0	4.0
+PREHOOK: query: select
+bround(1.25, 1),
+bround(1.35, 1),
+bround(1.249, 1),
+bround(1.349, 1),
+bround(1.251, 1),
+bround(1.351, 1)
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+#### A masked pattern was here ####
+POSTHOOK: query: select
+bround(1.25, 1),
+bround(1.35, 1),
+bround(1.249, 1),
+bround(1.349, 1),
+bround(1.251, 1),
+bround(1.351, 1)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+#### A masked pattern was here ####
+1.2	1.4	1.2	1.3	1.3	1.4
+PREHOOK: query: select
+bround(-1.25, 1),
+bround(-1.35, 1),
+bround(-1.249, 1),
+bround(-1.349, 1),
+bround(-1.251, 1),
+bround(-1.351, 1)
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+#### A masked pattern was here ####
+POSTHOOK: query: select
+bround(-1.25, 1),
+bround(-1.35, 1),
+bround(-1.249, 1),
+bround(-1.349, 1),
+bround(-1.251, 1),
+bround(-1.351, 1)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+#### A masked pattern was here ####
+-1.2	-1.4	-1.2	-1.3	-1.3	-1.4
+PREHOOK: query: select
+bround(55.0, -1),
+bround(45.0, -1),
+bround(54.9, -1),
+bround(44.9, -1),
+bround(55.1, -1),
+bround(45.1, -1)
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+#### A masked pattern was here ####
+POSTHOOK: query: select
+bround(55.0, -1),
+bround(45.0, -1),
+bround(54.9, -1),
+bround(44.9, -1),
+bround(55.1, -1),
+bround(45.1, -1)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+#### A masked pattern was here ####
+60.0	40.0	50.0	40.0	60.0	50.0
+PREHOOK: query: select
+bround(-55.0, -1),
+bround(-45.0, -1),
+bround(-54.9, -1),
+bround(-44.9, -1),
+bround(-55.1, -1),
+bround(-45.1, -1)
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+#### A masked pattern was here ####
+POSTHOOK: query: select
+bround(-55.0, -1),
+bround(-45.0, -1),
+bround(-54.9, -1),
+bround(-44.9, -1),
+bround(-55.1, -1),
+bround(-45.1, -1)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+#### A masked pattern was here ####
+-60.0	-40.0	-50.0	-40.0	-60.0	-50.0

http://git-wip-us.apache.org/repos/asf/hive/blob/bd90fc34/ql/src/test/results/clientpositive/vector_bround.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_bround.q.out b/ql/src/test/results/clientpositive/vector_bround.q.out
new file mode 100644
index 0000000..85049a8
--- /dev/null
+++ b/ql/src/test/results/clientpositive/vector_bround.q.out
@@ -0,0 +1,86 @@
+PREHOOK: query: create table test_vector_bround(v0 double, v1 double) stored as orc
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@test_vector_bround
+POSTHOOK: query: create table test_vector_bround(v0 double, v1 double) stored as orc
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@test_vector_bround
+PREHOOK: query: insert into table test_vector_bround
+values
+(2.5, 1.25),
+(3.5, 1.35),
+(-2.5, -1.25),
+(-3.5, -1.35),
+(2.49, 1.249),
+(3.49, 1.349),
+(2.51, 1.251),
+(3.51, 1.351)
+PREHOOK: type: QUERY
+PREHOOK: Input: default@values__tmp__table__1
+PREHOOK: Output: default@test_vector_bround
+POSTHOOK: query: insert into table test_vector_bround
+values
+(2.5, 1.25),
+(3.5, 1.35),
+(-2.5, -1.25),
+(-3.5, -1.35),
+(2.49, 1.249),
+(3.49, 1.349),
+(2.51, 1.251),
+(3.51, 1.351)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@values__tmp__table__1
+POSTHOOK: Output: default@test_vector_bround
+POSTHOOK: Lineage: test_vector_bround.v0 EXPRESSION [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col1, type:string, comment:), ]
+POSTHOOK: Lineage: test_vector_bround.v1 EXPRESSION [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col2, type:string, comment:), ]
+PREHOOK: query: explain select bround(v0), bround(v1, 1) from test_vector_bround
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select bround(v0), bround(v1, 1) from test_vector_bround
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: test_vector_bround
+            Statistics: Num rows: 8 Data size: 128 Basic stats: COMPLETE Column stats: NONE
+            Select Operator
+              expressions: bround(v0) (type: double), bround(v1, 1) (type: double)
+              outputColumnNames: _col0, _col1
+              Statistics: Num rows: 8 Data size: 128 Basic stats: COMPLETE Column stats: NONE
+              File Output Operator
+                compressed: false
+                Statistics: Num rows: 8 Data size: 128 Basic stats: COMPLETE Column stats: NONE
+                table:
+                    input format: org.apache.hadoop.mapred.TextInputFormat
+                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+      Execution mode: vectorized
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select bround(v0), bround(v1, 1) from test_vector_bround
+PREHOOK: type: QUERY
+PREHOOK: Input: default@test_vector_bround
+#### A masked pattern was here ####
+POSTHOOK: query: select bround(v0), bround(v1, 1) from test_vector_bround
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@test_vector_bround
+#### A masked pattern was here ####
+2.0	1.2
+4.0	1.4
+-2.0	-1.2
+-4.0	-1.4
+2.0	1.2
+3.0	1.3
+3.0	1.3
+4.0	1.4

http://git-wip-us.apache.org/repos/asf/hive/blob/bd90fc34/storage-api/src/java/org/apache/hadoop/hive/common/type/HiveDecimal.java
----------------------------------------------------------------------
diff --git a/storage-api/src/java/org/apache/hadoop/hive/common/type/HiveDecimal.java b/storage-api/src/java/org/apache/hadoop/hive/common/type/HiveDecimal.java
index 7d7fb28..12a3936 100644
--- a/storage-api/src/java/org/apache/hadoop/hive/common/type/HiveDecimal.java
+++ b/storage-api/src/java/org/apache/hadoop/hive/common/type/HiveDecimal.java
@@ -51,6 +51,7 @@ public class HiveDecimal implements Comparable<HiveDecimal> {
   public static final int ROUND_FLOOR = BigDecimal.ROUND_FLOOR;
   public static final int ROUND_CEILING = BigDecimal.ROUND_CEILING;
   public static final int ROUND_HALF_UP = BigDecimal.ROUND_HALF_UP;
+  public static final int ROUND_HALF_EVEN = BigDecimal.ROUND_HALF_EVEN;
 
   private BigDecimal bd = BigDecimal.ZERO;
 


[41/50] [abbrv] hive git commit: HIVE-11549: Hide Hive configuration from spark driver launching process (reviewed by Chao)

Posted by se...@apache.org.
HIVE-11549: Hide Hive configuration from spark driver launching process (reviewed by Chao)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/fe1efe52
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/fe1efe52
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/fe1efe52

Branch: refs/heads/hbase-metastore
Commit: fe1efe520b807f24085d87fea8fc1c5ee0c9e44c
Parents: 2ccd061
Author: Xuefu Zhang <xz...@Cloudera.com>
Authored: Fri Aug 14 15:30:52 2015 -0700
Committer: Xuefu Zhang <xz...@Cloudera.com>
Committed: Fri Aug 14 15:30:52 2015 -0700

----------------------------------------------------------------------
 .../main/java/org/apache/hive/spark/client/SparkClientImpl.java  | 4 ++++
 1 file changed, 4 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/fe1efe52/spark-client/src/main/java/org/apache/hive/spark/client/SparkClientImpl.java
----------------------------------------------------------------------
diff --git a/spark-client/src/main/java/org/apache/hive/spark/client/SparkClientImpl.java b/spark-client/src/main/java/org/apache/hive/spark/client/SparkClientImpl.java
index e1e64a7..2546a46 100644
--- a/spark-client/src/main/java/org/apache/hive/spark/client/SparkClientImpl.java
+++ b/spark-client/src/main/java/org/apache/hive/spark/client/SparkClientImpl.java
@@ -423,6 +423,10 @@ class SparkClientImpl implements SparkClient {
       LOG.info("Running client driver with argv: {}", cmd);
       ProcessBuilder pb = new ProcessBuilder("sh", "-c", cmd);
 
+      // Prevent hive configurations from being visible in Spark.
+      pb.environment().remove("HIVE_HOME");
+      pb.environment().remove("HIVE_CONF_DIR");
+
       if (isTesting != null) {
         pb.environment().put("SPARK_TESTING", isTesting);
       }
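
For illustration only (not part of the commit): removing inherited variables through ProcessBuilder.environment() before the child is started is enough to hide them from the launched process. A minimal standalone sketch, with an arbitrary shell command chosen just to show the effect:

    public class EnvScrubExample {
      public static void main(String[] args) throws Exception {
        ProcessBuilder pb = new ProcessBuilder("sh", "-c", "echo HIVE_HOME=$HIVE_HOME");
        // environment() is a mutable copy of the parent environment; removals
        // only affect the child process being launched.
        pb.environment().remove("HIVE_HOME");
        pb.environment().remove("HIVE_CONF_DIR");
        pb.inheritIO();
        pb.start().waitFor();   // prints "HIVE_HOME=" even if the parent had it set
      }
    }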


[39/50] [abbrv] hive git commit: HIVE-11570: Fix PTest2 log4j2.version (Gopal V, via Sergey Shelukhin)

Posted by se...@apache.org.
HIVE-11570: Fix PTest2 log4j2.version (Gopal V, via Sergey Shelukhin)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/17e95c7c
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/17e95c7c
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/17e95c7c

Branch: refs/heads/hbase-metastore
Commit: 17e95c7c761525a4d7596099cfca179db2a19a20
Parents: e8b2c60
Author: Gopal V <go...@apache.org>
Authored: Fri Aug 14 15:22:44 2015 -0700
Committer: Gopal V <go...@apache.org>
Committed: Fri Aug 14 15:22:44 2015 -0700

----------------------------------------------------------------------
 testutils/ptest2/pom.xml | 16 +---------------
 1 file changed, 1 insertion(+), 15 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/17e95c7c/testutils/ptest2/pom.xml
----------------------------------------------------------------------
diff --git a/testutils/ptest2/pom.xml b/testutils/ptest2/pom.xml
index 2cf7f45..fade125 100644
--- a/testutils/ptest2/pom.xml
+++ b/testutils/ptest2/pom.xml
@@ -26,6 +26,7 @@ limitations under the License.
   <name>hive-ptest</name>
   <properties>
     <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
+    <log4j2.version>2.3</log4j2.version>
   </properties>
 
   <repositories>
@@ -84,21 +85,6 @@ limitations under the License.
       <version>${log4j2.version}</version>
     </dependency>
     <dependency>
-      <groupId>log4j</groupId>
-      <artifactId>log4j</artifactId>
-      <version>1.2.17</version>
-      <exclusions>
-        <exclusion>
-          <groupId>com.sun.jdmk</groupId>
-          <artifactId>jmxtools</artifactId>
-        </exclusion>
-        <exclusion>
-          <groupId>com.sun.jmx</groupId>
-          <artifactId>jmxri</artifactId>
-        </exclusion>
-      </exclusions>
-    </dependency>
-    <dependency>
       <groupId>org.apache.httpcomponents</groupId>
       <artifactId>httpclient</artifactId>
       <version>4.2.5</version>


[20/50] [abbrv] hive git commit: HIVE-11480: CBO: Calcite Operator To Hive Operator (Calcite Return Path): char/varchar as input to GenericUDAF (Pengcheng Xiong, reviewed by Jesus Camacho Rodriguez)

Posted by se...@apache.org.
HIVE-11480: CBO: Calcite Operator To Hive Operator (Calcite Return Path): char/varchar as input to GenericUDAF (Pengcheng Xiong, reviewed by Jesus Camacho Rodriguez)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/0140df74
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/0140df74
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/0140df74

Branch: refs/heads/hbase-metastore
Commit: 0140df748f6714cc327132f008a13f6af5e41397
Parents: c4ceefb
Author: Pengcheng Xiong <px...@apache.org>
Authored: Wed Aug 12 10:43:35 2015 -0700
Committer: Pengcheng Xiong <px...@apache.org>
Committed: Wed Aug 12 10:43:35 2015 -0700

----------------------------------------------------------------------
 .../java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFStd.java | 2 ++
 .../org/apache/hadoop/hive/ql/udf/generic/GenericUDAFVariance.java | 2 ++
 2 files changed, 4 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/0140df74/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFStd.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFStd.java b/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFStd.java
index 159a2fe..071884c 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFStd.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFStd.java
@@ -55,6 +55,8 @@ public class GenericUDAFStd extends GenericUDAFVariance {
     case FLOAT:
     case DOUBLE:
     case STRING:
+    case VARCHAR:
+    case CHAR:
     case TIMESTAMP:
     case DECIMAL:
       return new GenericUDAFStdEvaluator();

http://git-wip-us.apache.org/repos/asf/hive/blob/0140df74/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFVariance.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFVariance.java b/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFVariance.java
index 3545390..2950605 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFVariance.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFVariance.java
@@ -72,6 +72,8 @@ public class GenericUDAFVariance extends AbstractGenericUDAFResolver {
     case FLOAT:
     case DOUBLE:
     case STRING:
+    case VARCHAR:
+    case CHAR:
     case TIMESTAMP:
     case DECIMAL:
       return new GenericUDAFVarianceEvaluator();


[22/50] [abbrv] hive git commit: HIVE-10435 : Make HiveSession implementation pluggable through configuration (Akshay Goyal, reviewed by Amareshwari)

Posted by se...@apache.org.
HIVE-10435 : Make HiveSession implementation pluggable through configuration (Akshay Goyal, reviewed by Amareshwari)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/5b67f352
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/5b67f352
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/5b67f352

Branch: refs/heads/hbase-metastore
Commit: 5b67f35232cf9f9323e999fbbaac0a8ddffc6954
Parents: 16546cc
Author: Akshay Goyal <ak...@gmail.com>
Authored: Thu Aug 13 16:12:14 2015 +0530
Committer: Amareshwari Sriramadasu <am...@apache.org>
Committed: Thu Aug 13 16:12:14 2015 +0530

----------------------------------------------------------------------
 .../org/apache/hadoop/hive/conf/HiveConf.java   |  3 ++
 .../service/cli/session/SessionManager.java     | 42 +++++++++++++--
 .../session/TestPluggableHiveSessionImpl.java   | 55 ++++++++++++++++++++
 3 files changed, 95 insertions(+), 5 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/5b67f352/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
----------------------------------------------------------------------
diff --git a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
index 9cc7987..d1cb5fb 100644
--- a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
+++ b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
@@ -1869,6 +1869,9 @@ public class HiveConf extends Configuration {
         new TimeValidator(TimeUnit.MILLISECONDS),
         "Time that HiveServer2 will wait before responding to asynchronous calls that use long polling"),
 
+    HIVE_SESSION_IMPL_CLASSNAME("hive.session.impl.classname", null, "Classname for custom implementation of hive session"),
+    HIVE_SESSION_IMPL_WITH_UGI_CLASSNAME("hive.session.impl.withugi.classname", null, "Classname for custom implementation of hive session with UGI"),
+
     // HiveServer2 auth configuration
     HIVE_SERVER2_AUTHENTICATION("hive.server2.authentication", "NONE",
       new StringSet("NOSASL", "NONE", "LDAP", "KERBEROS", "PAM", "CUSTOM"),

http://git-wip-us.apache.org/repos/asf/hive/blob/5b67f352/service/src/java/org/apache/hive/service/cli/session/SessionManager.java
----------------------------------------------------------------------
diff --git a/service/src/java/org/apache/hive/service/cli/session/SessionManager.java b/service/src/java/org/apache/hive/service/cli/session/SessionManager.java
index c46bbce..77c5e66 100644
--- a/service/src/java/org/apache/hive/service/cli/session/SessionManager.java
+++ b/service/src/java/org/apache/hive/service/cli/session/SessionManager.java
@@ -20,6 +20,7 @@ package org.apache.hive.service.cli.session;
 
 import java.io.File;
 import java.io.IOException;
+import java.lang.reflect.Constructor;
 import java.util.ArrayList;
 import java.util.Date;
 import java.util.List;
@@ -67,6 +68,8 @@ public class SessionManager extends CompositeService {
   private volatile boolean shutdown;
   // The HiveServer2 instance running this service
   private final HiveServer2 hiveServer2;
+  private String sessionImplWithUGIclassName;
+  private String sessionImplclassName;
 
   public SessionManager(HiveServer2 hiveServer2) {
     super(SessionManager.class.getSimpleName());
@@ -82,9 +85,15 @@ public class SessionManager extends CompositeService {
     }
     createBackgroundOperationPool();
     addService(operationManager);
+    initSessionImplClassName();
     super.init(hiveConf);
   }
 
+  private void initSessionImplClassName() {
+    this.sessionImplclassName = hiveConf.getVar(ConfVars.HIVE_SESSION_IMPL_CLASSNAME);
+    this.sessionImplWithUGIclassName = hiveConf.getVar(ConfVars.HIVE_SESSION_IMPL_WITH_UGI_CLASSNAME);
+  }
+
   private void createBackgroundOperationPool() {
     int poolSize = hiveConf.getIntVar(ConfVars.HIVE_SERVER2_ASYNC_EXEC_THREADS);
     LOG.info("HiveServer2: Background operation thread pool size: " + poolSize);
@@ -245,12 +254,35 @@ public class SessionManager extends CompositeService {
     // If doAs is set to true for HiveServer2, we will create a proxy object for the session impl.
     // Within the proxy object, we wrap the method call in a UserGroupInformation#doAs
     if (withImpersonation) {
-      HiveSessionImplwithUGI sessionWithUGI = new HiveSessionImplwithUGI(protocol, username, password,
-          hiveConf, ipAddress, delegationToken);
-      session = HiveSessionProxy.getProxy(sessionWithUGI, sessionWithUGI.getSessionUgi());
-      sessionWithUGI.setProxySession(session);
+      HiveSessionImplwithUGI hiveSessionUgi;
+      if (sessionImplWithUGIclassName == null) {
+        hiveSessionUgi = new HiveSessionImplwithUGI(protocol, username, password,
+            hiveConf, ipAddress, delegationToken);
+      } else {
+        try {
+          Class<?> clazz = Class.forName(sessionImplWithUGIclassName);
+          Constructor<?> constructor = clazz.getConstructor(String.class, String.class, Map.class, String.class);
+          hiveSessionUgi = (HiveSessionImplwithUGI) constructor.newInstance(new Object[]
+              {protocol, username, password, hiveConf, ipAddress, delegationToken});
+        } catch (Exception e) {
+          throw new HiveSQLException("Cannot initialize session class: " + sessionImplWithUGIclassName);
+        }
+      }
+      session = HiveSessionProxy.getProxy(hiveSessionUgi, hiveSessionUgi.getSessionUgi());
+      hiveSessionUgi.setProxySession(session);
     } else {
-      session = new HiveSessionImpl(protocol, username, password, hiveConf, ipAddress);
+      if (sessionImplclassName == null) {
+        session = new HiveSessionImpl(protocol, username, password, hiveConf, ipAddress);
+      } else {
+        try {
+          Class<?> clazz = Class.forName(sessionImplclassName);
+          Constructor<?> constructor = clazz.getConstructor(String.class, String.class, Map.class);
+          session = (HiveSession) constructor.newInstance(new Object[]
+              {protocol, username, password, hiveConf, ipAddress});
+        } catch (Exception e) {
+          throw new HiveSQLException("Cannot initialize session class: " + sessionImplclassName);
+        }
+      }
     }
     session.setSessionManager(this);
     session.setOperationManager(operationManager);
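
For illustration only (not part of this commit): with the two new configuration keys, a
deployment could plug in a custom non-UGI session by subclassing HiveSessionImpl with the
same constructor shape exercised by the test added below. The class and package names here
are hypothetical; a minimal sketch:

    package com.example.hive;

    import org.apache.hadoop.hive.conf.HiveConf;
    import org.apache.hive.service.cli.session.HiveSessionImpl;
    import org.apache.hive.service.cli.thrift.TProtocolVersion;

    // Hypothetical custom session adding per-session behaviour (auditing, quotas, ...).
    public class AuditedHiveSession extends HiveSessionImpl {
      public AuditedHiveSession(TProtocolVersion protocol, String username, String password,
          HiveConf serverConf, String ipAddress) {
        super(protocol, username, password, serverConf, ipAddress);
        // custom initialization would go here
      }
    }

It would then be selected by pointing the new key at the class, e.g. programmatically:

    hiveConf.setVar(HiveConf.ConfVars.HIVE_SESSION_IMPL_CLASSNAME,
        "com.example.hive.AuditedHiveSession");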

http://git-wip-us.apache.org/repos/asf/hive/blob/5b67f352/service/src/test/org/apache/hive/service/cli/session/TestPluggableHiveSessionImpl.java
----------------------------------------------------------------------
diff --git a/service/src/test/org/apache/hive/service/cli/session/TestPluggableHiveSessionImpl.java b/service/src/test/org/apache/hive/service/cli/session/TestPluggableHiveSessionImpl.java
new file mode 100644
index 0000000..8c7546c
--- /dev/null
+++ b/service/src/test/org/apache/hive/service/cli/session/TestPluggableHiveSessionImpl.java
@@ -0,0 +1,55 @@
+package org.apache.hive.service.cli.session;
+
+import junit.framework.Assert;
+import junit.framework.TestCase;
+import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hive.service.cli.CLIService;
+import org.apache.hive.service.cli.HiveSQLException;
+import org.apache.hive.service.cli.SessionHandle;
+import org.apache.hive.service.cli.thrift.TProtocolVersion;
+import org.apache.hive.service.cli.thrift.ThriftBinaryCLIService;
+import org.apache.hive.service.cli.thrift.ThriftCLIService;
+import org.apache.hive.service.cli.thrift.ThriftCLIServiceClient;
+import org.junit.Before;
+import org.junit.Test;
+
+public class TestPluggableHiveSessionImpl extends TestCase {
+
+  private HiveConf hiveConf;
+  private CLIService cliService;
+  private ThriftCLIServiceClient client;
+  private ThriftCLIService service;
+
+  @Override
+  @Before
+  public void setUp() {
+    hiveConf = new HiveConf();
+    hiveConf.setVar(HiveConf.ConfVars.HIVE_SESSION_IMPL_CLASSNAME, TestHiveSessionImpl.class.getName());
+    cliService = new CLIService(null);
+    service = new ThriftBinaryCLIService(cliService);
+    service.init(hiveConf);
+    client = new ThriftCLIServiceClient(service);
+  }
+
+
+  @Test
+  public void testSessionImpl() {
+    SessionHandle sessionHandle = null;
+    try {
+      sessionHandle = client.openSession("tom", "password");
+      Assert.assertEquals(TestHiveSessionImpl.class.getName(),
+              service.getHiveConf().getVar(HiveConf.ConfVars.HIVE_SESSION_IMPL_CLASSNAME));
+      Assert.assertTrue(cliService.getSessionManager().getSession(sessionHandle) instanceof TestHiveSessionImpl);
+      client.closeSession(sessionHandle);
+    } catch (HiveSQLException e) {
+      e.printStackTrace();
+    }
+  }
+
+  class TestHiveSessionImpl extends HiveSessionImpl {
+
+    public TestHiveSessionImpl(TProtocolVersion protocol, String username, String password, HiveConf serverhiveConf, String ipAddress) {
+      super(protocol, username, password, serverhiveConf, ipAddress);
+    }
+  }
+}


[36/50] [abbrv] hive git commit: HIVE-11563: Perflogger loglines are repeated (Prasanth Jayachandran reviewed by Sergey Shelukhin)

Posted by se...@apache.org.
HIVE-11563: Perflogger loglines are repeated (Prasanth Jayachandran reviewed by Sergey Shelukhin)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/0fab86c9
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/0fab86c9
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/0fab86c9

Branch: refs/heads/hbase-metastore
Commit: 0fab86c9d7730275cb9962c24ce9d2f92f0c7150
Parents: d307abb
Author: Prasanth Jayachandran <j....@gmail.com>
Authored: Fri Aug 14 14:02:51 2015 -0700
Committer: Prasanth Jayachandran <j....@gmail.com>
Committed: Fri Aug 14 14:02:51 2015 -0700

----------------------------------------------------------------------
 data/conf/hive-log4j2.xml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/0fab86c9/data/conf/hive-log4j2.xml
----------------------------------------------------------------------
diff --git a/data/conf/hive-log4j2.xml b/data/conf/hive-log4j2.xml
index a40be2c..ff8e4d3 100644
--- a/data/conf/hive-log4j2.xml
+++ b/data/conf/hive-log4j2.xml
@@ -95,7 +95,7 @@
     <Logger name="org.apache.zookeeper.ClientCnxnSocketNIO" level="WARN">
       <AppenderRef ref="${sys:hive.root.logger}"/>
     </Logger>
-    <Logger name="org.apache.hadoop.hive.ql.log.PerfLogger" level="${sys:hive.ql.log.PerfLogger.level}">
+    <Logger name="org.apache.hadoop.hive.ql.log.PerfLogger" level="${sys:hive.ql.log.PerfLogger.level}" additivity="false">
       <AppenderRef ref="${sys:hive.root.logger}"/>
     </Logger>
     <Logger name="org.apache.hadoop.hive.ql.exec.Operator" level="INFO">
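
For background: in Log4j2, a logger with additivity enabled also forwards its events to the
appenders of its ancestors. Because the named loggers in this file reuse the
${sys:hive.root.logger} appender, each PerfLogger event was written once by the logger itself
and once more after bubbling up to the root logger; additivity="false" stops the second write.
Illustrative only (not part of the commit), the same effect expressed through the Log4j2 core
API would look roughly like this:

    import org.apache.logging.log4j.LogManager;
    import org.apache.logging.log4j.core.LoggerContext;
    import org.apache.logging.log4j.core.config.LoggerConfig;

    public class PerfLoggerAdditivity {
      public static void main(String[] args) {
        LoggerContext ctx = (LoggerContext) LogManager.getContext(false);
        LoggerConfig perfLogger =
            ctx.getConfiguration().getLoggerConfig("org.apache.hadoop.hive.ql.log.PerfLogger");
        // Keep PerfLogger events on the logger's own appender only, instead of also
        // letting them bubble up to the root logger's appender.
        perfLogger.setAdditive(false);
        ctx.updateLoggers();
      }
    }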


[21/50] [abbrv] hive git commit: HIVE-11462: Constant fold struct() UDF (Gopal V, reviewed by Hari Sankar Sivarama Subramaniyan)

Posted by se...@apache.org.
HIVE-11462: Constant fold struct() UDF (Gopal V, reviewed by Hari Sankar Sivarama Subramaniyan)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/16546cc4
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/16546cc4
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/16546cc4

Branch: refs/heads/hbase-metastore
Commit: 16546cc4b8f6944f5ea4ad13f480dcc402e6757c
Parents: 0140df7
Author: Gopal V <go...@apache.org>
Authored: Wed Aug 12 16:45:58 2015 -0700
Committer: Gopal V <go...@apache.org>
Committed: Wed Aug 12 16:45:58 2015 -0700

----------------------------------------------------------------------
 .../optimizer/ConstantPropagateProcFactory.java | 40 +++++++++++++++-----
 .../hive/ql/plan/ExprNodeConstantDesc.java      | 29 ++++++++++++--
 .../hive/ql/udf/generic/GenericUDFIn.java       |  3 +-
 .../hive/ql/udf/generic/GenericUDFStruct.java   | 25 +++++++++---
 .../test/results/clientpositive/null_cast.q.out |  2 +-
 .../test/results/clientpositive/structin.q.out  |  2 +-
 .../results/clientpositive/udf_inline.q.out     |  2 +-
 .../results/clientpositive/udf_struct.q.out     |  2 +-
 .../test/results/clientpositive/udf_union.q.out |  2 +-
 .../objectinspector/ObjectInspectorUtils.java   |  3 ++
 10 files changed, 87 insertions(+), 23 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/16546cc4/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ConstantPropagateProcFactory.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ConstantPropagateProcFactory.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ConstantPropagateProcFactory.java
index cf10c52..55ad0ce 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ConstantPropagateProcFactory.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ConstantPropagateProcFactory.java
@@ -75,10 +75,14 @@ import org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPNotEqual;
 import org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPNotNull;
 import org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPNull;
 import org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPOr;
+import org.apache.hadoop.hive.ql.udf.generic.GenericUDFStruct;
 import org.apache.hadoop.hive.ql.udf.generic.GenericUDFWhen;
 import org.apache.hadoop.hive.serde.serdeConstants;
+import org.apache.hadoop.hive.serde2.objectinspector.ConstantObjectInspector;
 import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
+import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector.Category;
 import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorConverters;
+import org.apache.hadoop.hive.serde2.objectinspector.StandardConstantStructObjectInspector;
 import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorConverters.Converter;
 import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorUtils;
 import org.apache.hadoop.hive.serde2.objectinspector.PrimitiveObjectInspector;
@@ -758,6 +762,10 @@ public final class ConstantPropagateProcFactory {
             return null;
           }
         }
+        if (constant.getTypeInfo().getCategory() != Category.PRIMITIVE) {
+          // nested complex types cannot be folded cleanly 
+          return null;
+        }
         Object value = constant.getValue();
         PrimitiveTypeInfo pti = (PrimitiveTypeInfo) constant.getTypeInfo();
         Object writableValue = null == value ? value :
@@ -774,6 +782,10 @@ public final class ConstantPropagateProcFactory {
           return null;
         }
         ExprNodeConstantDesc constant = (ExprNodeConstantDesc) evaluatedFn;
+        if (constant.getTypeInfo().getCategory() != Category.PRIMITIVE) {
+          // nested complex types cannot be folded cleanly
+          return null;
+        }
         Object writableValue = PrimitiveObjectInspectorFactory.getPrimitiveJavaObjectInspector(
           (PrimitiveTypeInfo) constant.getTypeInfo()).getPrimitiveWritableObject(constant.getValue());
         arguments[i] = new DeferredJavaObject(writableValue);
@@ -790,28 +802,38 @@ public final class ConstantPropagateProcFactory {
         LOG.debug(udf.getClass().getName() + "(" + exprs + ")=" + o);
       }
       if (o == null) {
-        return new ExprNodeConstantDesc(TypeInfoUtils.getTypeInfoFromObjectInspector(oi), o);
+        return new ExprNodeConstantDesc(
+            TypeInfoUtils.getTypeInfoFromObjectInspector(oi), o);
       }
       Class<?> clz = o.getClass();
       if (PrimitiveObjectInspectorUtils.isPrimitiveWritableClass(clz)) {
         PrimitiveObjectInspector poi = (PrimitiveObjectInspector) oi;
         TypeInfo typeInfo = poi.getTypeInfo();
         o = poi.getPrimitiveJavaObject(o);
-        if (typeInfo.getTypeName().contains(serdeConstants.DECIMAL_TYPE_NAME) ||
-            typeInfo.getTypeName().contains(serdeConstants.VARCHAR_TYPE_NAME) ||
-            typeInfo.getTypeName().contains(serdeConstants.CHAR_TYPE_NAME)) {
+        if (typeInfo.getTypeName().contains(serdeConstants.DECIMAL_TYPE_NAME)
+            || typeInfo.getTypeName()
+                .contains(serdeConstants.VARCHAR_TYPE_NAME)
+            || typeInfo.getTypeName().contains(serdeConstants.CHAR_TYPE_NAME)) {
           return new ExprNodeConstantDesc(typeInfo, o);
         }
-      } else if (PrimitiveObjectInspectorUtils.isPrimitiveJavaClass(clz)) {
-
-      } else {
+      } else if (udf instanceof GenericUDFStruct
+          && oi instanceof StandardConstantStructObjectInspector) {
+        // do not fold named_struct, only struct()
+        ConstantObjectInspector coi = (ConstantObjectInspector) oi;
+        TypeInfo structType = TypeInfoUtils.getTypeInfoFromObjectInspector(coi);
+        return new ExprNodeConstantDesc(structType,
+            ObjectInspectorUtils.copyToStandardJavaObject(o, coi));
+      } else if (!PrimitiveObjectInspectorUtils.isPrimitiveJavaClass(clz)) {
         if (LOG.isErrorEnabled()) {
-          LOG.error("Unable to evaluate " + udf + ". Return value unrecoginizable.");
+          LOG.error("Unable to evaluate " + udf
+              + ". Return value unrecognizable.");
         }
         return null;
+      } else {
+        // fall through
       }
       String constStr = null;
-      if(arguments.length == 1 && FunctionRegistry.isOpCast(udf)) {
+      if (arguments.length == 1 && FunctionRegistry.isOpCast(udf)) {
         // remember original string representation of constant.
         constStr = arguments[0].get().toString();
       }

http://git-wip-us.apache.org/repos/asf/hive/blob/16546cc4/ql/src/java/org/apache/hadoop/hive/ql/plan/ExprNodeConstantDesc.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/ExprNodeConstantDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/ExprNodeConstantDesc.java
index 2674fe3..a5221a2 100755
--- a/ql/src/java/org/apache/hadoop/hive/ql/plan/ExprNodeConstantDesc.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/ExprNodeConstantDesc.java
@@ -19,12 +19,15 @@
 package org.apache.hadoop.hive.ql.plan;
 
 import java.io.Serializable;
+import java.util.List;
 
 import org.apache.commons.lang.builder.HashCodeBuilder;
 import org.apache.hadoop.hive.serde.serdeConstants;
 import org.apache.hadoop.hive.serde2.objectinspector.ConstantObjectInspector;
+import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector.Category;
 import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorUtils;
 import org.apache.hadoop.hive.serde2.typeinfo.BaseCharTypeInfo;
+import org.apache.hadoop.hive.serde2.typeinfo.StructTypeInfo;
 import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo;
 import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory;
 import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils;
@@ -73,6 +76,7 @@ public class ExprNodeConstantDesc extends ExprNodeDesc implements Serializable {
   }
 
   public void setValue(Object value) {
+    // Kryo setter
     this.value = value;
   }
 
@@ -92,8 +96,7 @@ public class ExprNodeConstantDesc extends ExprNodeDesc implements Serializable {
     return "Const " + typeInfo.toString() + " " + value;
   }
 
-  @Override
-  public String getExprString() {
+  private static String getFormatted(TypeInfo typeInfo, Object value) {
     if (value == null) {
       return "null";
     }
@@ -109,8 +112,28 @@ public class ExprNodeConstantDesc extends ExprNodeDesc implements Serializable {
         hexChars[j * 2 + 1] = hexArray[v & 0x0F];
       }
       return new String(hexChars);
+    }
+    return value.toString();
+  }
+
+  @Override
+  public String getExprString() {
+    if (typeInfo.getCategory() == Category.PRIMITIVE) {
+      return getFormatted(typeInfo, value);
+    } else if (typeInfo.getCategory() == Category.STRUCT) {
+      StringBuilder sb = new StringBuilder();
+      sb.append("const struct(");
+      List<?> items = (List<?>) getWritableObjectInspector().getWritableConstantValue();
+      List<TypeInfo> structTypes = ((StructTypeInfo) typeInfo).getAllStructFieldTypeInfos();
+      for (int i = 0; i < structTypes.size(); i++) {
+        final Object o = (i < items.size()) ? items.get(i) : null;
+        sb.append(getFormatted(structTypes.get(i), o)).append(",");
+      }
+      sb.setCharAt(sb.length() - 1, ')');
+      return sb.toString();
     } else {
-      return value.toString();
+      // unknown type
+      return toString();
     }
   }
 

http://git-wip-us.apache.org/repos/asf/hive/blob/16546cc4/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFIn.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFIn.java b/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFIn.java
index 56ac3e1..7660ca4 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFIn.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFIn.java
@@ -60,7 +60,8 @@ import com.esotericsoftware.minlog.Log;
 public class GenericUDFIn extends GenericUDF {
 
   private transient ObjectInspector[] argumentOIs;
-  private Set<Object> constantInSet;
+  // this set is a copy of the arguments objects - avoid serializing
+  private transient Set<Object> constantInSet;
   private boolean isInSetConstant = true; //are variables from IN(...) constant
 
   private final BooleanWritable bw = new BooleanWritable();

http://git-wip-us.apache.org/repos/asf/hive/blob/16546cc4/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFStruct.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFStruct.java b/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFStruct.java
index 7df3f7d..7e286fb 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFStruct.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFStruct.java
@@ -21,12 +21,13 @@ package org.apache.hadoop.hive.ql.udf.generic;
 import java.util.ArrayList;
 import java.util.Arrays;
 
-import org.apache.hadoop.hive.ql.exec.UDFArgumentException;
 import org.apache.hadoop.hive.ql.exec.Description;
+import org.apache.hadoop.hive.ql.exec.UDFArgumentException;
 import org.apache.hadoop.hive.ql.metadata.HiveException;
+import org.apache.hadoop.hive.serde2.objectinspector.ConstantObjectInspector;
 import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
+import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector.Category;
 import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorFactory;
-import org.apache.hadoop.hive.serde2.objectinspector.StructObjectInspector;
 
 @Description(name = "struct",
     value = "_FUNC_(col1, col2, col3, ...) - Creates a struct with the given field values")
@@ -44,9 +45,23 @@ public class GenericUDFStruct extends GenericUDF {
     for (int f = 1; f <= numFields; f++) {
       fname.add("col" + f);
     }
-    StructObjectInspector soi = 
-      ObjectInspectorFactory.getStandardStructObjectInspector(fname, Arrays.asList(arguments));
-    return soi;
+    boolean constantStruct = true;
+    for (int i = 0; i < arguments.length; i++) {
+      ObjectInspector oi = arguments[i];
+      constantStruct &= (oi.getCategory() == Category.PRIMITIVE)
+          && (oi instanceof ConstantObjectInspector);
+      if (constantStruct) {
+        // nested complex types trigger Kryo issue #216 in plan deserialization
+        ret[i] = ((ConstantObjectInspector) oi).getWritableConstantValue();
+      }
+    }
+    if (constantStruct) {
+      return ObjectInspectorFactory.getStandardConstantStructObjectInspector(fname,
+          Arrays.asList(arguments), Arrays.asList(ret));
+    } else {
+      return ObjectInspectorFactory.getStandardStructObjectInspector(fname,
+          Arrays.asList(arguments));
+    }
   }
 
   @Override
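
A minimal, test-style sketch of the new behaviour (illustrative only, not part of this diff;
it assumes the serde2 constant object inspector factory methods referenced here behave as in
the serde2 API): when every argument is a constant primitive, initialize() now returns a
constant struct inspector, which is what lets ConstantPropagateProcFactory fold struct()
calls at compile time.

    import org.apache.hadoop.hive.ql.exec.UDFArgumentException;
    import org.apache.hadoop.hive.ql.udf.generic.GenericUDFStruct;
    import org.apache.hadoop.hive.serde2.objectinspector.ConstantObjectInspector;
    import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
    import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorFactory;
    import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory;
    import org.apache.hadoop.io.IntWritable;
    import org.apache.hadoop.io.Text;

    public class ConstantStructSketch {
      public static void main(String[] args) throws UDFArgumentException {
        // Constant object inspectors for the literals 1 and 'a'.
        ObjectInspector intOI = PrimitiveObjectInspectorFactory
            .getPrimitiveWritableConstantObjectInspector(TypeInfoFactory.intTypeInfo, new IntWritable(1));
        ObjectInspector strOI = PrimitiveObjectInspectorFactory
            .getPrimitiveWritableConstantObjectInspector(TypeInfoFactory.stringTypeInfo, new Text("a"));

        // struct(1, 'a') now advertises itself as a constant struct.
        ObjectInspector out = new GenericUDFStruct().initialize(new ObjectInspector[] {intOI, strOI});
        System.out.println(out instanceof ConstantObjectInspector);  // expected: true
      }
    }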

http://git-wip-us.apache.org/repos/asf/hive/blob/16546cc4/ql/src/test/results/clientpositive/null_cast.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/null_cast.q.out b/ql/src/test/results/clientpositive/null_cast.q.out
index b5af69b..ff37fe7 100644
--- a/ql/src/test/results/clientpositive/null_cast.q.out
+++ b/ql/src/test/results/clientpositive/null_cast.q.out
@@ -23,7 +23,7 @@ STAGE PLANS:
             Row Limit Per Split: 1
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE
             Select Operator
-              expressions: array(null,0) (type: array<int>), array(null,array()) (type: array<array<string>>), array(null,map()) (type: array<map<string,string>>), array(null,struct(0)) (type: array<struct<col1:int>>)
+              expressions: array(null,0) (type: array<int>), array(null,array()) (type: array<array<string>>), array(null,map()) (type: array<map<string,string>>), array(null,const struct(0)) (type: array<struct<col1:int>>)
               outputColumnNames: _col0, _col1, _col2, _col3
               Statistics: Num rows: 500 Data size: 108000 Basic stats: COMPLETE Column stats: COMPLETE
               File Output Operator

http://git-wip-us.apache.org/repos/asf/hive/blob/16546cc4/ql/src/test/results/clientpositive/structin.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/structin.q.out b/ql/src/test/results/clientpositive/structin.q.out
index e36fceb..81c792a 100644
--- a/ql/src/test/results/clientpositive/structin.q.out
+++ b/ql/src/test/results/clientpositive/structin.q.out
@@ -44,7 +44,7 @@ STAGE PLANS:
             alias: t11
             Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
             Filter Operator
-              predicate: (struct(id,lineid)) IN (struct('1234-1111-0074578664','3'), struct('1234-1111-0074578695','1'), struct('1234-1111-0074580704','1'), struct('1234-1111-0074581619','2'), struct('1234-1111-0074582745','1'), struct('1234-1111-0074586625','1'), struct('1234-1111-0074019112','1'), struct('1234-1111-0074019610','1'), struct('1234-1111-0074022106','1')) (type: boolean)
+              predicate: (struct(id,lineid)) IN (const struct('1234-1111-0074578664','3'), const struct('1234-1111-0074578695','1'), const struct('1234-1111-0074580704','1'), const struct('1234-1111-0074581619','2'), const struct('1234-1111-0074582745','1'), const struct('1234-1111-0074586625','1'), const struct('1234-1111-0074019112','1'), const struct('1234-1111-0074019610','1'), const struct('1234-1111-0074022106','1')) (type: boolean)
               Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
               Select Operator
                 expressions: id (type: string), lineid (type: string)

http://git-wip-us.apache.org/repos/asf/hive/blob/16546cc4/ql/src/test/results/clientpositive/udf_inline.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/udf_inline.q.out b/ql/src/test/results/clientpositive/udf_inline.q.out
index 7d372f3..f986abf 100644
--- a/ql/src/test/results/clientpositive/udf_inline.q.out
+++ b/ql/src/test/results/clientpositive/udf_inline.q.out
@@ -31,7 +31,7 @@ STAGE PLANS:
           alias: src
           Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE
           Select Operator
-            expressions: array(struct(1,'dude!'),struct(2,'Wheres'),struct(3,'my car?')) (type: array<struct<col1:int,col2:string>>)
+            expressions: array(const struct(1,'dude!'),const struct(2,'Wheres'),const struct(3,'my car?')) (type: array<struct<col1:int,col2:string>>)
             outputColumnNames: _col0
             Statistics: Num rows: 500 Data size: 32000 Basic stats: COMPLETE Column stats: COMPLETE
             UDTF Operator

http://git-wip-us.apache.org/repos/asf/hive/blob/16546cc4/ql/src/test/results/clientpositive/udf_struct.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/udf_struct.q.out b/ql/src/test/results/clientpositive/udf_struct.q.out
index d0c56c7..0d2d71d 100644
--- a/ql/src/test/results/clientpositive/udf_struct.q.out
+++ b/ql/src/test/results/clientpositive/udf_struct.q.out
@@ -29,7 +29,7 @@ STAGE PLANS:
           Row Limit Per Split: 1
           Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE
           Select Operator
-            expressions: struct(1) (type: struct<col1:int>), struct(1,'a') (type: struct<col1:int,col2:string>), struct(1,'b',1.5).col1 (type: int), struct(1,struct('a',1.5)).col2.col1 (type: string)
+            expressions: const struct(1) (type: struct<col1:int>), const struct(1,'a') (type: struct<col1:int,col2:string>), struct(1,'b',1.5).col1 (type: int), struct(1,struct('a',1.5)).col2.col1 (type: string)
             outputColumnNames: _col0, _col1, _col2, _col3
             Statistics: Num rows: 500 Data size: 184500 Basic stats: COMPLETE Column stats: COMPLETE
             ListSink

http://git-wip-us.apache.org/repos/asf/hive/blob/16546cc4/ql/src/test/results/clientpositive/udf_union.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/udf_union.q.out b/ql/src/test/results/clientpositive/udf_union.q.out
index 73d4bdd..114040f 100644
--- a/ql/src/test/results/clientpositive/udf_union.q.out
+++ b/ql/src/test/results/clientpositive/udf_union.q.out
@@ -34,7 +34,7 @@ STAGE PLANS:
           Row Limit Per Split: 2
           Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
           Select Operator
-            expressions: create_union(0,key) (type: uniontype<string>), create_union(if((key < 100), 0, 1),2.0,value) (type: uniontype<double,string>), create_union(1,'a',struct(2,'b')) (type: uniontype<string,struct<col1:int,col2:string>>)
+            expressions: create_union(0,key) (type: uniontype<string>), create_union(if((key < 100), 0, 1),2.0,value) (type: uniontype<double,string>), create_union(1,'a',const struct(2,'b')) (type: uniontype<string,struct<col1:int,col2:string>>)
             outputColumnNames: _col0, _col1, _col2
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             ListSink

http://git-wip-us.apache.org/repos/asf/hive/blob/16546cc4/serde/src/java/org/apache/hadoop/hive/serde2/objectinspector/ObjectInspectorUtils.java
----------------------------------------------------------------------
diff --git a/serde/src/java/org/apache/hadoop/hive/serde2/objectinspector/ObjectInspectorUtils.java b/serde/src/java/org/apache/hadoop/hive/serde2/objectinspector/ObjectInspectorUtils.java
index 64dd512..00a6384 100644
--- a/serde/src/java/org/apache/hadoop/hive/serde2/objectinspector/ObjectInspectorUtils.java
+++ b/serde/src/java/org/apache/hadoop/hive/serde2/objectinspector/ObjectInspectorUtils.java
@@ -1084,6 +1084,9 @@ public final class ObjectInspectorUtils {
             fieldObjectInspectors.add(getStandardObjectInspector(f
             .getFieldObjectInspector(), ObjectInspectorCopyOption.WRITABLE));
           }
+          if (value != null && (writableValue.getClass().isArray())) {
+            writableValue = java.util.Arrays.asList((Object[])writableValue);
+          }
           return ObjectInspectorFactory.getStandardConstantStructObjectInspector(
             fieldNames,
             fieldObjectInspectors,


[38/50] [abbrv] hive git commit: HIVE-11546: Projected columns read size should be scaled to split size for ORC Splits (Prasanth Jayachandran reviewed by Sergey Shelukhin)

Posted by se...@apache.org.
HIVE-11546: Projected columns read size should be scaled to split size for ORC Splits (Prasanth Jayachandran reviewed by Sergey Shelukhin)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/e8b2c605
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/e8b2c605
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/e8b2c605

Branch: refs/heads/hbase-metastore
Commit: e8b2c605a05d06c1bfcd4c6bc611bc7f83306b38
Parents: cf0481f
Author: Prasanth Jayachandran <j....@gmail.com>
Authored: Fri Aug 14 14:21:51 2015 -0700
Committer: Prasanth Jayachandran <j....@gmail.com>
Committed: Fri Aug 14 14:21:51 2015 -0700

----------------------------------------------------------------------
 .../hadoop/hive/ql/io/orc/OrcInputFormat.java   | 30 ++++++-
 .../apache/hadoop/hive/ql/io/orc/Reader.java    |  7 ++
 .../hadoop/hive/ql/io/orc/ReaderImpl.java       |  5 +-
 .../hive/ql/io/orc/TestInputOutputFormat.java   | 95 ++++++++++++++++++--
 4 files changed, 125 insertions(+), 12 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/e8b2c605/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcInputFormat.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcInputFormat.java b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcInputFormat.java
index fe2eccd..6ed7872 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcInputFormat.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcInputFormat.java
@@ -717,6 +717,7 @@ public class OrcInputFormat  implements InputFormat<NullWritable, OrcStruct>,
     private ReaderImpl.FileMetaInfo fileMetaInfo;
     private Metadata metadata;
     private List<OrcProto.Type> types;
+    private boolean[] includedCols;
     private final boolean isOriginal;
     private final List<DeltaMetaData> deltas;
     private final boolean hasBase;
@@ -830,8 +831,14 @@ public class OrcInputFormat  implements InputFormat<NullWritable, OrcStruct>,
         hosts = new String[hostList.size()];
         hostList.toArray(hosts);
       }
+
+      // scale the raw data size to split level based on ratio of split wrt to file length
+      final long fileLen = file.getLen();
+      final double splitRatio = (double) length / (double) fileLen;
+      final long scaledProjSize = projColsUncompressedSize > 0 ?
+          (long) (splitRatio * projColsUncompressedSize) : fileLen;
       return new OrcSplit(file.getPath(), offset, length, hosts, fileMetaInfo,
-          isOriginal, hasBase, deltas, projColsUncompressedSize);
+          isOriginal, hasBase, deltas, scaledProjSize);
     }
 
     /**
@@ -845,11 +852,12 @@ public class OrcInputFormat  implements InputFormat<NullWritable, OrcStruct>,
 
       // figure out which stripes we need to read
       boolean[] includeStripe = null;
+
       // we can't eliminate stripes if there are deltas because the
       // deltas may change the rows making them match the predicate.
       if (deltas.isEmpty()) {
         Reader.Options options = new Reader.Options();
-        options.include(genIncludedColumns(types, context.conf, isOriginal));
+        options.include(includedCols);
         setSearchArgument(options, types, context.conf, isOriginal);
         // only do split pruning if HIVE-8732 has been fixed in the writer
         if (options.getSearchArgument() != null &&
@@ -930,8 +938,6 @@ public class OrcInputFormat  implements InputFormat<NullWritable, OrcStruct>,
     private void populateAndCacheStripeDetails() throws IOException {
       Reader orcReader = OrcFile.createReader(file.getPath(),
           OrcFile.readerOptions(context.conf).filesystem(fs));
-      List<String> projCols = ColumnProjectionUtils.getReadColumnNames(context.conf);
-      // TODO: produce projColsUncompressedSize from projCols
       if (fileInfo != null) {
         stripes = fileInfo.stripeInfos;
         fileMetaInfo = fileInfo.fileMetaInfo;
@@ -959,6 +965,22 @@ public class OrcInputFormat  implements InputFormat<NullWritable, OrcStruct>,
                   metadata, types, fileMetaInfo, writerVersion));
         }
       }
+      includedCols = genIncludedColumns(types, context.conf, isOriginal);
+      projColsUncompressedSize = computeProjectionSize(orcReader, includedCols, isOriginal);
+    }
+
+    private long computeProjectionSize(final Reader orcReader, final boolean[] includedCols,
+        final boolean isOriginal) {
+      final int rootIdx = getRootColumn(isOriginal);
+      List<Integer> internalColIds = Lists.newArrayList();
+      if (includedCols != null) {
+        for (int i = 0; i < includedCols.length; i++) {
+          if (includedCols[i]) {
+            internalColIds.add(rootIdx + i);
+          }
+        }
+      }
+      return orcReader.getRawDataSizeFromColIndices(internalColIds);
     }
 
     private boolean isStripeSatisfyPredicate(StripeStatistics stripeStatistics,
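
The scaling itself is simple proportional arithmetic; a worked example with made-up numbers
(illustrative only, mirroring the scaledProjSize computation above, not taken from the tests
below):

    public class SplitProjectionScaling {
      public static void main(String[] args) {
        long fileLen = 1000L;                    // total ORC file length in bytes
        long projColsUncompressedSize = 400000L; // raw size of the projected columns, whole file
        long splitLength = 250L;                 // length of one split

        double splitRatio = (double) splitLength / (double) fileLen;          // 0.25
        long scaledProjSize = (long) (splitRatio * projColsUncompressedSize); // 100000

        // Before this change every split of the file reported the full 400000 bytes;
        // now each split carries only its proportional share of the projected size.
        System.out.println(scaledProjSize);
      }
    }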

http://git-wip-us.apache.org/repos/asf/hive/blob/e8b2c605/ql/src/java/org/apache/hadoop/hive/ql/io/orc/Reader.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/Reader.java b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/Reader.java
index 6f4f013..7bddefc 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/Reader.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/Reader.java
@@ -52,6 +52,13 @@ public interface Reader {
   long getRawDataSizeOfColumns(List<String> colNames);
 
   /**
+   * Get the deserialized data size of the specified column ids
+   * @param colIds - internal column id (check orcfiledump for column ids)
+   * @return raw data size of columns
+   */
+  long getRawDataSizeFromColIndices(List<Integer> colIds);
+
+  /**
    * Get the user metadata keys.
    * @return the set of metadata keys
    */

http://git-wip-us.apache.org/repos/asf/hive/blob/e8b2c605/ql/src/java/org/apache/hadoop/hive/ql/io/orc/ReaderImpl.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/ReaderImpl.java b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/ReaderImpl.java
index 3c0de3c..a6448b6 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/ReaderImpl.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/ReaderImpl.java
@@ -575,7 +575,8 @@ public class ReaderImpl implements Reader {
     return deserializedSize;
   }
 
-  private long getRawDataSizeFromColIndices(List<Integer> colIndices) {
+  @Override
+  public long getRawDataSizeFromColIndices(List<Integer> colIndices) {
     long result = 0;
     for (int colIdx : colIndices) {
       result += getRawDataSizeOfColumn(colIdx);
@@ -620,7 +621,7 @@ public class ReaderImpl implements Reader {
     case BYTE:
       return numVals * JavaDataModel.get().primitive1();
     default:
-      LOG.debug("Unknown primitive category.");
+      LOG.debug("Unknown primitive category: " + type.getKind());
       break;
     }
 

http://git-wip-us.apache.org/repos/asf/hive/blob/e8b2c605/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestInputOutputFormat.java
----------------------------------------------------------------------
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestInputOutputFormat.java b/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestInputOutputFormat.java
index 6cb8529..0c12c89 100644
--- a/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestInputOutputFormat.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestInputOutputFormat.java
@@ -17,9 +17,6 @@
  */
 package org.apache.hadoop.hive.ql.io.orc;
 
-import com.esotericsoftware.kryo.Kryo;
-import com.esotericsoftware.kryo.io.Output;
-
 import static org.junit.Assert.assertArrayEquals;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
@@ -108,6 +105,9 @@ import org.junit.Rule;
 import org.junit.Test;
 import org.junit.rules.TestName;
 
+import com.esotericsoftware.kryo.Kryo;
+import com.esotericsoftware.kryo.io.Output;
+
 public class TestInputOutputFormat {
 
   public static String toKryo(SearchArgument sarg) {
@@ -902,14 +902,25 @@ public class TestInputOutputFormat {
     }
     fill(buffer, offset);
     footer.addTypes(OrcProto.Type.newBuilder()
-                     .setKind(OrcProto.Type.Kind.STRUCT)
-                     .addFieldNames("col1")
-                     .addSubtypes(1));
+        .setKind(OrcProto.Type.Kind.STRUCT)
+        .addFieldNames("col1")
+        .addSubtypes(1));
     footer.addTypes(OrcProto.Type.newBuilder()
         .setKind(OrcProto.Type.Kind.STRING));
     footer.setNumberOfRows(1000 * stripeLengths.length)
           .setHeaderLength(headerLen)
           .setContentLength(offset - headerLen);
+    footer.addStatistics(OrcProto.ColumnStatistics.newBuilder()
+        .setNumberOfValues(1000 * stripeLengths.length).build());
+    footer.addStatistics(OrcProto.ColumnStatistics.newBuilder()
+        .setNumberOfValues(1000 * stripeLengths.length)
+        .setStringStatistics(
+            OrcProto.StringStatistics.newBuilder()
+                .setMaximum("zzz")
+                .setMinimum("aaa")
+                .setSum(1000 * 3 * stripeLengths.length)
+                .build()
+        ).build());
     footer.build().writeTo(buffer);
     int footerEnd = buffer.getLength();
     OrcProto.PostScript ps =
@@ -1013,6 +1024,78 @@ public class TestInputOutputFormat {
   }
 
   @Test
+  public void testProjectedColumnSize() throws Exception {
+    long[] stripeSizes =
+        new long[]{200, 200, 200, 200, 100};
+    MockFileSystem fs = new MockFileSystem(conf,
+        new MockFile("mock:/a/file", 500,
+            createMockOrcFile(stripeSizes),
+            new MockBlock("host1-1", "host1-2", "host1-3"),
+            new MockBlock("host2-1", "host0", "host2-3"),
+            new MockBlock("host0", "host3-2", "host3-3"),
+            new MockBlock("host4-1", "host4-2", "host4-3"),
+            new MockBlock("host5-1", "host5-2", "host5-3")));
+    conf.setInt(OrcInputFormat.MAX_SPLIT_SIZE, 300);
+    conf.setInt(OrcInputFormat.MIN_SPLIT_SIZE, 200);
+    conf.setBoolean(ColumnProjectionUtils.READ_ALL_COLUMNS, false);
+    conf.set(ColumnProjectionUtils.READ_COLUMN_IDS_CONF_STR, "0");
+    OrcInputFormat.Context context = new OrcInputFormat.Context(conf);
+    OrcInputFormat.SplitGenerator splitter =
+        new OrcInputFormat.SplitGenerator(new OrcInputFormat.SplitInfo(context, fs,
+            fs.getFileStatus(new Path("/a/file")), null, true,
+            new ArrayList<AcidInputFormat.DeltaMetaData>(), true, null, null));
+    List<OrcSplit> results = splitter.call();
+    OrcSplit result = results.get(0);
+    assertEquals(3, results.size());
+    assertEquals(3, result.getStart());
+    assertEquals(400, result.getLength());
+    assertEquals(167468, result.getProjectedColumnsUncompressedSize());
+    result = results.get(1);
+    assertEquals(403, result.getStart());
+    assertEquals(400, result.getLength());
+    assertEquals(167468, result.getProjectedColumnsUncompressedSize());
+    result = results.get(2);
+    assertEquals(803, result.getStart());
+    assertEquals(100, result.getLength());
+    assertEquals(41867, result.getProjectedColumnsUncompressedSize());
+
+    // test min = 0, max = 0 generates each stripe
+    conf.setInt(OrcInputFormat.MIN_SPLIT_SIZE, 0);
+    conf.setInt(OrcInputFormat.MAX_SPLIT_SIZE, 0);
+    context = new OrcInputFormat.Context(conf);
+    splitter = new OrcInputFormat.SplitGenerator(new OrcInputFormat.SplitInfo(context, fs,
+        fs.getFileStatus(new Path("/a/file")), null, true,
+        new ArrayList<AcidInputFormat.DeltaMetaData>(),
+        true, null, null));
+    results = splitter.call();
+    assertEquals(5, results.size());
+    for (int i = 0; i < stripeSizes.length; ++i) {
+      assertEquals("checking stripe " + i + " size",
+          stripeSizes[i], results.get(i).getLength());
+      if (i == stripeSizes.length - 1) {
+        assertEquals(41867, results.get(i).getProjectedColumnsUncompressedSize());
+      } else {
+        assertEquals(83734, results.get(i).getProjectedColumnsUncompressedSize());
+      }
+    }
+
+    // single split
+    conf.setInt(OrcInputFormat.MIN_SPLIT_SIZE, 100000);
+    conf.setInt(OrcInputFormat.MAX_SPLIT_SIZE, 1000);
+    context = new OrcInputFormat.Context(conf);
+    splitter = new OrcInputFormat.SplitGenerator(new OrcInputFormat.SplitInfo(context, fs,
+        fs.getFileStatus(new Path("/a/file")), null, true,
+        new ArrayList<AcidInputFormat.DeltaMetaData>(),
+        true, null, null));
+    results = splitter.call();
+    assertEquals(1, results.size());
+    result = results.get(0);
+    assertEquals(3, result.getStart());
+    assertEquals(900, result.getLength());
+    assertEquals(376804, result.getProjectedColumnsUncompressedSize());
+  }
+
+  @Test
   @SuppressWarnings("unchecked,deprecation")
   public void testInOutFormat() throws Exception {
     Properties properties = new Properties();


[40/50] [abbrv] hive git commit: HIVE-11441: No DDL allowed on table if user accidentally set table location wrong (Daniel Dai reviewed by Thejas Nair)

Posted by se...@apache.org.
HIVE-11441: No DDL allowed on table if user accidentally set table location wrong (Daniel Dai reviewed by Thejas Nair)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/2ccd0616
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/2ccd0616
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/2ccd0616

Branch: refs/heads/hbase-metastore
Commit: 2ccd061691cd52ed9fa341b61590edb2c022b031
Parents: 17e95c7
Author: Daniel Dai <da...@hortonworks.com>
Authored: Fri Aug 14 15:28:44 2015 -0700
Committer: Daniel Dai <da...@hortonworks.com>
Committed: Fri Aug 14 15:28:44 2015 -0700

----------------------------------------------------------------------
 .../apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java | 11 +++++++++++
 .../queries/clientnegative/alter_table_wrong_location.q  |  4 ++++
 .../clientnegative/alter_table_wrong_location.q.out      |  9 +++++++++
 3 files changed, 24 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/2ccd0616/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java
index 21625bc..9f8c756 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java
@@ -23,6 +23,7 @@ import org.antlr.runtime.tree.CommonTree;
 import org.antlr.runtime.tree.Tree;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hive.common.JavaUtils;
 import org.apache.hadoop.hive.conf.HiveConf;
@@ -137,6 +138,7 @@ import org.apache.hadoop.mapred.InputFormat;
 import org.apache.hadoop.mapred.TextInputFormat;
 import org.apache.hadoop.util.StringUtils;
 
+import java.io.FileNotFoundException;
 import java.io.Serializable;
 import java.lang.reflect.Constructor;
 import java.lang.reflect.InvocationTargetException;
@@ -1464,6 +1466,15 @@ public class DDLSemanticAnalyzer extends BaseSemanticAnalyzer {
       HashMap<String, String> partSpec) throws SemanticException {
 
     String newLocation = unescapeSQLString(ast.getChild(0).getText());
+    try {
+      // To make sure host/port pair is valid, the status of the location
+      // does not matter
+      FileSystem.get(new URI(newLocation), conf).getFileStatus(new Path(newLocation));
+    } catch (FileNotFoundException e) {
+      // Only check that the host/port pair is valid; whether the file exists or not does not matter
+    } catch (Exception e) {
+      throw new SemanticException("Cannot connect to namenode, please check if host/port pair for " + newLocation + " is valid", e);
+    }
     addLocationToOutputs(newLocation);
     AlterTableDesc alterTblDesc = new AlterTableDesc(tableName, newLocation, partSpec);
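
Restated in isolation, the validation pattern is: resolve the filesystem for the new location
and stat it; a missing path is acceptable (the namenode answered), while anything else, such
as a bad host/port pair, fails the DDL early. A minimal sketch (hypothetical helper name,
mirroring the hunk above):

    import java.io.FileNotFoundException;
    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hive.ql.parse.SemanticException;

    public class LocationCheckSketch {
      // Hypothetical helper: fail fast if the location's host/port pair cannot be reached.
      static void checkLocationReachable(String newLocation, Configuration conf) throws SemanticException {
        try {
          FileSystem.get(new URI(newLocation), conf).getFileStatus(new Path(newLocation));
        } catch (FileNotFoundException e) {
          // The namenode answered; the path simply does not exist yet, which is fine.
        } catch (Exception e) {
          throw new SemanticException(
              "Cannot connect to namenode, please check if host/port pair for " + newLocation + " is valid", e);
        }
      }
    }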
 

http://git-wip-us.apache.org/repos/asf/hive/blob/2ccd0616/ql/src/test/queries/clientnegative/alter_table_wrong_location.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientnegative/alter_table_wrong_location.q b/ql/src/test/queries/clientnegative/alter_table_wrong_location.q
new file mode 100644
index 0000000..3721867
--- /dev/null
+++ b/ql/src/test/queries/clientnegative/alter_table_wrong_location.q
@@ -0,0 +1,4 @@
+create table testwrongloc(id int);
+
+-- Assume port 12345 is not open
+alter table testwrongloc set location "hdfs://localhost:12345/tmp/testwrongloc";

http://git-wip-us.apache.org/repos/asf/hive/blob/2ccd0616/ql/src/test/results/clientnegative/alter_table_wrong_location.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientnegative/alter_table_wrong_location.q.out b/ql/src/test/results/clientnegative/alter_table_wrong_location.q.out
new file mode 100644
index 0000000..d788d55
--- /dev/null
+++ b/ql/src/test/results/clientnegative/alter_table_wrong_location.q.out
@@ -0,0 +1,9 @@
+PREHOOK: query: create table testwrongloc(id int)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@testwrongloc
+POSTHOOK: query: create table testwrongloc(id int)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@testwrongloc
+#### A masked pattern was here ####


[46/50] [abbrv] hive git commit: HIVE-11568 : merge master into branch (Sergey Shelukhin)

Posted by se...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/c528294b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java
----------------------------------------------------------------------
diff --cc metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java
index e58c146,cdbae95..72b2cc3
--- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java
+++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java
@@@ -31,6 -33,8 +33,8 @@@ import javax.annotation.Generated
  import org.slf4j.Logger;
  import org.slf4j.LoggerFactory;
  
+ @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
 -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3")
++@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-14")
  public class ThriftHiveMetastore {
  
    /**
@@@ -290,254 -294,254 +296,256 @@@
  
    public interface AsyncIface extends com.facebook.fb303.FacebookService .AsyncIface {
  
-     public void getMetaConf(String key, org.apache.thrift.async.AsyncMethodCallback<AsyncClient.getMetaConf_call> resultHandler) throws org.apache.thrift.TException;
+     public void getMetaConf(String key, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+ 
+     public void setMetaConf(String key, String value, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
  
-     public void setMetaConf(String key, String value, org.apache.thrift.async.AsyncMethodCallback<AsyncClient.setMetaConf_call> resultHandler) throws org.apache.thrift.TException;
+     public void create_database(Database database, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
  
-     public void create_database(Database database, org.apache.thrift.async.AsyncMethodCallback<AsyncClient.create_database_call> resultHandler) throws org.apache.thrift.TException;
+     public void get_database(String name, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
  
-     public void get_database(String name, org.apache.thrift.async.AsyncMethodCallback<AsyncClient.get_database_call> resultHandler) throws org.apache.thrift.TException;
+     public void drop_database(String name, boolean deleteData, boolean cascade, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
  
-     public void drop_database(String name, boolean deleteData, boolean cascade, org.apache.thrift.async.AsyncMethodCallback<AsyncClient.drop_database_call> resultHandler) throws org.apache.thrift.TException;
+     public void get_databases(String pattern, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
  
-     public void get_databases(String pattern, org.apache.thrift.async.AsyncMethodCallback<AsyncClient.get_databases_call> resultHandler) throws org.apache.thrift.TException;
+     public void get_all_databases(org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
  
-     public void get_all_databases(org.apache.thrift.async.AsyncMethodCallback<AsyncClient.get_all_databases_call> resultHandler) throws org.apache.thrift.TException;
+     public void alter_database(String dbname, Database db, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
  
-     public void alter_database(String dbname, Database db, org.apache.thrift.async.AsyncMethodCallback<AsyncClient.alter_database_call> resultHandler) throws org.apache.thrift.TException;
+     public void get_type(String name, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
  
-     public void get_type(String name, org.apache.thrift.async.AsyncMethodCallback<AsyncClient.get_type_call> resultHandler) throws org.apache.thrift.TException;
+     public void create_type(Type type, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
  
-     public void create_type(Type type, org.apache.thrift.async.AsyncMethodCallback<AsyncClient.create_type_call> resultHandler) throws org.apache.thrift.TException;
+     public void drop_type(String type, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
  
-     public void drop_type(String type, org.apache.thrift.async.AsyncMethodCallback<AsyncClient.drop_type_call> resultHandler) throws org.apache.thrift.TException;
+     public void get_type_all(String name, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
  
-     public void get_type_all(String name, org.apache.thrift.async.AsyncMethodCallback<AsyncClient.get_type_all_call> resultHandler) throws org.apache.thrift.TException;
+     public void get_fields(String db_name, String table_name, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
  
-     public void get_fields(String db_name, String table_name, org.apache.thrift.async.AsyncMethodCallback<AsyncClient.get_fields_call> resultHandler) throws org.apache.thrift.TException;
+     public void get_fields_with_environment_context(String db_name, String table_name, EnvironmentContext environment_context, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
  
-     public void get_fields_with_environment_context(String db_name, String table_name, EnvironmentContext environment_context, org.apache.thrift.async.AsyncMethodCallback<AsyncClient.get_fields_with_environment_context_call> resultHandler) throws org.apache.thrift.TException;
+     public void get_schema(String db_name, String table_name, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
  
-     public void get_schema(String db_name, String table_name, org.apache.thrift.async.AsyncMethodCallback<AsyncClient.get_schema_call> resultHandler) throws org.apache.thrift.TException;
+     public void get_schema_with_environment_context(String db_name, String table_name, EnvironmentContext environment_context, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
  
-     public void get_schema_with_environment_context(String db_name, String table_name, EnvironmentContext environment_context, org.apache.thrift.async.AsyncMethodCallback<AsyncClient.get_schema_with_environment_context_call> resultHandler) throws org.apache.thrift.TException;
+     public void create_table(Table tbl, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
  
-     public void create_table(Table tbl, org.apache.thrift.async.AsyncMethodCallback<AsyncClient.create_table_call> resultHandler) throws org.apache.thrift.TException;
+     public void create_table_with_environment_context(Table tbl, EnvironmentContext environment_context, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
  
-     public void create_table_with_environment_context(Table tbl, EnvironmentContext environment_context, org.apache.thrift.async.AsyncMethodCallback<AsyncClient.create_table_with_environment_context_call> resultHandler) throws org.apache.thrift.TException;
+     public void drop_table(String dbname, String name, boolean deleteData, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
  
-     public void drop_table(String dbname, String name, boolean deleteData, org.apache.thrift.async.AsyncMethodCallback<AsyncClient.drop_table_call> resultHandler) throws org.apache.thrift.TException;
+     public void drop_table_with_environment_context(String dbname, String name, boolean deleteData, EnvironmentContext environment_context, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
  
-     public void drop_table_with_environment_context(String dbname, String name, boolean deleteData, EnvironmentContext environment_context, org.apache.thrift.async.AsyncMethodCallback<AsyncClient.drop_table_with_environment_context_call> resultHandler) throws org.apache.thrift.TException;
+     public void get_tables(String db_name, String pattern, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
  
-     public void get_tables(String db_name, String pattern, org.apache.thrift.async.AsyncMethodCallback<AsyncClient.get_tables_call> resultHandler) throws org.apache.thrift.TException;
+     public void get_all_tables(String db_name, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
  
-     public void get_all_tables(String db_name, org.apache.thrift.async.AsyncMethodCallback<AsyncClient.get_all_tables_call> resultHandler) throws org.apache.thrift.TException;
+     public void get_table(String dbname, String tbl_name, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
  
-     public void get_table(String dbname, String tbl_name, org.apache.thrift.async.AsyncMethodCallback<AsyncClient.get_table_call> resultHandler) throws org.apache.thrift.TException;
+     public void get_table_objects_by_name(String dbname, List<String> tbl_names, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
  
-     public void get_table_objects_by_name(String dbname, List<String> tbl_names, org.apache.thrift.async.AsyncMethodCallback<AsyncClient.get_table_objects_by_name_call> resultHandler) throws org.apache.thrift.TException;
+     public void get_table_names_by_filter(String dbname, String filter, short max_tables, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
  
-     public void get_table_names_by_filter(String dbname, String filter, short max_tables, org.apache.thrift.async.AsyncMethodCallback<AsyncClient.get_table_names_by_filter_call> resultHandler) throws org.apache.thrift.TException;
+     public void alter_table(String dbname, String tbl_name, Table new_tbl, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
  
-     public void alter_table(String dbname, String tbl_name, Table new_tbl, org.apache.thrift.async.AsyncMethodCallback<AsyncClient.alter_table_call> resultHandler) throws org.apache.thrift.TException;
+     public void alter_table_with_environment_context(String dbname, String tbl_name, Table new_tbl, EnvironmentContext environment_context, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
  
-     public void alter_table_with_environment_context(String dbname, String tbl_name, Table new_tbl, EnvironmentContext environment_context, org.apache.thrift.async.AsyncMethodCallback<AsyncClient.alter_table_with_environment_context_call> resultHandler) throws org.apache.thrift.TException;
+     public void alter_table_with_cascade(String dbname, String tbl_name, Table new_tbl, boolean cascade, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
  
-     public void alter_table_with_cascade(String dbname, String tbl_name, Table new_tbl, boolean cascade, org.apache.thrift.async.AsyncMethodCallback<AsyncClient.alter_table_with_cascade_call> resultHandler) throws org.apache.thrift.TException;
+     public void add_partition(Partition new_part, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
  
-     public void add_partition(Partition new_part, org.apache.thrift.async.AsyncMethodCallback<AsyncClient.add_partition_call> resultHandler) throws org.apache.thrift.TException;
+     public void add_partition_with_environment_context(Partition new_part, EnvironmentContext environment_context, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
  
-     public void add_partition_with_environment_context(Partition new_part, EnvironmentContext environment_context, org.apache.thrift.async.AsyncMethodCallback<AsyncClient.add_partition_with_environment_context_call> resultHandler) throws org.apache.thrift.TException;
+     public void add_partitions(List<Partition> new_parts, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
  
-     public void add_partitions(List<Partition> new_parts, org.apache.thrift.async.AsyncMethodCallback<AsyncClient.add_partitions_call> resultHandler) throws org.apache.thrift.TException;
+     public void add_partitions_pspec(List<PartitionSpec> new_parts, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
  
-     public void add_partitions_pspec(List<PartitionSpec> new_parts, org.apache.thrift.async.AsyncMethodCallback<AsyncClient.add_partitions_pspec_call> resultHandler) throws org.apache.thrift.TException;
+     public void append_partition(String db_name, String tbl_name, List<String> part_vals, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
  
-     public void append_partition(String db_name, String tbl_name, List<String> part_vals, org.apache.thrift.async.AsyncMethodCallback<AsyncClient.append_partition_call> resultHandler) throws org.apache.thrift.TException;
+     public void add_partitions_req(AddPartitionsRequest request, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
  
-     public void add_partitions_req(AddPartitionsRequest request, org.apache.thrift.async.AsyncMethodCallback<AsyncClient.add_partitions_req_call> resultHandler) throws org.apache.thrift.TException;
+     public void append_partition_with_environment_context(String db_name, String tbl_name, List<String> part_vals, EnvironmentContext environment_context, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
  
-     public void append_partition_with_environment_context(String db_name, String tbl_name, List<String> part_vals, EnvironmentContext environment_context, org.apache.thrift.async.AsyncMethodCallback<AsyncClient.append_partition_with_environment_context_call> resultHandler) throws org.apache.thrift.TException;
+     public void append_partition_by_name(String db_name, String tbl_name, String part_name, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
  
-     public void append_partition_by_name(String db_name, String tbl_name, String part_name, org.apache.thrift.async.AsyncMethodCallback<AsyncClient.append_partition_by_name_call> resultHandler) throws org.apache.thrift.TException;
+     public void append_partition_by_name_with_environment_context(String db_name, String tbl_name, String part_name, EnvironmentContext environment_context, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
  
-     public void append_partition_by_name_with_environment_context(String db_name, String tbl_name, String part_name, EnvironmentContext environment_context, org.apache.thrift.async.AsyncMethodCallback<AsyncClient.append_partition_by_name_with_environment_context_call> resultHandler) throws org.apache.thrift.TException;
+     public void drop_partition(String db_name, String tbl_name, List<String> part_vals, boolean deleteData, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
  
-     public void drop_partition(String db_name, String tbl_name, List<String> part_vals, boolean deleteData, org.apache.thrift.async.AsyncMethodCallback<AsyncClient.drop_partition_call> resultHandler) throws org.apache.thrift.TException;
+     public void drop_partition_with_environment_context(String db_name, String tbl_name, List<String> part_vals, boolean deleteData, EnvironmentContext environment_context, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
  
-     public void drop_partition_with_environment_context(String db_name, String tbl_name, List<String> part_vals, boolean deleteData, EnvironmentContext environment_context, org.apache.thrift.async.AsyncMethodCallback<AsyncClient.drop_partition_with_environment_context_call> resultHandler) throws org.apache.thrift.TException;
+     public void drop_partition_by_name(String db_name, String tbl_name, String part_name, boolean deleteData, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
  
-     public void drop_partition_by_name(String db_name, String tbl_name, String part_name, boolean deleteData, org.apache.thrift.async.AsyncMethodCallback<AsyncClient.drop_partition_by_name_call> resultHandler) throws org.apache.thrift.TException;
+     public void drop_partition_by_name_with_environment_context(String db_name, String tbl_name, String part_name, boolean deleteData, EnvironmentContext environment_context, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
  
-     public void drop_partition_by_name_with_environment_context(String db_name, String tbl_name, String part_name, boolean deleteData, EnvironmentContext environment_context, org.apache.thrift.async.AsyncMethodCallback<AsyncClient.drop_partition_by_name_with_environment_context_call> resultHandler) throws org.apache.thrift.TException;
+     public void drop_partitions_req(DropPartitionsRequest req, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
  
-     public void drop_partitions_req(DropPartitionsRequest req, org.apache.thrift.async.AsyncMethodCallback<AsyncClient.drop_partitions_req_call> resultHandler) throws org.apache.thrift.TException;
+     public void get_partition(String db_name, String tbl_name, List<String> part_vals, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
  
-     public void get_partition(String db_name, String tbl_name, List<String> part_vals, org.apache.thrift.async.AsyncMethodCallback<AsyncClient.get_partition_call> resultHandler) throws org.apache.thrift.TException;
+     public void exchange_partition(Map<String,String> partitionSpecs, String source_db, String source_table_name, String dest_db, String dest_table_name, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
  
-     public void exchange_partition(Map<String,String> partitionSpecs, String source_db, String source_table_name, String dest_db, String dest_table_name, org.apache.thrift.async.AsyncMethodCallback<AsyncClient.exchange_partition_call> resultHandler) throws org.apache.thrift.TException;
+     public void get_partition_with_auth(String db_name, String tbl_name, List<String> part_vals, String user_name, List<String> group_names, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
  
-     public void get_partition_with_auth(String db_name, String tbl_name, List<String> part_vals, String user_name, List<String> group_names, org.apache.thrift.async.AsyncMethodCallback<AsyncClient.get_partition_with_auth_call> resultHandler) throws org.apache.thrift.TException;
+     public void get_partition_by_name(String db_name, String tbl_name, String part_name, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
  
-     public void get_partition_by_name(String db_name, String tbl_name, String part_name, org.apache.thrift.async.AsyncMethodCallback<AsyncClient.get_partition_by_name_call> resultHandler) throws org.apache.thrift.TException;
+     public void get_partitions(String db_name, String tbl_name, short max_parts, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
  
-     public void get_partitions(String db_name, String tbl_name, short max_parts, org.apache.thrift.async.AsyncMethodCallback<AsyncClient.get_partitions_call> resultHandler) throws org.apache.thrift.TException;
+     public void get_partitions_with_auth(String db_name, String tbl_name, short max_parts, String user_name, List<String> group_names, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
  
-     public void get_partitions_with_auth(String db_name, String tbl_name, short max_parts, String user_name, List<String> group_names, org.apache.thrift.async.AsyncMethodCallback<AsyncClient.get_partitions_with_auth_call> resultHandler) throws org.apache.thrift.TException;
+     public void get_partitions_pspec(String db_name, String tbl_name, int max_parts, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
  
-     public void get_partitions_pspec(String db_name, String tbl_name, int max_parts, org.apache.thrift.async.AsyncMethodCallback<AsyncClient.get_partitions_pspec_call> resultHandler) throws org.apache.thrift.TException;
+     public void get_partition_names(String db_name, String tbl_name, short max_parts, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
  
-     public void get_partition_names(String db_name, String tbl_name, short max_parts, org.apache.thrift.async.AsyncMethodCallback<AsyncClient.get_partition_names_call> resultHandler) throws org.apache.thrift.TException;
+     public void get_partitions_ps(String db_name, String tbl_name, List<String> part_vals, short max_parts, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
  
-     public void get_partitions_ps(String db_name, String tbl_name, List<String> part_vals, short max_parts, org.apache.thrift.async.AsyncMethodCallback<AsyncClient.get_partitions_ps_call> resultHandler) throws org.apache.thrift.TException;
+     public void get_partitions_ps_with_auth(String db_name, String tbl_name, List<String> part_vals, short max_parts, String user_name, List<String> group_names, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
  
-     public void get_partitions_ps_with_auth(String db_name, String tbl_name, List<String> part_vals, short max_parts, String user_name, List<String> group_names, org.apache.thrift.async.AsyncMethodCallback<AsyncClient.get_partitions_ps_with_auth_call> resultHandler) throws org.apache.thrift.TException;
+     public void get_partition_names_ps(String db_name, String tbl_name, List<String> part_vals, short max_parts, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
  
-     public void get_partition_names_ps(String db_name, String tbl_name, List<String> part_vals, short max_parts, org.apache.thrift.async.AsyncMethodCallback<AsyncClient.get_partition_names_ps_call> resultHandler) throws org.apache.thrift.TException;
+     public void get_partitions_by_filter(String db_name, String tbl_name, String filter, short max_parts, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
  
-     public void get_partitions_by_filter(String db_name, String tbl_name, String filter, short max_parts, org.apache.thrift.async.AsyncMethodCallback<AsyncClient.get_partitions_by_filter_call> resultHandler) throws org.apache.thrift.TException;
+     public void get_part_specs_by_filter(String db_name, String tbl_name, String filter, int max_parts, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
  
-     public void get_part_specs_by_filter(String db_name, String tbl_name, String filter, int max_parts, org.apache.thrift.async.AsyncMethodCallback<AsyncClient.get_part_specs_by_filter_call> resultHandler) throws org.apache.thrift.TException;
+     public void get_partitions_by_expr(PartitionsByExprRequest req, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
  
-     public void get_partitions_by_expr(PartitionsByExprRequest req, org.apache.thrift.async.AsyncMethodCallback<AsyncClient.get_partitions_by_expr_call> resultHandler) throws org.apache.thrift.TException;
+     public void get_partitions_by_names(String db_name, String tbl_name, List<String> names, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
  
-     public void get_partitions_by_names(String db_name, String tbl_name, List<String> names, org.apache.thrift.async.AsyncMethodCallback<AsyncClient.get_partitions_by_names_call> resultHandler) throws org.apache.thrift.TException;
+     public void alter_partition(String db_name, String tbl_name, Partition new_part, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
  
-     public void alter_partition(String db_name, String tbl_name, Partition new_part, org.apache.thrift.async.AsyncMethodCallback<AsyncClient.alter_partition_call> resultHandler) throws org.apache.thrift.TException;
+     public void alter_partitions(String db_name, String tbl_name, List<Partition> new_parts, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
  
-     public void alter_partitions(String db_name, String tbl_name, List<Partition> new_parts, org.apache.thrift.async.AsyncMethodCallback<AsyncClient.alter_partitions_call> resultHandler) throws org.apache.thrift.TException;
+     public void alter_partition_with_environment_context(String db_name, String tbl_name, Partition new_part, EnvironmentContext environment_context, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
  
-     public void alter_partition_with_environment_context(String db_name, String tbl_name, Partition new_part, EnvironmentContext environment_context, org.apache.thrift.async.AsyncMethodCallback<AsyncClient.alter_partition_with_environment_context_call> resultHandler) throws org.apache.thrift.TException;
+     public void rename_partition(String db_name, String tbl_name, List<String> part_vals, Partition new_part, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
  
-     public void rename_partition(String db_name, String tbl_name, List<String> part_vals, Partition new_part, org.apache.thrift.async.AsyncMethodCallback<AsyncClient.rename_partition_call> resultHandler) throws org.apache.thrift.TException;
+     public void partition_name_has_valid_characters(List<String> part_vals, boolean throw_exception, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
  
-     public void partition_name_has_valid_characters(List<String> part_vals, boolean throw_exception, org.apache.thrift.async.AsyncMethodCallback<AsyncClient.partition_name_has_valid_characters_call> resultHandler) throws org.apache.thrift.TException;
+     public void get_config_value(String name, String defaultValue, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
  
-     public void get_config_value(String name, String defaultValue, org.apache.thrift.async.AsyncMethodCallback<AsyncClient.get_config_value_call> resultHandler) throws org.apache.thrift.TException;
+     public void partition_name_to_vals(String part_name, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
  
-     public void partition_name_to_vals(String part_name, org.apache.thrift.async.AsyncMethodCallback<AsyncClient.partition_name_to_vals_call> resultHandler) throws org.apache.thrift.TException;
+     public void partition_name_to_spec(String part_name, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
  
-     public void partition_name_to_spec(String part_name, org.apache.thrift.async.AsyncMethodCallback<AsyncClient.partition_name_to_spec_call> resultHandler) throws org.apache.thrift.TException;
+     public void markPartitionForEvent(String db_name, String tbl_name, Map<String,String> part_vals, PartitionEventType eventType, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
  
-     public void markPartitionForEvent(String db_name, String tbl_name, Map<String,String> part_vals, PartitionEventType eventType, org.apache.thrift.async.AsyncMethodCallback<AsyncClient.markPartitionForEvent_call> resultHandler) throws org.apache.thrift.TException;
+     public void isPartitionMarkedForEvent(String db_name, String tbl_name, Map<String,String> part_vals, PartitionEventType eventType, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
  
-     public void isPartitionMarkedForEvent(String db_name, String tbl_name, Map<String,String> part_vals, PartitionEventType eventType, org.apache.thrift.async.AsyncMethodCallback<AsyncClient.isPartitionMarkedForEvent_call> resultHandler) throws org.apache.thrift.TException;
+     public void add_index(Index new_index, Table index_table, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
  
-     public void add_index(Index new_index, Table index_table, org.apache.thrift.async.AsyncMethodCallback<AsyncClient.add_index_call> resultHandler) throws org.apache.thrift.TException;
+     public void alter_index(String dbname, String base_tbl_name, String idx_name, Index new_idx, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
  
-     public void alter_index(String dbname, String base_tbl_name, String idx_name, Index new_idx, org.apache.thrift.async.AsyncMethodCallback<AsyncClient.alter_index_call> resultHandler) throws org.apache.thrift.TException;
+     public void drop_index_by_name(String db_name, String tbl_name, String index_name, boolean deleteData, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
  
-     public void drop_index_by_name(String db_name, String tbl_name, String index_name, boolean deleteData, org.apache.thrift.async.AsyncMethodCallback<AsyncClient.drop_index_by_name_call> resultHandler) throws org.apache.thrift.TException;
+     public void get_index_by_name(String db_name, String tbl_name, String index_name, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
  
-     public void get_index_by_name(String db_name, String tbl_name, String index_name, org.apache.thrift.async.AsyncMethodCallback<AsyncClient.get_index_by_name_call> resultHandler) throws org.apache.thrift.TException;
+     public void get_indexes(String db_name, String tbl_name, short max_indexes, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
  
-     public void get_indexes(String db_name, String tbl_name, short max_indexes, org.apache.thrift.async.AsyncMethodCallback<AsyncClient.get_indexes_call> resultHandler) throws org.apache.thrift.TException;
+     public void get_index_names(String db_name, String tbl_name, short max_indexes, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
  
-     public void get_index_names(String db_name, String tbl_name, short max_indexes, org.apache.thrift.async.AsyncMethodCallback<AsyncClient.get_index_names_call> resultHandler) throws org.apache.thrift.TException;
+     public void update_table_column_statistics(ColumnStatistics stats_obj, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
  
-     public void update_table_column_statistics(ColumnStatistics stats_obj, org.apache.thrift.async.AsyncMethodCallback<AsyncClient.update_table_column_statistics_call> resultHandler) throws org.apache.thrift.TException;
+     public void update_partition_column_statistics(ColumnStatistics stats_obj, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
  
-     public void update_partition_column_statistics(ColumnStatistics stats_obj, org.apache.thrift.async.AsyncMethodCallback<AsyncClient.update_partition_column_statistics_call> resultHandler) throws org.apache.thrift.TException;
+     public void get_table_column_statistics(String db_name, String tbl_name, String col_name, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
  
-     public void get_table_column_statistics(String db_name, String tbl_name, String col_name, org.apache.thrift.async.AsyncMethodCallback<AsyncClient.get_table_column_statistics_call> resultHandler) throws org.apache.thrift.TException;
+     public void get_partition_column_statistics(String db_name, String tbl_name, String part_name, String col_name, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
  
-     public void get_partition_column_statistics(String db_name, String tbl_name, String part_name, String col_name, org.apache.thrift.async.AsyncMethodCallback<AsyncClient.get_partition_column_statistics_call> resultHandler) throws org.apache.thrift.TException;
+     public void get_table_statistics_req(TableStatsRequest request, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
  
-     public void get_table_statistics_req(TableStatsRequest request, org.apache.thrift.async.AsyncMethodCallback<AsyncClient.get_table_statistics_req_call> resultHandler) throws org.apache.thrift.TException;
+     public void get_partitions_statistics_req(PartitionsStatsRequest request, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
  
-     public void get_partitions_statistics_req(PartitionsStatsRequest request, org.apache.thrift.async.AsyncMethodCallback<AsyncClient.get_partitions_statistics_req_call> resultHandler) throws org.apache.thrift.TException;
+     public void get_aggr_stats_for(PartitionsStatsRequest request, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
  
-     public void get_aggr_stats_for(PartitionsStatsRequest request, org.apache.thrift.async.AsyncMethodCallback<AsyncClient.get_aggr_stats_for_call> resultHandler) throws org.apache.thrift.TException;
+     public void set_aggr_stats_for(SetPartitionsStatsRequest request, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
  
-     public void set_aggr_stats_for(SetPartitionsStatsRequest request, org.apache.thrift.async.AsyncMethodCallback<AsyncClient.set_aggr_stats_for_call> resultHandler) throws org.apache.thrift.TException;
+     public void delete_partition_column_statistics(String db_name, String tbl_name, String part_name, String col_name, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
  
-     public void delete_partition_column_statistics(String db_name, String tbl_name, String part_name, String col_name, org.apache.thrift.async.AsyncMethodCallback<AsyncClient.delete_partition_column_statistics_call> resultHandler) throws org.apache.thrift.TException;
+     public void delete_table_column_statistics(String db_name, String tbl_name, String col_name, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
  
-     public void delete_table_column_statistics(String db_name, String tbl_name, String col_name, org.apache.thrift.async.AsyncMethodCallback<AsyncClient.delete_table_column_statistics_call> resultHandler) throws org.apache.thrift.TException;
+     public void create_function(Function func, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
  
-     public void create_function(Function func, org.apache.thrift.async.AsyncMethodCallback<AsyncClient.create_function_call> resultHandler) throws org.apache.thrift.TException;
+     public void drop_function(String dbName, String funcName, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
  
-     public void drop_function(String dbName, String funcName, org.apache.thrift.async.AsyncMethodCallback<AsyncClient.drop_function_call> resultHandler) throws org.apache.thrift.TException;
+     public void alter_function(String dbName, String funcName, Function newFunc, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
  
-     public void alter_function(String dbName, String funcName, Function newFunc, org.apache.thrift.async.AsyncMethodCallback<AsyncClient.alter_function_call> resultHandler) throws org.apache.thrift.TException;
+     public void get_functions(String dbName, String pattern, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
  
-     public void get_functions(String dbName, String pattern, org.apache.thrift.async.AsyncMethodCallback<AsyncClient.get_functions_call> resultHandler) throws org.apache.thrift.TException;
+     public void get_function(String dbName, String funcName, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
  
-     public void get_function(String dbName, String funcName, org.apache.thrift.async.AsyncMethodCallback<AsyncClient.get_function_call> resultHandler) throws org.apache.thrift.TException;
+     public void get_all_functions(org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
  
-     public void create_role(Role role, org.apache.thrift.async.AsyncMethodCallback<AsyncClient.create_role_call> resultHandler) throws org.apache.thrift.TException;
+     public void create_role(Role role, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
  
-     public void drop_role(String role_name, org.apache.thrift.async.AsyncMethodCallback<AsyncClient.drop_role_call> resultHandler) throws org.apache.thrift.TException;
+     public void drop_role(String role_name, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
  
-     public void get_role_names(org.apache.thrift.async.AsyncMethodCallback<AsyncClient.get_role_names_call> resultHandler) throws org.apache.thrift.TException;
+     public void get_role_names(org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
  
-     public void grant_role(String role_name, String principal_name, PrincipalType principal_type, String grantor, PrincipalType grantorType, boolean grant_option, org.apache.thrift.async.AsyncMethodCallback<AsyncClient.grant_role_call> resultHandler) throws org.apache.thrift.TException;
+     public void grant_role(String role_name, String principal_name, PrincipalType principal_type, String grantor, PrincipalType grantorType, boolean grant_option, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
  
-     public void revoke_role(String role_name, String principal_name, PrincipalType principal_type, org.apache.thrift.async.AsyncMethodCallback<AsyncClient.revoke_role_call> resultHandler) throws org.apache.thrift.TException;
+     public void revoke_role(String role_name, String principal_name, PrincipalType principal_type, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
  
-     public void list_roles(String principal_name, PrincipalType principal_type, org.apache.thrift.async.AsyncMethodCallback<AsyncClient.list_roles_call> resultHandler) throws org.apache.thrift.TException;
+     public void list_roles(String principal_name, PrincipalType principal_type, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
  
-     public void grant_revoke_role(GrantRevokeRoleRequest request, org.apache.thrift.async.AsyncMethodCallback<AsyncClient.grant_revoke_role_call> resultHandler) throws org.apache.thrift.TException;
+     public void grant_revoke_role(GrantRevokeRoleRequest request, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
  
-     public void get_principals_in_role(GetPrincipalsInRoleRequest request, org.apache.thrift.async.AsyncMethodCallback<AsyncClient.get_principals_in_role_call> resultHandler) throws org.apache.thrift.TException;
+     public void get_principals_in_role(GetPrincipalsInRoleRequest request, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
  
-     public void get_role_grants_for_principal(GetRoleGrantsForPrincipalRequest request, org.apache.thrift.async.AsyncMethodCallback<AsyncClient.get_role_grants_for_principal_call> resultHandler) throws org.apache.thrift.TException;
+     public void get_role_grants_for_principal(GetRoleGrantsForPrincipalRequest request, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
  
-     public void get_privilege_set(HiveObjectRef hiveObject, String user_name, List<String> group_names, org.apache.thrift.async.AsyncMethodCallback<AsyncClient.get_privilege_set_call> resultHandler) throws org.apache.thrift.TException;
+     public void get_privilege_set(HiveObjectRef hiveObject, String user_name, List<String> group_names, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
  
-     public void list_privileges(String principal_name, PrincipalType principal_type, HiveObjectRef hiveObject, org.apache.thrift.async.AsyncMethodCallback<AsyncClient.list_privileges_call> resultHandler) throws org.apache.thrift.TException;
+     public void list_privileges(String principal_name, PrincipalType principal_type, HiveObjectRef hiveObject, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
  
-     public void grant_privileges(PrivilegeBag privileges, org.apache.thrift.async.AsyncMethodCallback<AsyncClient.grant_privileges_call> resultHandler) throws org.apache.thrift.TException;
+     public void grant_privileges(PrivilegeBag privileges, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
  
-     public void revoke_privileges(PrivilegeBag privileges, org.apache.thrift.async.AsyncMethodCallback<AsyncClient.revoke_privileges_call> resultHandler) throws org.apache.thrift.TException;
+     public void revoke_privileges(PrivilegeBag privileges, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
  
-     public void grant_revoke_privileges(GrantRevokePrivilegeRequest request, org.apache.thrift.async.AsyncMethodCallback<AsyncClient.grant_revoke_privileges_call> resultHandler) throws org.apache.thrift.TException;
+     public void grant_revoke_privileges(GrantRevokePrivilegeRequest request, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
  
-     public void set_ugi(String user_name, List<String> group_names, org.apache.thrift.async.AsyncMethodCallback<AsyncClient.set_ugi_call> resultHandler) throws org.apache.thrift.TException;
+     public void set_ugi(String user_name, List<String> group_names, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
  
-     public void get_delegation_token(String token_owner, String renewer_kerberos_principal_name, org.apache.thrift.async.AsyncMethodCallback<AsyncClient.get_delegation_token_call> resultHandler) throws org.apache.thrift.TException;
+     public void get_delegation_token(String token_owner, String renewer_kerberos_principal_name, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
  
-     public void renew_delegation_token(String token_str_form, org.apache.thrift.async.AsyncMethodCallback<AsyncClient.renew_delegation_token_call> resultHandler) throws org.apache.thrift.TException;
+     public void renew_delegation_token(String token_str_form, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
  
-     public void cancel_delegation_token(String token_str_form, org.apache.thrift.async.AsyncMethodCallback<AsyncClient.cancel_delegation_token_call> resultHandler) throws org.apache.thrift.TException;
+     public void cancel_delegation_token(String token_str_form, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
  
-     public void get_open_txns(org.apache.thrift.async.AsyncMethodCallback<AsyncClient.get_open_txns_call> resultHandler) throws org.apache.thrift.TException;
+     public void get_open_txns(org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
  
-     public void get_open_txns_info(org.apache.thrift.async.AsyncMethodCallback<AsyncClient.get_open_txns_info_call> resultHandler) throws org.apache.thrift.TException;
+     public void get_open_txns_info(org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
  
-     public void open_txns(OpenTxnRequest rqst, org.apache.thrift.async.AsyncMethodCallback<AsyncClient.open_txns_call> resultHandler) throws org.apache.thrift.TException;
+     public void open_txns(OpenTxnRequest rqst, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
  
-     public void abort_txn(AbortTxnRequest rqst, org.apache.thrift.async.AsyncMethodCallback<AsyncClient.abort_txn_call> resultHandler) throws org.apache.thrift.TException;
+     public void abort_txn(AbortTxnRequest rqst, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
  
-     public void commit_txn(CommitTxnRequest rqst, org.apache.thrift.async.AsyncMethodCallback<AsyncClient.commit_txn_call> resultHandler) throws org.apache.thrift.TException;
+     public void commit_txn(CommitTxnRequest rqst, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
  
-     public void lock(LockRequest rqst, org.apache.thrift.async.AsyncMethodCallback<AsyncClient.lock_call> resultHandler) throws org.apache.thrift.TException;
+     public void lock(LockRequest rqst, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
  
-     public void check_lock(CheckLockRequest rqst, org.apache.thrift.async.AsyncMethodCallback<AsyncClient.check_lock_call> resultHandler) throws org.apache.thrift.TException;
+     public void check_lock(CheckLockRequest rqst, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
  
-     public void unlock(UnlockRequest rqst, org.apache.thrift.async.AsyncMethodCallback<AsyncClient.unlock_call> resultHandler) throws org.apache.thrift.TException;
+     public void unlock(UnlockRequest rqst, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
  
-     public void show_locks(ShowLocksRequest rqst, org.apache.thrift.async.AsyncMethodCallback<AsyncClient.show_locks_call> resultHandler) throws org.apache.thrift.TException;
+     public void show_locks(ShowLocksRequest rqst, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
  
-     public void heartbeat(HeartbeatRequest ids, org.apache.thrift.async.AsyncMethodCallback<AsyncClient.heartbeat_call> resultHandler) throws org.apache.thrift.TException;
+     public void heartbeat(HeartbeatRequest ids, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
  
-     public void heartbeat_txn_range(HeartbeatTxnRangeRequest txns, org.apache.thrift.async.AsyncMethodCallback<AsyncClient.heartbeat_txn_range_call> resultHandler) throws org.apache.thrift.TException;
+     public void heartbeat_txn_range(HeartbeatTxnRangeRequest txns, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
  
-     public void compact(CompactionRequest rqst, org.apache.thrift.async.AsyncMethodCallback<AsyncClient.compact_call> resultHandler) throws org.apache.thrift.TException;
+     public void compact(CompactionRequest rqst, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
  
-     public void show_compact(ShowCompactRequest rqst, org.apache.thrift.async.AsyncMethodCallback<AsyncClient.show_compact_call> resultHandler) throws org.apache.thrift.TException;
+     public void show_compact(ShowCompactRequest rqst, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
  
-     public void add_dynamic_partitions(AddDynamicPartitions rqst, org.apache.thrift.async.AsyncMethodCallback<AsyncClient.add_dynamic_partitions_call> resultHandler) throws org.apache.thrift.TException;
+     public void add_dynamic_partitions(AddDynamicPartitions rqst, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
  
-     public void get_next_notification(NotificationEventRequest rqst, org.apache.thrift.async.AsyncMethodCallback<AsyncClient.get_next_notification_call> resultHandler) throws org.apache.thrift.TException;
+     public void get_next_notification(NotificationEventRequest rqst, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
  
-     public void get_current_notificationEventId(org.apache.thrift.async.AsyncMethodCallback<AsyncClient.get_current_notificationEventId_call> resultHandler) throws org.apache.thrift.TException;
+     public void get_current_notificationEventId(org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
  
-     public void fire_listener_event(FireEventRequest rqst, org.apache.thrift.async.AsyncMethodCallback<AsyncClient.fire_listener_event_call> resultHandler) throws org.apache.thrift.TException;
+     public void fire_listener_event(FireEventRequest rqst, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
  
-     public void flushCache(org.apache.thrift.async.AsyncMethodCallback<AsyncClient.flushCache_call> resultHandler) throws org.apache.thrift.TException;
++    public void flushCache(org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
 +
    }
  
    public static class Client extends com.facebook.fb303.FacebookService.Client implements Iface {
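The hunk above regenerates the AsyncIface so that every resultHandler parameter is a raw org.apache.thrift.async.AsyncMethodCallback instead of the parameterized AsyncMethodCallback<AsyncClient.*_call>. As a caller-side illustration only (not part of this patch), a minimal sketch of invoking the newly added flushCache through the regenerated async client might look like the following; the metastore host/port, the class name, and the console output are assumptions:

import org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore;
import org.apache.thrift.async.AsyncMethodCallback;
import org.apache.thrift.async.TAsyncClientManager;
import org.apache.thrift.protocol.TBinaryProtocol;
import org.apache.thrift.transport.TNonblockingSocket;

public class FlushCacheClientSketch {
  public static void main(String[] args) throws Exception {
    // Standard Thrift async plumbing; "localhost"/9083 are assumed values.
    TAsyncClientManager manager = new TAsyncClientManager();
    TNonblockingSocket transport = new TNonblockingSocket("localhost", 9083);
    ThriftHiveMetastore.AsyncClient client = new ThriftHiveMetastore.AsyncClient(
        new TBinaryProtocol.Factory(), manager, transport);

    // The regenerated signature takes a raw AsyncMethodCallback; the callback is
    // still completed with the flushCache_call object, whose getResult() surfaces
    // any TException raised by the call.
    client.flushCache(new AsyncMethodCallback<ThriftHiveMetastore.AsyncClient.flushCache_call>() {
      public void onComplete(ThriftHiveMetastore.AsyncClient.flushCache_call response) {
        try {
          response.getResult();   // void call: returns normally or throws
          System.out.println("flushCache acknowledged");
        } catch (Exception e) {
          onError(e);
        }
      }
      public void onError(Exception e) {
        e.printStackTrace();
      }
    });
  }
}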
@@@ -8613,35 -8652,6 +8675,35 @@@
        }
      }
  
-     public void flushCache(org.apache.thrift.async.AsyncMethodCallback<flushCache_call> resultHandler) throws org.apache.thrift.TException {
++    public void flushCache(org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException {
 +      checkReady();
 +      flushCache_call method_call = new flushCache_call(resultHandler, this, ___protocolFactory, ___transport);
 +      this.___currentMethod = method_call;
 +      ___manager.call(method_call);
 +    }
 +
 +    public static class flushCache_call extends org.apache.thrift.async.TAsyncMethodCall {
-       public flushCache_call(org.apache.thrift.async.AsyncMethodCallback<flushCache_call> resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException {
++      public flushCache_call(org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException {
 +        super(client, protocolFactory, transport, resultHandler, false);
 +      }
 +
 +      public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException {
 +        prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("flushCache", org.apache.thrift.protocol.TMessageType.CALL, 0));
 +        flushCache_args args = new flushCache_args();
 +        args.write(prot);
 +        prot.writeMessageEnd();
 +      }
 +
 +      public void getResult() throws org.apache.thrift.TException {
 +        if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) {
 +          throw new IllegalStateException("Method call not finished!");
 +        }
 +        org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
 +        org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport);
 +        (new Client(prot)).recv_flushCache();
 +      }
 +    }
 +
    }
  
    public static class Processor<I extends Iface> extends com.facebook.fb303.FacebookService.Processor<I> implements org.apache.thrift.TProcessor {
@@@ -11979,96 -12013,256 +12066,277 @@@
        }
      }
  
 +    public static class flushCache<I extends Iface> extends org.apache.thrift.ProcessFunction<I, flushCache_args> {
 +      public flushCache() {
 +        super("flushCache");
 +      }
 +
 +      public flushCache_args getEmptyArgsInstance() {
 +        return new flushCache_args();
 +      }
 +
 +      protected boolean isOneway() {
 +        return false;
 +      }
 +
 +      public flushCache_result getResult(I iface, flushCache_args args) throws org.apache.thrift.TException {
 +        flushCache_result result = new flushCache_result();
 +        iface.flushCache();
 +        return result;
 +      }
 +    }
 +
    }
  
-   public static class getMetaConf_args implements org.apache.thrift.TBase<getMetaConf_args, getMetaConf_args._Fields>, java.io.Serializable, Cloneable   {
-     private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("getMetaConf_args");
- 
-     private static final org.apache.thrift.protocol.TField KEY_FIELD_DESC = new org.apache.thrift.protocol.TField("key", org.apache.thrift.protocol.TType.STRING, (short)1);
+   public static class AsyncProcessor<I extends AsyncIface> extends com.facebook.fb303.FacebookService.AsyncProcessor<I> {
+     private static final Logger LOGGER = LoggerFactory.getLogger(AsyncProcessor.class.getName());
+     public AsyncProcessor(I iface) {
+       super(iface, getProcessMap(new HashMap<String, org.apache.thrift.AsyncProcessFunction<I, ? extends org.apache.thrift.TBase, ?>>()));
+     }
  
-     private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
-     static {
-       schemes.put(StandardScheme.class, new getMetaConf_argsStandardSchemeFactory());
-       schemes.put(TupleScheme.class, new getMetaConf_argsTupleSchemeFactory());
+     protected AsyncProcessor(I iface, Map<String,  org.apache.thrift.AsyncProcessFunction<I, ? extends  org.apache.thrift.TBase, ?>> processMap) {
+       super(iface, getProcessMap(processMap));
      }
  
-     private String key; // required
+     private static <I extends AsyncIface> Map<String,  org.apache.thrift.AsyncProcessFunction<I, ? extends  org.apache.thrift.TBase,?>> getProcessMap(Map<String,  org.apache.thrift.AsyncProcessFunction<I, ? extends  org.apache.thrift.TBase, ?>> processMap) {
+       processMap.put("getMetaConf", new getMetaConf());
+       processMap.put("setMetaConf", new setMetaConf());
+       processMap.put("create_database", new create_database());
+       processMap.put("get_database", new get_database());
+       processMap.put("drop_database", new drop_database());
+       processMap.put("get_databases", new get_databases());
+       processMap.put("get_all_databases", new get_all_databases());
+       processMap.put("alter_database", new alter_database());
+       processMap.put("get_type", new get_type());
+       processMap.put("create_type", new create_type());
+       processMap.put("drop_type", new drop_type());
+       processMap.put("get_type_all", new get_type_all());
+       processMap.put("get_fields", new get_fields());
+       processMap.put("get_fields_with_environment_context", new get_fields_with_environment_context());
+       processMap.put("get_schema", new get_schema());
+       processMap.put("get_schema_with_environment_context", new get_schema_with_environment_context());
+       processMap.put("create_table", new create_table());
+       processMap.put("create_table_with_environment_context", new create_table_with_environment_context());
+       processMap.put("drop_table", new drop_table());
+       processMap.put("drop_table_with_environment_context", new drop_table_with_environment_context());
+       processMap.put("get_tables", new get_tables());
+       processMap.put("get_all_tables", new get_all_tables());
+       processMap.put("get_table", new get_table());
+       processMap.put("get_table_objects_by_name", new get_table_objects_by_name());
+       processMap.put("get_table_names_by_filter", new get_table_names_by_filter());
+       processMap.put("alter_table", new alter_table());
+       processMap.put("alter_table_with_environment_context", new alter_table_with_environment_context());
+       processMap.put("alter_table_with_cascade", new alter_table_with_cascade());
+       processMap.put("add_partition", new add_partition());
+       processMap.put("add_partition_with_environment_context", new add_partition_with_environment_context());
+       processMap.put("add_partitions", new add_partitions());
+       processMap.put("add_partitions_pspec", new add_partitions_pspec());
+       processMap.put("append_partition", new append_partition());
+       processMap.put("add_partitions_req", new add_partitions_req());
+       processMap.put("append_partition_with_environment_context", new append_partition_with_environment_context());
+       processMap.put("append_partition_by_name", new append_partition_by_name());
+       processMap.put("append_partition_by_name_with_environment_context", new append_partition_by_name_with_environment_context());
+       processMap.put("drop_partition", new drop_partition());
+       processMap.put("drop_partition_with_environment_context", new drop_partition_with_environment_context());
+       processMap.put("drop_partition_by_name", new drop_partition_by_name());
+       processMap.put("drop_partition_by_name_with_environment_context", new drop_partition_by_name_with_environment_context());
+       processMap.put("drop_partitions_req", new drop_partitions_req());
+       processMap.put("get_partition", new get_partition());
+       processMap.put("exchange_partition", new exchange_partition());
+       processMap.put("get_partition_with_auth", new get_partition_with_auth());
+       processMap.put("get_partition_by_name", new get_partition_by_name());
+       processMap.put("get_partitions", new get_partitions());
+       processMap.put("get_partitions_with_auth", new get_partitions_with_auth());
+       processMap.put("get_partitions_pspec", new get_partitions_pspec());
+       processMap.put("get_partition_names", new get_partition_names());
+       processMap.put("get_partitions_ps", new get_partitions_ps());
+       processMap.put("get_partitions_ps_with_auth", new get_partitions_ps_with_auth());
+       processMap.put("get_partition_names_ps", new get_partition_names_ps());
+       processMap.put("get_partitions_by_filter", new get_partitions_by_filter());
+       processMap.put("get_part_specs_by_filter", new get_part_specs_by_filter());
+       processMap.put("get_partitions_by_expr", new get_partitions_by_expr());
+       processMap.put("get_partitions_by_names", new get_partitions_by_names());
+       processMap.put("alter_partition", new alter_partition());
+       processMap.put("alter_partitions", new alter_partitions());
+       processMap.put("alter_partition_with_environment_context", new alter_partition_with_environment_context());
+       processMap.put("rename_partition", new rename_partition());
+       processMap.put("partition_name_has_valid_characters", new partition_name_has_valid_characters());
+       processMap.put("get_config_value", new get_config_value());
+       processMap.put("partition_name_to_vals", new partition_name_to_vals());
+       processMap.put("partition_name_to_spec", new partition_name_to_spec());
+       processMap.put("markPartitionForEvent", new markPartitionForEvent());
+       processMap.put("isPartitionMarkedForEvent", new isPartitionMarkedForEvent());
+       processMap.put("add_index", new add_index());
+       processMap.put("alter_index", new alter_index());
+       processMap.put("drop_index_by_name", new drop_index_by_name());
+       processMap.put("get_index_by_name", new get_index_by_name());
+       processMap.put("get_indexes", new get_indexes());
+       processMap.put("get_index_names", new get_index_names());
+       processMap.put("update_table_column_statistics", new update_table_column_statistics());
+       processMap.put("update_partition_column_statistics", new update_partition_column_statistics());
+       processMap.put("get_table_column_statistics", new get_table_column_statistics());
+       processMap.put("get_partition_column_statistics", new get_partition_column_statistics());
+       processMap.put("get_table_statistics_req", new get_table_statistics_req());
+       processMap.put("get_partitions_statistics_req", new get_partitions_statistics_req());
+       processMap.put("get_aggr_stats_for", new get_aggr_stats_for());
+       processMap.put("set_aggr_stats_for", new set_aggr_stats_for());
+       processMap.put("delete_partition_column_statistics", new delete_partition_column_statistics());
+       processMap.put("delete_table_column_statistics", new delete_table_column_statistics());
+       processMap.put("create_function", new create_function());
+       processMap.put("drop_function", new drop_function());
+       processMap.put("alter_function", new alter_function());
+       processMap.put("get_functions", new get_functions());
+       processMap.put("get_function", new get_function());
+       processMap.put("get_all_functions", new get_all_functions());
+       processMap.put("create_role", new create_role());
+       processMap.put("drop_role", new drop_role());
+       processMap.put("get_role_names", new get_role_names());
+       processMap.put("grant_role", new grant_role());
+       processMap.put("revoke_role", new revoke_role());
+       processMap.put("list_roles", new list_roles());
+       processMap.put("grant_revoke_role", new grant_revoke_role());
+       processMap.put("get_principals_in_role", new get_principals_in_role());
+       processMap.put("get_role_grants_for_principal", new get_role_grants_for_principal());
+       processMap.put("get_privilege_set", new get_privilege_set());
+       processMap.put("list_privileges", new list_privileges());
+       processMap.put("grant_privileges", new grant_privileges());
+       processMap.put("revoke_privileges", new revoke_privileges());
+       processMap.put("grant_revoke_privileges", new grant_revoke_privileges());
+       processMap.put("set_ugi", new set_ugi());
+       processMap.put("get_delegation_token", new get_delegation_token());
+       processMap.put("renew_delegation_token", new renew_delegation_token());
+       processMap.put("cancel_delegation_token", new cancel_delegation_token());
+       processMap.put("get_open_txns", new get_open_txns());
+       processMap.put("get_open_txns_info", new get_open_txns_info());
+       processMap.put("open_txns", new open_txns());
+       processMap.put("abort_txn", new abort_txn());
+       processMap.put("commit_txn", new commit_txn());
+       processMap.put("lock", new lock());
+       processMap.put("check_lock", new check_lock());
+       processMap.put("unlock", new unlock());
+       processMap.put("show_locks", new show_locks());
+       processMap.put("heartbeat", new heartbeat());
+       processMap.put("heartbeat_txn_range", new heartbeat_txn_range());
+       processMap.put("compact", new compact());
+       processMap.put("show_compact", new show_compact());
+       processMap.put("add_dynamic_partitions", new add_dynamic_partitions());
+       processMap.put("get_next_notification", new get_next_notification());
+       processMap.put("get_current_notificationEventId", new get_current_notificationEventId());
+       processMap.put("fire_listener_event", new fire_listener_event());
++      processMap.put("flushCache", new flushCache());
+       return processMap;
+     }
  
-     /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
-     public enum _Fields implements org.apache.thrift.TFieldIdEnum {
-       KEY((short)1, "key");
+     public static class getMetaConf<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, getMetaConf_args, String> {
+       public getMetaConf() {
+         super("getMetaConf");
+       }
  
-       private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
+       public getMetaConf_args getEmptyArgsInstance() {
+         return new getMetaConf_args();
+       }
  
-       static {
-         for (_Fields field : EnumSet.allOf(_Fields.class)) {
-           byName.put(field.getFieldName(), field);
-         }
+       public AsyncMethodCallback<String> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
+         final org.apache.thrift.AsyncProcessFunction fcall = this;
+         return new AsyncMethodCallback<String>() { 
+           public void onComplete(String o) {
+             getMetaConf_result result = new getMetaConf_result();
+             result.success = o;
+             try {
+               fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
+               return;
+             } catch (Exception e) {
+               LOGGER.error("Exception writing to internal frame buffer", e);
+             }
+             fb.close();
+           }
+           public void onError(Exception e) {
+             byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
+             org.apache.thrift.TBase msg;
+             getMetaConf_result result = new getMetaConf_result();
+             if (e instanceof MetaException) {
+                         result.o1 = (MetaException) e;
+                         result.setO1IsSet(true);
+                         msg = result;
+             }
+              else 
+             {
+               msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
+               msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
+             }
+             try {
+               fcall.sendResponse(fb,msg,msgType,seqid);
+               return;
+             } catch (Exception ex) {
+               LOGGER.error("Exception writing to internal frame buffer", ex);
+             }
+             fb.close();
+           }
+         };
        }
  
-       /**
-        * Find the _Fields constant that matches fieldId, or null if its not found.
-        */
-       public static _Fields findByThriftId(int fieldId) {
-         switch(fieldId) {
-           case 1: // KEY
-             return KEY;
-           default:
-             return null;
-         }
+       protected boolean isOneway() {
+         return false;
        }
  
-       /**
-        * Find the _Fields constant that matches fieldId, throwing an exception
-        * if it is not found.
-        */
-       public static _Fields findByThriftIdOrThrow(int fieldId) {
-         _Fields fields = findByThriftId(fieldId);
-         if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
-         return fields;
+       public void start(I iface, getMetaConf_args args, org.apache.thrift.async.AsyncMethodCallback<String> resultHandler) throws TException {
+         iface.getMetaConf(args.key,resultHandler);
        }
+     }
  
-       /**
-        * Find the _Fields constant that matches name, or null if its not found.
-        */
-       public static _Fields findByName(String name) {
-         return byName.get(name);
+     public static class setMetaConf<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, setMetaConf_args, Void> {
+       public setMetaConf() {
+         super("setMetaConf");
        }
  
-       private final short _thriftId;
-       private final String _fieldName;
+       public setMetaConf_args getEmptyArgsInstance() {
+         return new setMetaConf_args();
+       }
  
-       _Fields(short thriftId, String fieldName) {
-         _thriftId = thriftId;
-         _fieldName = fieldName;
+       public AsyncMethodCallback<Void> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
+         final org.apache.thrift.AsyncProcessFunction fcall = this;
+         return new AsyncMethodCallback<Void>() { 
+           public void onComplete(Void o) {
+             setMetaConf_result result = new setMetaConf_result();
+             try {
+               fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
+               return;
+             } catch (Exception e) {
+               LOGGER.error("Exception writing to internal frame buffer", e);
+             }
+             fb.close();
+           }
+           public void onError(Exception e) {
+             byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
+             org.apache.thrift.TBase msg;
+             setMetaConf_result result = new setMetaConf_result();
+             if (e instanceof MetaException) {
+                         result.o1 = (MetaException) e;
+                         result.setO1IsSet(true);
+                         msg = result;
+             }
+              else 
+             {
+               msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
+               msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
+             }
+             try {
+               fcall.sendResponse(fb,msg,msgType,seqid);
+               return;
+             } catch (Exception ex) {
+               LOGGER.error("Exception writing to internal frame buffer", ex);
+             }
+             fb.close();
+           }
+         };
        }
  
-       public short getThriftFieldId() {
-         return _thriftId;
+       protected boolean isOneway() {
+         return false;
        }
  
-       public String getFieldName() {
-         return _fieldName;
+       public void start(I iface, setMetaConf_args args, org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler) throws TException {
+         iface.setMetaConf(args.key, args.value,resultHandler);
        }
      }
  
@@@ -12548,133 -13295,7199 +13369,7249 @@@
        }
      }
  
-     public Object getFieldValue(_Fields field) {
-       switch (field) {
-       case SUCCESS:
-         return getSuccess();
+     public static class drop_table<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, drop_table_args, Void> {
+       public drop_table() {
+         super("drop_table");
+       }
  
-       case O1:
-         return getO1();
+       public drop_table_args getEmptyArgsInstance() {
+         return new drop_table_args();
+       }
  
+       public AsyncMethodCallback<Void> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
+         final org.apache.thrift.AsyncProcessFunction fcall = this;
+         return new AsyncMethodCallback<Void>() { 
+           public void onComplete(Void o) {
+             drop_table_result result = new drop_table_result();
+             try {
+               fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
+               return;
+             } catch (Exception e) {
+               LOGGER.error("Exception writing to internal frame buffer", e);
+             }
+             fb.close();
+           }
+           public void onError(Exception e) {
+             byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
+             org.apache.thrift.TBase msg;
+             drop_table_result result = new drop_table_result();
+             if (e instanceof NoSuchObjectException) {
+                         result.o1 = (NoSuchObjectException) e;
+                         result.setO1IsSet(true);
+                         msg = result;
+             }
+             else             if (e instanceof MetaException) {
+                         result.o3 = (MetaException) e;
+                         result.setO3IsSet(true);
+                         msg = result;
+             }
+              else 
+             {
+               msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
+               msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
+             }
+             try {
+               fcall.sendResponse(fb,msg,msgType,seqid);
+               return;
+             } catch (Exception ex) {
+               LOGGER.error("Exception writing to internal frame buffer", ex);
+             }
+             fb.close();
+           }
+         };
        }
-       throw new IllegalStateException();
-     }
  
-     /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
-     public boolean isSet(_Fields field) {
-       if (field == null) {
-         throw new IllegalArgumentException();
+       protected boolean isOneway() {
+         return false;
        }
  
-       switch (field) {
-       case SUCCESS:
-         return isSetSuccess();
-       case O1:
-         return isSetO1();
+       public void start(I iface, drop_table_args args, org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler) throws TException {
+         iface.drop_table(args.dbname, args.name, args.deleteData,resultHandler);
        }
-       throw new IllegalStateException();
      }
  
-     @Override
-     public boolean equals(Object that) {
-       if (that == null)
-         return false;
-       if (that instanceof getMetaConf_result)
-         return this.equals((getMetaConf_result)that);
-       return false;
-     }
+     public static class drop_table_with_environment_context<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, drop_table_with_environment_context_args, Void> {
+       public drop_table_with_environment_context() {
+         super("drop_table_with_environment_context");
+       }
  
-     public boolean equals(getMetaConf_result that) {
-       if (that == null)
-         return false;
+       public drop_table_with_environment_context_args getEmptyArgsInstance() {
+         return new drop_table_with_environment_context_args();
+       }
  
-       boolean this_present_success = true && this.isSetSuccess();
-       boolean that_present_success = true && that.isSetSuccess();
-       if (this_present_success || that_present_success) {
-         if (!(this_present_success && that_present_success))
-           return false;
-         if (!this.success.equals(that.success))
-           return false;
+       public AsyncMethodCallback<Void> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
+         final org.apache.thrift.AsyncProcessFunction fcall = this;
+         return new AsyncMethodCallback<Void>() { 
+           public void onComplete(Void o) {
+             drop_table_with_environment_context_result result = new drop_table_with_environment_context_result();
+             try {
+               fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
+               return;
+             } catch (Exception e) {
+               LOGGER.error("Exception writing to internal frame buffer", e);
+             }
+             fb.close();
+           }
+           public void onError(Exception e) {
+             byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
+             org.apache.thrift.TBase msg;
+             drop_table_with_environment_context_result result = new drop_table_with_environment_context_result();
+             if (e instanceof NoSuchObjectException) {
+                         result.o1 = (NoSuchObjectException) e;
+                         result.setO1IsSet(true);
+                         msg = result;
+             }
+             else             if (e instanceof MetaException) {
+                         result.o3 = (MetaException) e;
+                         result.setO3IsSet(true);
+                         msg = result;
+             }
+              else 
+             {
+               msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
+               msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
+             }
+             try {
+               fcall.sendResponse(fb,msg,msgType,seqid);
+               return;
+             } catch (Exception ex) {
+               LOGGER.error("Exception writing to internal frame buffer", ex);
+             }
+             fb.close();
+           }
+         };
        }
  
-       boolean this_present_o1 = true && this.isSetO1();
-       boolean that_present_o1 = true && that.isSetO1();
-       if (this_present_o1 || that_present_o1) {
-         if (!(this_present_o1 && that_present_o1))
-           return false;
-         if (!this.o1.equals(that.o1))
-           return false;
+       protected boolean isOneway() {
+         return false;
        }
  
-       return true;
+       public void start(I iface, drop_table_with_environment_context_args args, org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler) throws TException {
+         iface.drop_table_with_environment_context(args.dbname, args.name, args.deleteData, args.environment_context,resultHandler);
+       }
      }
  
-     @Override
-     public int hashCode() {
-       HashCodeBuilder builder = new HashCodeBuilder();
- 
-       boolean present_success = true && (isSetSuccess());
-       builder.append(present_success);
-       if (present_success)
-         builder.append(success);
- 
-       boolean present_o1 = true && (isSetO1());
-       builder.append(present_o1);
-       if (present_o1)
-         builder.append(o1);
+     public static class get_tables<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, get_tables_args, List<String>> {
+       public get_tables() {
+         super("get_tables");
+       }
  
-       return builder.toHashCode();
-     }
+       public get_tables_args getEmptyArgsInstance() {
+         return new get_tables_args();
+       }
  
-     public int compareTo(getMetaConf_result other) {
-       if (!getClass().equals(other.getClass())) {
-         return getClass().getName().compareTo(other.getClass().getName());
+       public AsyncMethodCallback<List<String>> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
+         final org.apache.thrift.AsyncProcessFunction fcall = this;
+         return new AsyncMethodCallback<List<String>>() { 
+           public void onComplete(List<String> o) {
+             get_tables_result result = new get_tables_result();
+             result.success = o;
+             try {
+               fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
+               return;
+             } catch (Exception e) {
+               LOGGER.error("Exception writing to internal frame buffer", e);
+             }
+             fb.close();
+           }
+           public void onError(Exception e) {
+             byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
+             org.apache.thrift.TBase msg;
+             get_tables_result result = new get_tables_result();
+             if (e instanceof MetaException) {
+                         result.o1 = (MetaException) e;
+                         result.setO1IsSet(true);
+                         msg = result;
+             }
+              else 
+             {
+               msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
+               msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
+             }
+             try {
+               fcall.sendResponse(fb,msg,msgType,seqid);
+               return;
+             } catch (Exception ex) {
+               LOGGER.error("Exception writing to internal frame buffer", ex);
+             }
+             fb.close();
+           }
+         };
        }
  
-       int lastComparison = 0;
-       getMetaConf_result typedOther = (getMetaConf_result)other;
+       protected boolean isOneway() {
+         return false;
+       }
  
-       lastComparison = Boolean.valueOf(isSetSuccess()).compareTo(typedOther.isSetSuccess());
-       if (lastComparison != 0) {
-         return lastComparison;
+       public void start(I iface, get_tables_args args, org.apache.thrift.async.AsyncMethodCallback<List<String>> resultHandler) throws TException {
+         iface.get_tables(args.db_name, args.pattern,resultHandler);
        }
-       if (isSetSuccess()) {
-         lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.success, typedOther.success);
-         if (lastComparison != 0) {
-           return lastComparison;
-         }
+     }
+ 
+     public static class get_all_tables<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, get_all_tables_args, List<String>> {
+       public get_all_tables() {
+         super("get_all_tables");
        }
-       lastComparison = Boolean.valueOf(isSetO1()).compareTo(typedOther.isSetO1());
-       if (lastComparison != 0) {
-         return lastComparison;
+ 
+       public get_all_tables_args getEmptyArgsInstance() {
+         return new get_all_tables_args();
        }
-       if (isSetO1()) {
-         lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.o1, typedOther.o1);
-         if (lastComparison != 0) {
-           return lastComparison;
-         }
+ 
+       public AsyncMethodCallback<List<String>> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
+         final org.apache.thrift.AsyncProcessFunction fcall = this;
+         return new AsyncMethodCallback<List<String>>() { 
+           public void onComplete(List<String> o) {
+             get_all_tables_result result = new get_all_tables_result();
+             result.success = o;
+             try {
+               fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
+               return;
+             } catch (Exception e) {
+               LOGGER.error("Exception writing to internal frame buffer", e);
+             }
+             fb.close();
+           }
+           public void onError(Exception e) {
+             byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
+             org.apache.thrift.TBase msg;
+             get_all_tables_result result = new get_all_tables_result();
+             if (e instanceof MetaException) {
+                         result.o1 = (MetaException) e;
+                         result.setO1IsSet(true);
+                         msg = result;
+             }
+              else 
+             {
+               msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
+               msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
+             }
+             try {
+               fcall.sendResponse(fb,msg,msgType,seqid);
+               return;
+             } catch (Exception ex) {
+               LOGGER.error("Exception writing to internal frame buffer", ex);
+             }
+             fb.close();
+           }
+         };
        }
-       return 0;
-     }
  
-     public _Fields fieldForId(int fieldId) {
-       return _Fields.findByThriftId(fieldId);
-     }
+       protected boolean isOneway() {
+         return false;
+       }
  
-     public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
-       schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
+       public void start(I iface, get_all_tables_args args, org.apache.thrift.async.AsyncMethodCallback<List<String>> resultHandler) throws TException {
+         iface.get_all_tables(args.db_name,resultHandler);
+       }
      }
  
-     public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
-       schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
+     public static class get_table<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, get_table_args, Table> {
+       public get_table() {
+         super("get_table");
        }
  
-     @Override
-     public String toString() {
-       StringBuilder sb = new StringBuilder("getMetaConf_result(");
-       boolean first = true;
+       public get_table_args getEmptyArgsInstance() {
+         return new get_table_args();
+       }
  
-       sb.append("success:");
+       public AsyncMethodCallback<Table> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
+         final org.apache.thrift.AsyncProcessFunction fcall = this;
+         return new AsyncMethodCallback<Table>() { 
+           public void onComplete(Table o) {
+             get_table_result result = new get_table_result();
+             result.success = o;
+             try {
+               fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
+               return;
+             } catch (Exception e) {
+               LOGGER.error("Exception writing to internal frame buffer", e);
+             }
+             fb.close();
+           }
+           public void onError(Exception e) {
+             byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
+             org.apache.thrift.TBase msg;
+             get_table_result result = new get_table_result();
+             if (e instanceof MetaException) {
+                         result.o1 = (MetaException) e;
+                         result.setO1IsSet(true);
+                         msg = result;
+             }
+             else             if (e instanceof NoSuchObjectException) {
+                         result.o2 = (NoSuchObjectException) e;
+                         result.setO2IsSet(true);
+                         msg = result;
+             }
+              else 
+             {
+               msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
+               msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
+             }
+             try {
+               fcall.sendResponse(fb,msg,msgType,seqid);
+               return;
+             } catch (Exception ex) {
+               LOGGER.error("Exception writing to internal frame buffer", ex);
+             }
+             fb.close();
+           }
+         };
+       }
+ 
+       protected boolean isOneway() {
+         return false;
+       }
+ 
+       public void start(I iface, get_table_args args, org.apache.thrift.async.AsyncMethodCallback<Table> resultHandler) throws TException {
+         iface.get_table(args.dbname, args.tbl_name,resultHandler);
+       }
+     }
+ 
+     public static class get_table_objects_by_name<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, get_table_objects_by_name_args, List<Table>> {
+       public get_table_objects_by_name() {
+         super("get_table_objects_by_name");
+       }
+ 
+       public get_table_objects_by_name_args getEmptyArgsInstance() {
+         return new get_table_objects_by_name_args();
+       }
+ 
+       public AsyncMethodCallback<List<Table>> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
+         final org.apache.thrift.AsyncProcessFunction fcall = this;
+         return new AsyncMethodCallback<List<Table>>() { 
+           public void onComplete(List<Table> o) {
+             get_table_objects_by_name_result result = new get_table_objects_by_name_result();
+             result.success = o;
+             try {
+               fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
+               return;
+             } catch (Exception e) {
+               LOGGER.error("Exception writing to internal frame buffer", e);
+             }
+             fb.close();
+           }
+           public void onError(Exception e) {
+             byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
+             org.apache.thrift.TBase msg;
+             get_table_objects_by_name_result result = new get_table_objects_by_name_result();
+             if (e instanceof MetaException) {
+               

<TRUNCATED>
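
For orientation, every generated AsyncProcessFunction in the diff above follows the same shape: start() hands the request arguments plus an AsyncMethodCallback to the service implementation, onComplete() wraps the return value in the matching *_result struct, and onError() maps the declared Thrift exceptions (e.g. MetaException) onto the struct's exception fields before falling back to a TApplicationException. A minimal, hypothetical handler that plugs into that pattern could look like the sketch below; the class name and lookupConf() are assumptions for illustration, only the Thrift types come from the code above.

import org.apache.hadoop.hive.metastore.api.MetaException;
import org.apache.thrift.async.AsyncMethodCallback;

// Illustrative only: a hand-written async handler for one of the generated
// methods above. The generated start()/getResultHandler() code takes care of
// framing, the *_result structs and the exception-to-field mapping.
public class MetaConfAsyncHandler {

  // Same shape as ThriftHiveMetastore.AsyncIface#getMetaConf(key, resultHandler).
  public void getMetaConf(String key, AsyncMethodCallback<String> resultHandler) {
    try {
      String value = lookupConf(key);   // assumed application logic
      resultHandler.onComplete(value);  // becomes getMetaConf_result.success
    } catch (MetaException e) {
      resultHandler.onError(e);         // mapped to getMetaConf_result.o1
    } catch (Exception e) {
      resultHandler.onError(e);         // surfaces as TApplicationException(INTERNAL_ERROR)
    }
  }

  private String lookupConf(String key) throws MetaException {
    // Placeholder for the real metastore configuration lookup.
    throw new MetaException("unknown metaconf key: " + key);
  }
}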

[32/50] [abbrv] hive git commit: HIVE-11304: Migrate to Log4j2 from Log4j 1.x (Prasanth Jayachandran reviewed by Thejas Nair, Sergey Shelukhin)

Posted by se...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/c93d6c77/testutils/ptest2/src/main/resources/log4j2.xml
----------------------------------------------------------------------
diff --git a/testutils/ptest2/src/main/resources/log4j2.xml b/testutils/ptest2/src/main/resources/log4j2.xml
new file mode 100644
index 0000000..992462e
--- /dev/null
+++ b/testutils/ptest2/src/main/resources/log4j2.xml
@@ -0,0 +1,80 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+
+<Configuration status="info" strict="true" name="PTest2Log4j2"
+ packages="org.apache.hadoop.hive.ql.log">
+
+  <Properties>
+    <Property name="hive.ptest.log.threshold">ALL</Property>
+    <Property name="hive.ptest.log.level">DEBUG</Property>
+    <Property name="hive.ptest.root.logger">FILE</Property>
+    <Property name="hive.ptest.log.dir">target</Property>
+    <Property name="hive.ptest.log.file">ptest.log</Property>
+  </Properties>
+
+  <Appenders>
+    <Console name="console" target="SYSTEM_ERR">
+      <PatternLayout pattern="%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n"/>
+    </Console>
+
+    <!-- Size based Rolling File Appender -->
+    <RollingFile name="FILE" fileName="${sys:hive.ptest.log.dir}/${sys:hive.ptest.log.file}"
+     filePattern="${sys:hive.ptest.log.dir}/${sys:hive.ptest.log.file}.%i">
+      <PatternLayout pattern="%d{yyyy-MM-dd HH:mm:ss} %-5p %c{1}:%L - %m%n" />
+      <Policies>
+        <SizeBasedTriggeringPolicy size="50 MB" />
+      </Policies>
+      <DefaultRolloverStrategy max="1"/>
+    </RollingFile>
+
+  </Appenders>
+
+  <Loggers>
+    <Root level="${sys:hive.ptest.log.threshold}">
+      <AppenderRef ref="${sys:hive.ptest.root.logger}" level="${sys:hive.ptest.log.level}"/>
+    </Root>
+
+    <Logger name="org.apache.http" level="INFO">
+      <AppenderRef ref="${sys:hive.ptest.root.logger}"/>
+    </Logger>
+    <Logger name="org.springframework" level="INFO">
+      <AppenderRef ref="${sys:hive.ptest.root.logger}"/>
+    </Logger>
+    <Logger name="org.jclouds" level="INFO">
+      <AppenderRef ref="${sys:hive.ptest.root.logger}"/>
+    </Logger>
+    <Logger name="jclouds" level="INFO">
+      <AppenderRef ref="${sys:hive.ptest.root.logger}"/>
+    </Logger>
+    <Logger name="org.apache.hive" level="DEBUG">
+      <AppenderRef ref="${sys:hive.ptest.root.logger}"/>
+    </Logger>
+    <Logger name="org.apache.http" level="TRACE">
+      <AppenderRef ref="${sys:hive.ptest.root.logger}"/>
+    </Logger>
+
+    <!-- Silence useless ZK logs -->
+    <Logger name="org.apache.zookeeper.server.NIOServerCnxn" level="WARN">
+      <AppenderRef ref="${sys:hive.ptest.root.logger}"/>
+    </Logger>
+    <Logger name="org.apache.zookeeper.ClientCnxnSocketNIO" level="WARN">
+      <AppenderRef ref="${sys:hive.ptest.root.logger}"/>
+    </Logger>
+  </Loggers>
+
+</Configuration>

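The ${sys:...} references in this configuration are resolved against JVM system properties, so the ptest log destination, level and root appender can be steered per run without editing the XML, as long as the properties are set before Log4j2 initializes. A minimal sketch, assuming a hypothetical driver class (the property names are the ones declared above; the values are examples):

// Illustrative only: override the ${sys:...} lookups used by log4j2.xml above.
// The properties must be set before the first LogManager call in the JVM.
public final class PTestLogSetup {
  public static void main(String[] args) {
    System.setProperty("hive.ptest.log.dir", "/tmp/ptest-logs");    // assumed path
    System.setProperty("hive.ptest.log.file", "ptest-debug.log");   // assumed file name
    System.setProperty("hive.ptest.root.logger", "console");        // route to the Console appender
    System.setProperty("hive.ptest.log.level", "INFO");
    System.setProperty("hive.ptest.log.threshold", "ALL");

    org.apache.logging.log4j.Logger log =
        org.apache.logging.log4j.LogManager.getLogger(PTestLogSetup.class);
    log.info("ptest logging initialized");
  }
}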

[17/50] [abbrv] hive git commit: HIVE-11449: "Capacity must be a power of two" error when HybridHashTableContainer memory threshold is too low (Jason Dere, reviewed by Sergey Shelukhin)

Posted by se...@apache.org.
HIVE-11449: "Capacity must be a power of two" error when HybridHashTableContainer memory threshold is too low (Jason Dere, reviewed by Sergey Shelukhin)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/2ee30c48
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/2ee30c48
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/2ee30c48

Branch: refs/heads/hbase-metastore
Commit: 2ee30c4859cd3427f0c74af536657d149cbad361
Parents: 763cb02
Author: Jason Dere <jd...@hortonworks.com>
Authored: Tue Aug 11 17:07:47 2015 -0700
Committer: Jason Dere <jd...@hortonworks.com>
Committed: Tue Aug 11 17:07:47 2015 -0700

----------------------------------------------------------------------
 .../hadoop/hive/ql/exec/persistence/HybridHashTableContainer.java  | 2 ++
 1 file changed, 2 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/2ee30c48/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/HybridHashTableContainer.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/HybridHashTableContainer.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/HybridHashTableContainer.java
index 0a6461f..ad1246d 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/HybridHashTableContainer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/HybridHashTableContainer.java
@@ -118,6 +118,8 @@ public class HybridHashTableContainer
     public HashPartition(int threshold, float loadFactor, int wbSize, long memUsage,
                          boolean createHashMap) {
       if (createHashMap) {
+        // Hash map should be at least the size of our designated wbSize
+        memUsage = Math.max(memUsage, wbSize);
         hashMap = new BytesBytesMultiHashMap(threshold, loadFactor, wbSize, memUsage);
       } else {
         hashMapSpilledOnCreation = true;
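
The one-line clamp above keeps the hash map's memory budget at no less than one write buffer. With a very low memory threshold, a capacity derived from a smaller budget can round down to zero (or otherwise miss a power of two), which is presumably where the "Capacity must be a power of two" assertion fires. The sketch below only illustrates that arithmetic; the constant and the rounding policy are assumptions, not BytesBytesMultiHashMap's actual sizing code.

// Illustrative only: why a too-small memUsage can break a power-of-two
// capacity check, and why clamping to wbSize (the fix above) avoids it.
public final class CapacitySketch {
  private static final int BYTES_PER_REF = 8;    // assumed cost of one hash slot

  static int chooseCapacity(int requested, long memUsage) {
    // The memory budget caps how many slots may be allocated.
    int maxSlots = (int) (memUsage / BYTES_PER_REF);
    int capacity = Math.min(requested, maxSlots);
    capacity = Integer.highestOneBit(capacity);  // round down to a power of two
    if (capacity <= 0 || Integer.bitCount(capacity) != 1) {
      throw new AssertionError("Capacity must be a power of two");
    }
    return capacity;
  }

  public static void main(String[] args) {
    int wbSize = 8 * 1024 * 1024;                    // one write buffer
    long tinyThreshold = 4;                          // pathologically low threshold
    long memUsage = Math.max(tinyThreshold, wbSize); // mirrors the HIVE-11449 clamp
    System.out.println(chooseCapacity(1 << 20, memUsage)); // prints 1048576
    // chooseCapacity(1 << 20, tinyThreshold) would trip the assertion: maxSlots == 0.
  }
}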


[02/50] [abbrv] hive git commit: HIVE-11397: Parse Hive OR clauses as they are written into the AST (Jesus Camacho Rodriguez, reviewed by Hari Sankar Sivarama Subramaniyan)

Posted by se...@apache.org.
HIVE-11397: Parse Hive OR clauses as they are written into the AST (Jesus Camacho Rodriguez, reviewed by Hari Sankar Sivarama Subramaniyan)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/5abcc6a2
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/5abcc6a2
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/5abcc6a2

Branch: refs/heads/hbase-metastore
Commit: 5abcc6a2768793e4b3da95c6c5edb741d6580e65
Parents: 7536ede
Author: Jesus Camacho Rodriguez <jc...@apache.org>
Authored: Mon Aug 10 09:44:49 2015 +0300
Committer: Jesus Camacho Rodriguez <jc...@apache.org>
Committed: Mon Aug 10 09:44:49 2015 +0300

----------------------------------------------------------------------
 .../hadoop/hive/ql/parse/SemanticAnalyzer.java  |  2 +-
 .../groupby_multi_single_reducer2.q.out         |  2 +-
 .../groupby_multi_single_reducer3.q.out         |  8 +++----
 .../results/clientpositive/multi_insert.q.out   |  8 +++----
 .../clientpositive/multi_insert_gby.q.out       |  2 +-
 .../multi_insert_lateral_view.q.out             |  4 ++--
 ...i_insert_move_tasks_share_dependencies.q.out | 24 ++++++++++----------
 .../spark/groupby_multi_single_reducer2.q.out   |  2 +-
 .../spark/groupby_multi_single_reducer3.q.out   |  8 +++----
 .../clientpositive/spark/multi_insert.q.out     |  8 +++----
 .../clientpositive/spark/multi_insert_gby.q.out |  2 +-
 .../spark/multi_insert_lateral_view.q.out       |  4 ++--
 ...i_insert_move_tasks_share_dependencies.q.out | 24 ++++++++++----------
 13 files changed, 49 insertions(+), 49 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/5abcc6a2/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
index 0a780af..fe7c1ca 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
@@ -5370,8 +5370,8 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
 
         GenericUDFOPOr or = new GenericUDFOPOr();
         List<ExprNodeDesc> expressions = new ArrayList<ExprNodeDesc>(2);
-        expressions.add(previous);
         expressions.add(current);
+        expressions.add(previous);
         ExprNodeDesc orExpr =
             new ExprNodeGenericFuncDesc(TypeInfoFactory.booleanTypeInfo, or, expressions);
         previous = orExpr;
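
The hunk above is the entire code change: when the per-branch filter conditions are folded into a single OR, the accumulated expression (previous) is now appended after the branch currently being processed (current), so the operands of the resulting GenericUDFOPOr land in the opposite order. Every .q.out update below is just that operand swap showing up in the EXPLAIN predicates. A standalone sketch of the fold, with strings standing in for Hive's ExprNodeDesc objects (the class and method names are illustrative):

import java.util.Arrays;
import java.util.List;

// Illustrative only: how swapping add(previous)/add(current) changes the
// printed shape of the folded OR expression.
public final class OrFoldSketch {

  static String fold(List<String> conditions, boolean currentFirst) {
    String previous = null;
    for (String current : conditions) {
      if (previous == null) {
        previous = current;
        continue;
      }
      previous = currentFirst
          ? "(" + current + " or " + previous + ")"   // after this change
          : "(" + previous + " or " + current + ")";  // before this change
    }
    return previous;
  }

  public static void main(String[] args) {
    List<String> branches =
        Arrays.asList("(key < 10)", "((key > 10) and (key < 20))");
    System.out.println(fold(branches, false)); // ((key < 10) or ((key > 10) and (key < 20)))
    System.out.println(fold(branches, true));  // (((key > 10) and (key < 20)) or (key < 10))
  }
}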

http://git-wip-us.apache.org/repos/asf/hive/blob/5abcc6a2/ql/src/test/results/clientpositive/groupby_multi_single_reducer2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/groupby_multi_single_reducer2.q.out b/ql/src/test/results/clientpositive/groupby_multi_single_reducer2.q.out
index 2377cd5..972ed51 100644
--- a/ql/src/test/results/clientpositive/groupby_multi_single_reducer2.q.out
+++ b/ql/src/test/results/clientpositive/groupby_multi_single_reducer2.q.out
@@ -43,7 +43,7 @@ STAGE PLANS:
             alias: src
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: ((substr(key, 1, 1) >= 5) or (substr(key, 1, 1) < 5)) (type: boolean)
+              predicate: ((substr(key, 1, 1) < 5) or (substr(key, 1, 1) >= 5)) (type: boolean)
               Statistics: Num rows: 332 Data size: 3527 Basic stats: COMPLETE Column stats: NONE
               Reduce Output Operator
                 key expressions: substr(key, 1, 1) (type: string), key (type: string)

http://git-wip-us.apache.org/repos/asf/hive/blob/5abcc6a2/ql/src/test/results/clientpositive/groupby_multi_single_reducer3.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/groupby_multi_single_reducer3.q.out b/ql/src/test/results/clientpositive/groupby_multi_single_reducer3.q.out
index ca0d524..616eaa3 100644
--- a/ql/src/test/results/clientpositive/groupby_multi_single_reducer3.q.out
+++ b/ql/src/test/results/clientpositive/groupby_multi_single_reducer3.q.out
@@ -59,7 +59,7 @@ STAGE PLANS:
             alias: src
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: (((value) IN ('val_100', 'val_200', 'val_300') and (key) IN (100, 150, 200)) or ((value) IN ('val_400', 'val_500') and (key) IN (400, 450))) (type: boolean)
+              predicate: (((value) IN ('val_400', 'val_500') and (key) IN (400, 450)) or ((value) IN ('val_100', 'val_200', 'val_300') and (key) IN (100, 150, 200))) (type: boolean)
               Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
               Reduce Output Operator
                 key expressions: key (type: string)
@@ -225,7 +225,7 @@ STAGE PLANS:
             alias: src
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: (((((key + key) = 200) or ((key - 100) = 100)) or ((key = 300) and value is not null)) or (((key + key) = 400) or (((key - 100) = 500) and value is not null))) (type: boolean)
+              predicate: ((((key + key) = 400) or (((key - 100) = 500) and value is not null)) or ((((key + key) = 200) or ((key - 100) = 100)) or ((key = 300) and value is not null))) (type: boolean)
               Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
               Reduce Output Operator
                 key expressions: value (type: string)
@@ -391,7 +391,7 @@ STAGE PLANS:
             alias: src
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: (((value) IN ('val_100', 'val_200', 'val_300') and (key) IN (100, 150, 200)) or ((value) IN ('val_400', 'val_500') and (key) IN (400, 450))) (type: boolean)
+              predicate: (((value) IN ('val_400', 'val_500') and (key) IN (400, 450)) or ((value) IN ('val_100', 'val_200', 'val_300') and (key) IN (100, 150, 200))) (type: boolean)
               Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
               Reduce Output Operator
                 key expressions: key (type: string)
@@ -557,7 +557,7 @@ STAGE PLANS:
             alias: src
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: (((((key + key) = 200) or ((key - 100) = 100)) or ((key = 300) and value is not null)) or (((key + key) = 400) or (((key - 100) = 500) and value is not null))) (type: boolean)
+              predicate: ((((key + key) = 400) or (((key - 100) = 500) and value is not null)) or ((((key + key) = 200) or ((key - 100) = 100)) or ((key = 300) and value is not null))) (type: boolean)
               Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
               Reduce Output Operator
                 key expressions: value (type: string)

http://git-wip-us.apache.org/repos/asf/hive/blob/5abcc6a2/ql/src/test/results/clientpositive/multi_insert.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/multi_insert.q.out b/ql/src/test/results/clientpositive/multi_insert.q.out
index 6f321c3..f8fc172 100644
--- a/ql/src/test/results/clientpositive/multi_insert.q.out
+++ b/ql/src/test/results/clientpositive/multi_insert.q.out
@@ -755,7 +755,7 @@ STAGE PLANS:
             alias: src
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: ((key < 10) or ((key > 10) and (key < 20))) (type: boolean)
+              predicate: (((key > 10) and (key < 20)) or (key < 10)) (type: boolean)
               Statistics: Num rows: 221 Data size: 2347 Basic stats: COMPLETE Column stats: NONE
               Reduce Output Operator
                 key expressions: key (type: string), value (type: string)
@@ -905,7 +905,7 @@ STAGE PLANS:
             alias: src
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: ((key < 10) or ((key > 10) and (key < 20))) (type: boolean)
+              predicate: (((key > 10) and (key < 20)) or (key < 10)) (type: boolean)
               Statistics: Num rows: 221 Data size: 2347 Basic stats: COMPLETE Column stats: NONE
               Reduce Output Operator
                 key expressions: key (type: string), value (type: string)
@@ -1123,7 +1123,7 @@ STAGE PLANS:
             alias: src
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: ((key < 10) or ((key > 10) and (key < 20))) (type: boolean)
+              predicate: (((key > 10) and (key < 20)) or (key < 10)) (type: boolean)
               Statistics: Num rows: 221 Data size: 2347 Basic stats: COMPLETE Column stats: NONE
               Reduce Output Operator
                 key expressions: key (type: string), value (type: string)
@@ -1273,7 +1273,7 @@ STAGE PLANS:
             alias: src
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: ((key < 10) or ((key > 10) and (key < 20))) (type: boolean)
+              predicate: (((key > 10) and (key < 20)) or (key < 10)) (type: boolean)
               Statistics: Num rows: 221 Data size: 2347 Basic stats: COMPLETE Column stats: NONE
               Reduce Output Operator
                 key expressions: key (type: string), value (type: string)

http://git-wip-us.apache.org/repos/asf/hive/blob/5abcc6a2/ql/src/test/results/clientpositive/multi_insert_gby.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/multi_insert_gby.q.out b/ql/src/test/results/clientpositive/multi_insert_gby.q.out
index 3c51f58..7c5e589 100644
--- a/ql/src/test/results/clientpositive/multi_insert_gby.q.out
+++ b/ql/src/test/results/clientpositive/multi_insert_gby.q.out
@@ -47,7 +47,7 @@ STAGE PLANS:
             alias: src
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: ((key > 450) or (key > 500)) (type: boolean)
+              predicate: ((key > 500) or (key > 450)) (type: boolean)
               Statistics: Num rows: 332 Data size: 3527 Basic stats: COMPLETE Column stats: NONE
               Reduce Output Operator
                 key expressions: key (type: string)

http://git-wip-us.apache.org/repos/asf/hive/blob/5abcc6a2/ql/src/test/results/clientpositive/multi_insert_lateral_view.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/multi_insert_lateral_view.q.out b/ql/src/test/results/clientpositive/multi_insert_lateral_view.q.out
index 121f78c..4723153 100644
--- a/ql/src/test/results/clientpositive/multi_insert_lateral_view.q.out
+++ b/ql/src/test/results/clientpositive/multi_insert_lateral_view.q.out
@@ -671,7 +671,7 @@ STAGE PLANS:
                         Statistics: Num rows: 20 Data size: 208 Basic stats: COMPLETE Column stats: NONE
                         value expressions: _col1 (type: double)
             Filter Operator
-              predicate: ((key > 200) or (key < 200)) (type: boolean)
+              predicate: ((key < 200) or (key > 200)) (type: boolean)
               Statistics: Num rows: 6 Data size: 62 Basic stats: COMPLETE Column stats: NONE
               File Output Operator
                 compressed: false
@@ -1342,7 +1342,7 @@ STAGE PLANS:
                             output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
                             serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
             Filter Operator
-              predicate: ((key > 200) or (key < 200)) (type: boolean)
+              predicate: ((key < 200) or (key > 200)) (type: boolean)
               Statistics: Num rows: 6 Data size: 62 Basic stats: COMPLETE Column stats: NONE
               File Output Operator
                 compressed: false

http://git-wip-us.apache.org/repos/asf/hive/blob/5abcc6a2/ql/src/test/results/clientpositive/multi_insert_move_tasks_share_dependencies.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/multi_insert_move_tasks_share_dependencies.q.out b/ql/src/test/results/clientpositive/multi_insert_move_tasks_share_dependencies.q.out
index 8f9dd12..935ae75 100644
--- a/ql/src/test/results/clientpositive/multi_insert_move_tasks_share_dependencies.q.out
+++ b/ql/src/test/results/clientpositive/multi_insert_move_tasks_share_dependencies.q.out
@@ -772,7 +772,7 @@ STAGE PLANS:
             alias: src
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: ((key < 10) or ((key > 10) and (key < 20))) (type: boolean)
+              predicate: (((key > 10) and (key < 20)) or (key < 10)) (type: boolean)
               Statistics: Num rows: 221 Data size: 2347 Basic stats: COMPLETE Column stats: NONE
               Reduce Output Operator
                 key expressions: key (type: string), value (type: string)
@@ -926,7 +926,7 @@ STAGE PLANS:
             alias: src
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: ((key < 10) or ((key > 10) and (key < 20))) (type: boolean)
+              predicate: (((key > 10) and (key < 20)) or (key < 10)) (type: boolean)
               Statistics: Num rows: 221 Data size: 2347 Basic stats: COMPLETE Column stats: NONE
               Reduce Output Operator
                 key expressions: key (type: string), value (type: string)
@@ -1148,7 +1148,7 @@ STAGE PLANS:
             alias: src
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: ((key < 10) or ((key > 10) and (key < 20))) (type: boolean)
+              predicate: (((key > 10) and (key < 20)) or (key < 10)) (type: boolean)
               Statistics: Num rows: 221 Data size: 2347 Basic stats: COMPLETE Column stats: NONE
               Reduce Output Operator
                 key expressions: key (type: string), value (type: string)
@@ -1302,7 +1302,7 @@ STAGE PLANS:
             alias: src
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: ((key < 10) or ((key > 10) and (key < 20))) (type: boolean)
+              predicate: (((key > 10) and (key < 20)) or (key < 10)) (type: boolean)
               Statistics: Num rows: 221 Data size: 2347 Basic stats: COMPLETE Column stats: NONE
               Reduce Output Operator
                 key expressions: key (type: string), value (type: string)
@@ -2835,7 +2835,7 @@ STAGE PLANS:
             alias: src
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: ((key < 10) or ((key > 10) and (key < 20))) (type: boolean)
+              predicate: (((key > 10) and (key < 20)) or (key < 10)) (type: boolean)
               Statistics: Num rows: 221 Data size: 2347 Basic stats: COMPLETE Column stats: NONE
               Reduce Output Operator
                 key expressions: key (type: string), value (type: string)
@@ -2971,7 +2971,7 @@ STAGE PLANS:
             alias: src
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: ((key < 10) or ((key > 10) and (key < 20))) (type: boolean)
+              predicate: (((key > 10) and (key < 20)) or (key < 10)) (type: boolean)
               Statistics: Num rows: 221 Data size: 2347 Basic stats: COMPLETE Column stats: NONE
               Reduce Output Operator
                 key expressions: key (type: string), value (type: string)
@@ -3107,7 +3107,7 @@ STAGE PLANS:
             alias: src
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: ((key < 10) or ((key > 10) and (key < 20))) (type: boolean)
+              predicate: (((key > 10) and (key < 20)) or (key < 10)) (type: boolean)
               Statistics: Num rows: 221 Data size: 2347 Basic stats: COMPLETE Column stats: NONE
               Reduce Output Operator
                 key expressions: key (type: string), value (type: string)
@@ -3243,7 +3243,7 @@ STAGE PLANS:
             alias: src
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: ((key < 10) or ((key > 10) and (key < 20))) (type: boolean)
+              predicate: (((key > 10) and (key < 20)) or (key < 10)) (type: boolean)
               Statistics: Num rows: 221 Data size: 2347 Basic stats: COMPLETE Column stats: NONE
               Reduce Output Operator
                 key expressions: key (type: string), value (type: string)
@@ -3417,7 +3417,7 @@ STAGE PLANS:
                       serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                       name: default.src_multi2
             Filter Operator
-              predicate: ((key < 10) or ((key > 10) and (key < 20))) (type: boolean)
+              predicate: (((key > 10) and (key < 20)) or (key < 10)) (type: boolean)
               Statistics: Num rows: 221 Data size: 2347 Basic stats: COMPLETE Column stats: NONE
               Reduce Output Operator
                 key expressions: key (type: string), value (type: string)
@@ -3664,7 +3664,7 @@ STAGE PLANS:
                       serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                       name: default.src_multi2
             Filter Operator
-              predicate: ((key < 10) or ((key > 10) and (key < 20))) (type: boolean)
+              predicate: (((key > 10) and (key < 20)) or (key < 10)) (type: boolean)
               Statistics: Num rows: 221 Data size: 2347 Basic stats: COMPLETE Column stats: NONE
               Reduce Output Operator
                 key expressions: key (type: string), value (type: string)
@@ -3921,7 +3921,7 @@ STAGE PLANS:
                       serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                       name: default.src_multi2
             Filter Operator
-              predicate: ((key < 10) or ((key > 10) and (key < 20))) (type: boolean)
+              predicate: (((key > 10) and (key < 20)) or (key < 10)) (type: boolean)
               Statistics: Num rows: 221 Data size: 2347 Basic stats: COMPLETE Column stats: NONE
               Reduce Output Operator
                 key expressions: key (type: string), value (type: string)
@@ -4256,7 +4256,7 @@ STAGE PLANS:
                       serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                       name: default.src_multi2
             Filter Operator
-              predicate: ((key < 10) or ((key > 10) and (key < 20))) (type: boolean)
+              predicate: (((key > 10) and (key < 20)) or (key < 10)) (type: boolean)
               Statistics: Num rows: 221 Data size: 2347 Basic stats: COMPLETE Column stats: NONE
               Reduce Output Operator
                 key expressions: key (type: string), value (type: string)

http://git-wip-us.apache.org/repos/asf/hive/blob/5abcc6a2/ql/src/test/results/clientpositive/spark/groupby_multi_single_reducer2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/groupby_multi_single_reducer2.q.out b/ql/src/test/results/clientpositive/spark/groupby_multi_single_reducer2.q.out
index 7903302..5d15040 100644
--- a/ql/src/test/results/clientpositive/spark/groupby_multi_single_reducer2.q.out
+++ b/ql/src/test/results/clientpositive/spark/groupby_multi_single_reducer2.q.out
@@ -48,7 +48,7 @@ STAGE PLANS:
                   alias: src
                   Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: ((substr(key, 1, 1) >= 5) or (substr(key, 1, 1) < 5)) (type: boolean)
+                    predicate: ((substr(key, 1, 1) < 5) or (substr(key, 1, 1) >= 5)) (type: boolean)
                     Statistics: Num rows: 332 Data size: 3527 Basic stats: COMPLETE Column stats: NONE
                     Reduce Output Operator
                       key expressions: substr(key, 1, 1) (type: string), key (type: string)

http://git-wip-us.apache.org/repos/asf/hive/blob/5abcc6a2/ql/src/test/results/clientpositive/spark/groupby_multi_single_reducer3.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/groupby_multi_single_reducer3.q.out b/ql/src/test/results/clientpositive/spark/groupby_multi_single_reducer3.q.out
index 4ac7009..5192dbb 100644
--- a/ql/src/test/results/clientpositive/spark/groupby_multi_single_reducer3.q.out
+++ b/ql/src/test/results/clientpositive/spark/groupby_multi_single_reducer3.q.out
@@ -64,7 +64,7 @@ STAGE PLANS:
                   alias: src
                   Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: (((value) IN ('val_100', 'val_200', 'val_300') and (key) IN (100, 150, 200)) or ((value) IN ('val_400', 'val_500') and (key) IN (400, 450))) (type: boolean)
+                    predicate: (((value) IN ('val_400', 'val_500') and (key) IN (400, 450)) or ((value) IN ('val_100', 'val_200', 'val_300') and (key) IN (100, 150, 200))) (type: boolean)
                     Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
                     Reduce Output Operator
                       key expressions: key (type: string)
@@ -236,7 +236,7 @@ STAGE PLANS:
                   alias: src
                   Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: (((((key + key) = 200) or ((key - 100) = 100)) or ((key = 300) and value is not null)) or (((key + key) = 400) or (((key - 100) = 500) and value is not null))) (type: boolean)
+                    predicate: ((((key + key) = 400) or (((key - 100) = 500) and value is not null)) or ((((key + key) = 200) or ((key - 100) = 100)) or ((key = 300) and value is not null))) (type: boolean)
                     Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                     Reduce Output Operator
                       key expressions: value (type: string)
@@ -408,7 +408,7 @@ STAGE PLANS:
                   alias: src
                   Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: (((value) IN ('val_100', 'val_200', 'val_300') and (key) IN (100, 150, 200)) or ((value) IN ('val_400', 'val_500') and (key) IN (400, 450))) (type: boolean)
+                    predicate: (((value) IN ('val_400', 'val_500') and (key) IN (400, 450)) or ((value) IN ('val_100', 'val_200', 'val_300') and (key) IN (100, 150, 200))) (type: boolean)
                     Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
                     Reduce Output Operator
                       key expressions: key (type: string)
@@ -580,7 +580,7 @@ STAGE PLANS:
                   alias: src
                   Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: (((((key + key) = 200) or ((key - 100) = 100)) or ((key = 300) and value is not null)) or (((key + key) = 400) or (((key - 100) = 500) and value is not null))) (type: boolean)
+                    predicate: ((((key + key) = 400) or (((key - 100) = 500) and value is not null)) or ((((key + key) = 200) or ((key - 100) = 100)) or ((key = 300) and value is not null))) (type: boolean)
                     Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                     Reduce Output Operator
                       key expressions: value (type: string)

http://git-wip-us.apache.org/repos/asf/hive/blob/5abcc6a2/ql/src/test/results/clientpositive/spark/multi_insert.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/multi_insert.q.out b/ql/src/test/results/clientpositive/spark/multi_insert.q.out
index c77a691..117133a 100644
--- a/ql/src/test/results/clientpositive/spark/multi_insert.q.out
+++ b/ql/src/test/results/clientpositive/spark/multi_insert.q.out
@@ -596,7 +596,7 @@ STAGE PLANS:
                   alias: src
                   Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: ((key < 10) or ((key > 10) and (key < 20))) (type: boolean)
+                    predicate: (((key > 10) and (key < 20)) or (key < 10)) (type: boolean)
                     Statistics: Num rows: 221 Data size: 2347 Basic stats: COMPLETE Column stats: NONE
                     Reduce Output Operator
                       key expressions: key (type: string), value (type: string)
@@ -742,7 +742,7 @@ STAGE PLANS:
                   alias: src
                   Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: ((key < 10) or ((key > 10) and (key < 20))) (type: boolean)
+                    predicate: (((key > 10) and (key < 20)) or (key < 10)) (type: boolean)
                     Statistics: Num rows: 221 Data size: 2347 Basic stats: COMPLETE Column stats: NONE
                     Reduce Output Operator
                       key expressions: key (type: string), value (type: string)
@@ -888,7 +888,7 @@ STAGE PLANS:
                   alias: src
                   Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: ((key < 10) or ((key > 10) and (key < 20))) (type: boolean)
+                    predicate: (((key > 10) and (key < 20)) or (key < 10)) (type: boolean)
                     Statistics: Num rows: 221 Data size: 2347 Basic stats: COMPLETE Column stats: NONE
                     Reduce Output Operator
                       key expressions: key (type: string), value (type: string)
@@ -1034,7 +1034,7 @@ STAGE PLANS:
                   alias: src
                   Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: ((key < 10) or ((key > 10) and (key < 20))) (type: boolean)
+                    predicate: (((key > 10) and (key < 20)) or (key < 10)) (type: boolean)
                     Statistics: Num rows: 221 Data size: 2347 Basic stats: COMPLETE Column stats: NONE
                     Reduce Output Operator
                       key expressions: key (type: string), value (type: string)

http://git-wip-us.apache.org/repos/asf/hive/blob/5abcc6a2/ql/src/test/results/clientpositive/spark/multi_insert_gby.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/multi_insert_gby.q.out b/ql/src/test/results/clientpositive/spark/multi_insert_gby.q.out
index 6eae46e..9eeabb4 100644
--- a/ql/src/test/results/clientpositive/spark/multi_insert_gby.q.out
+++ b/ql/src/test/results/clientpositive/spark/multi_insert_gby.q.out
@@ -52,7 +52,7 @@ STAGE PLANS:
                   alias: src
                   Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: ((key > 450) or (key > 500)) (type: boolean)
+                    predicate: ((key > 500) or (key > 450)) (type: boolean)
                     Statistics: Num rows: 332 Data size: 3527 Basic stats: COMPLETE Column stats: NONE
                     Reduce Output Operator
                       key expressions: key (type: string)

http://git-wip-us.apache.org/repos/asf/hive/blob/5abcc6a2/ql/src/test/results/clientpositive/spark/multi_insert_lateral_view.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/multi_insert_lateral_view.q.out b/ql/src/test/results/clientpositive/spark/multi_insert_lateral_view.q.out
index 6dc5747..6aec979 100644
--- a/ql/src/test/results/clientpositive/spark/multi_insert_lateral_view.q.out
+++ b/ql/src/test/results/clientpositive/spark/multi_insert_lateral_view.q.out
@@ -597,7 +597,7 @@ STAGE PLANS:
                   alias: src_10
                   Statistics: Num rows: 10 Data size: 104 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: ((key > 200) or (key < 200)) (type: boolean)
+                    predicate: ((key < 200) or (key > 200)) (type: boolean)
                     Statistics: Num rows: 6 Data size: 62 Basic stats: COMPLETE Column stats: NONE
                     Reduce Output Operator
                       key expressions: key (type: string)
@@ -1267,7 +1267,7 @@ STAGE PLANS:
                   alias: src_10
                   Statistics: Num rows: 10 Data size: 104 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: ((key > 200) or (key < 200)) (type: boolean)
+                    predicate: ((key < 200) or (key > 200)) (type: boolean)
                     Statistics: Num rows: 6 Data size: 62 Basic stats: COMPLETE Column stats: NONE
                     Reduce Output Operator
                       key expressions: value (type: string), key (type: string)

http://git-wip-us.apache.org/repos/asf/hive/blob/5abcc6a2/ql/src/test/results/clientpositive/spark/multi_insert_move_tasks_share_dependencies.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/multi_insert_move_tasks_share_dependencies.q.out b/ql/src/test/results/clientpositive/spark/multi_insert_move_tasks_share_dependencies.q.out
index cddf923..2bcf1bf 100644
--- a/ql/src/test/results/clientpositive/spark/multi_insert_move_tasks_share_dependencies.q.out
+++ b/ql/src/test/results/clientpositive/spark/multi_insert_move_tasks_share_dependencies.q.out
@@ -613,7 +613,7 @@ STAGE PLANS:
                   alias: src
                   Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: ((key < 10) or ((key > 10) and (key < 20))) (type: boolean)
+                    predicate: (((key > 10) and (key < 20)) or (key < 10)) (type: boolean)
                     Statistics: Num rows: 221 Data size: 2347 Basic stats: COMPLETE Column stats: NONE
                     Reduce Output Operator
                       key expressions: key (type: string), value (type: string)
@@ -763,7 +763,7 @@ STAGE PLANS:
                   alias: src
                   Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: ((key < 10) or ((key > 10) and (key < 20))) (type: boolean)
+                    predicate: (((key > 10) and (key < 20)) or (key < 10)) (type: boolean)
                     Statistics: Num rows: 221 Data size: 2347 Basic stats: COMPLETE Column stats: NONE
                     Reduce Output Operator
                       key expressions: key (type: string), value (type: string)
@@ -913,7 +913,7 @@ STAGE PLANS:
                   alias: src
                   Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: ((key < 10) or ((key > 10) and (key < 20))) (type: boolean)
+                    predicate: (((key > 10) and (key < 20)) or (key < 10)) (type: boolean)
                     Statistics: Num rows: 221 Data size: 2347 Basic stats: COMPLETE Column stats: NONE
                     Reduce Output Operator
                       key expressions: key (type: string), value (type: string)
@@ -1063,7 +1063,7 @@ STAGE PLANS:
                   alias: src
                   Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: ((key < 10) or ((key > 10) and (key < 20))) (type: boolean)
+                    predicate: (((key > 10) and (key < 20)) or (key < 10)) (type: boolean)
                     Statistics: Num rows: 221 Data size: 2347 Basic stats: COMPLETE Column stats: NONE
                     Reduce Output Operator
                       key expressions: key (type: string), value (type: string)
@@ -2365,7 +2365,7 @@ STAGE PLANS:
                   alias: src
                   Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: ((key < 10) or ((key > 10) and (key < 20))) (type: boolean)
+                    predicate: (((key > 10) and (key < 20)) or (key < 10)) (type: boolean)
                     Statistics: Num rows: 221 Data size: 2347 Basic stats: COMPLETE Column stats: NONE
                     Reduce Output Operator
                       key expressions: key (type: string), value (type: string)
@@ -2492,7 +2492,7 @@ STAGE PLANS:
                   alias: src
                   Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: ((key < 10) or ((key > 10) and (key < 20))) (type: boolean)
+                    predicate: (((key > 10) and (key < 20)) or (key < 10)) (type: boolean)
                     Statistics: Num rows: 221 Data size: 2347 Basic stats: COMPLETE Column stats: NONE
                     Reduce Output Operator
                       key expressions: key (type: string), value (type: string)
@@ -2619,7 +2619,7 @@ STAGE PLANS:
                   alias: src
                   Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: ((key < 10) or ((key > 10) and (key < 20))) (type: boolean)
+                    predicate: (((key > 10) and (key < 20)) or (key < 10)) (type: boolean)
                     Statistics: Num rows: 221 Data size: 2347 Basic stats: COMPLETE Column stats: NONE
                     Reduce Output Operator
                       key expressions: key (type: string), value (type: string)
@@ -2746,7 +2746,7 @@ STAGE PLANS:
                   alias: src
                   Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: ((key < 10) or ((key > 10) and (key < 20))) (type: boolean)
+                    predicate: (((key > 10) and (key < 20)) or (key < 10)) (type: boolean)
                     Statistics: Num rows: 221 Data size: 2347 Basic stats: COMPLETE Column stats: NONE
                     Reduce Output Operator
                       key expressions: key (type: string), value (type: string)
@@ -2911,7 +2911,7 @@ STAGE PLANS:
                             serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                             name: default.src_multi2
                   Filter Operator
-                    predicate: ((key < 10) or ((key > 10) and (key < 20))) (type: boolean)
+                    predicate: (((key > 10) and (key < 20)) or (key < 10)) (type: boolean)
                     Statistics: Num rows: 221 Data size: 2347 Basic stats: COMPLETE Column stats: NONE
                     Reduce Output Operator
                       key expressions: key (type: string), value (type: string)
@@ -3149,7 +3149,7 @@ STAGE PLANS:
                             serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                             name: default.src_multi2
                   Filter Operator
-                    predicate: ((key < 10) or ((key > 10) and (key < 20))) (type: boolean)
+                    predicate: (((key > 10) and (key < 20)) or (key < 10)) (type: boolean)
                     Statistics: Num rows: 221 Data size: 2347 Basic stats: COMPLETE Column stats: NONE
                     Reduce Output Operator
                       key expressions: key (type: string), value (type: string)
@@ -3387,7 +3387,7 @@ STAGE PLANS:
                             serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                             name: default.src_multi2
                   Filter Operator
-                    predicate: ((key < 10) or ((key > 10) and (key < 20))) (type: boolean)
+                    predicate: (((key > 10) and (key < 20)) or (key < 10)) (type: boolean)
                     Statistics: Num rows: 221 Data size: 2347 Basic stats: COMPLETE Column stats: NONE
                     Reduce Output Operator
                       key expressions: key (type: string), value (type: string)
@@ -3625,7 +3625,7 @@ STAGE PLANS:
                             serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                             name: default.src_multi2
                   Filter Operator
-                    predicate: ((key < 10) or ((key > 10) and (key < 20))) (type: boolean)
+                    predicate: (((key > 10) and (key < 20)) or (key < 10)) (type: boolean)
                     Statistics: Num rows: 221 Data size: 2347 Basic stats: COMPLETE Column stats: NONE
                     Reduce Output Operator
                       key expressions: key (type: string), value (type: string)


[18/50] [abbrv] hive git commit: HIVE-11348: Support START TRANSACTION/COMMIT/ROLLBACK commands: support SQL2011 reserved keywords (Pengcheng Xiong reviewed by Eugene Koifman)

Posted by se...@apache.org.
HIVE-11348: Support START TRANSACTION/COMMIT/ROLLBACK commands: support SQL2011 reserved keywords (Pengcheng Xiong reviewed by Eugene Koifman)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/df138f2b
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/df138f2b
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/df138f2b

Branch: refs/heads/hbase-metastore
Commit: df138f2b133c62ac31021c179ce2d04cabcf210e
Parents: 2ee30c4
Author: Pengcheng Xiong <px...@apache.org>
Authored: Wed Aug 12 10:04:54 2015 -0700
Committer: Pengcheng Xiong <px...@apache.org>
Committed: Wed Aug 12 10:05:49 2015 -0700

----------------------------------------------------------------------
 .../apache/hadoop/hive/ql/parse/IdentifiersParser.g | 16 ++++++++++------
 1 file changed, 10 insertions(+), 6 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/df138f2b/ql/src/java/org/apache/hadoop/hive/ql/parse/IdentifiersParser.g
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/IdentifiersParser.g b/ql/src/java/org/apache/hadoop/hive/ql/parse/IdentifiersParser.g
index 501287d..64af7d1 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/IdentifiersParser.g
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/IdentifiersParser.g
@@ -614,7 +614,13 @@ principalIdentifier
     | QuotedIdentifier
     ;
 
-//the new version of nonReserved + sql11ReservedKeywordsUsedAsIdentifier = old version of nonReserved 
+//The new version of nonReserved + sql11ReservedKeywordsUsedAsIdentifier = old version of nonReserved
+//Non reserved keywords are basically the keywords that can be used as identifiers.
+//All the KW_* are automatically not only keywords, but also reserved keywords.
+//That means, they can NOT be used as identifiers.
+//If you would like to use them as identifiers, put them in the nonReserved list below.
+//If you are not sure, please refer to the SQL2011 column in
+//http://www.postgresql.org/docs/9.5/static/sql-keywords-appendix.html
 nonReserved
     :
     KW_ADD | KW_ADMIN | KW_AFTER | KW_ANALYZE | KW_ARCHIVE | KW_ASC | KW_BEFORE | KW_BUCKET | KW_BUCKETS
@@ -636,11 +642,7 @@ nonReserved
     | KW_TINYINT | KW_TOUCH | KW_TRANSACTIONS | KW_UNARCHIVE | KW_UNDO | KW_UNIONTYPE | KW_UNLOCK | KW_UNSET
     | KW_UNSIGNED | KW_URI | KW_USE | KW_UTC | KW_UTCTIMESTAMP | KW_VALUE_TYPE | KW_VIEW | KW_WHILE | KW_YEAR
     | KW_WORK
-    | KW_START
     | KW_TRANSACTION
-    | KW_COMMIT
-    | KW_ROLLBACK
-    | KW_ONLY
     | KW_WRITE
     | KW_ISOLATION
     | KW_LEVEL
@@ -648,13 +650,15 @@ nonReserved
     | KW_AUTOCOMMIT
 ;
 
-//The following SQL2011 reserved keywords are used as cast function name only, it is a subset of the sql11ReservedKeywordsUsedAsIdentifier.
+//The following SQL2011 reserved keywords are used as cast function name only, but not as identifiers.
 sql11ReservedKeywordsUsedAsCastFunctionName
     :
     KW_BIGINT | KW_BINARY | KW_BOOLEAN | KW_CURRENT_DATE | KW_CURRENT_TIMESTAMP | KW_DATE | KW_DOUBLE | KW_FLOAT | KW_INT | KW_SMALLINT | KW_TIMESTAMP
     ;
 
 //The following SQL2011 reserved keywords are used as identifiers in many q tests, they may be added back due to backward compatibility.
+//We are planning to remove the following whole list after several releases.
+//Thus, please do not change the following list unless you know what to do.
 sql11ReservedKeywordsUsedAsIdentifier
     :
     KW_ALL | KW_ALTER | KW_ARRAY | KW_AS | KW_AUTHORIZATION | KW_BETWEEN | KW_BIGINT | KW_BINARY | KW_BOOLEAN 
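
Dropping KW_START, KW_COMMIT, KW_ROLLBACK and KW_ONLY from nonReserved means those words can no longer appear as bare identifiers; they now behave as SQL2011 reserved keywords. A hedged sketch of what that looks like from client code over the HiveServer2 JDBC driver (the connection URL and table name are placeholders, and accepting the back-quoted forms still depends on the default hive.support.quoted.identifiers=column setting):

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.Statement;

public class ReservedKeywordIdentifiers {
  public static void main(String[] args) throws Exception {
    // Explicit driver load for older JDBC setups; the URL is a placeholder.
    Class.forName("org.apache.hive.jdbc.HiveDriver");
    try (Connection conn =
             DriverManager.getConnection("jdbc:hive2://localhost:10000/default");
         Statement stmt = conn.createStatement()) {
      // With this patch a bare reserved word as an identifier should now be
      // rejected by the parser:
      //   stmt.execute("CREATE TABLE txn_demo (commit STRING)");
      // Back-quoting keeps the word usable as a column name:
      stmt.execute("CREATE TABLE txn_demo (`commit` STRING, `only` STRING)");
    }
  }
}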


[34/50] [abbrv] hive git commit: HIVE-11304: Migrate to Log4j2 from Log4j 1.x (Prasanth Jayachandran reviewed by Thejas Nair, Sergey Shelukhin)

Posted by se...@apache.org.
HIVE-11304: Migrate to Log4j2 from Log4j 1.x (Prasanth Jayachandran reviewed by Thejas Nair, Sergey Shelukhin)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/c93d6c77
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/c93d6c77
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/c93d6c77

Branch: refs/heads/hbase-metastore
Commit: c93d6c77e31e2eb9b40f5167ab3491d44eae351a
Parents: a4849cb
Author: Prasanth Jayachandran <j....@gmail.com>
Authored: Fri Aug 14 10:17:20 2015 -0700
Committer: Prasanth Jayachandran <j....@gmail.com>
Committed: Fri Aug 14 10:17:20 2015 -0700

----------------------------------------------------------------------
 accumulo-handler/pom.xml                        |   4 -
 .../src/main/resources/beeline-log4j.properties |  24 --
 beeline/src/main/resources/beeline-log4j2.xml   |  40 ++++
 bin/ext/beeline.sh                              |   2 +-
 bin/hive                                        |   3 +
 common/pom.xml                                  |  22 +-
 .../org/apache/hadoop/hive/common/LogUtils.java |  18 +-
 .../org/apache/hadoop/hive/conf/HiveConf.java   |   8 +-
 common/src/main/resources/hive-log4j.properties |  88 --------
 common/src/main/resources/hive-log4j2.xml       | 111 +++++++++
 .../hadoop/hive/conf/TestHiveLogging.java       |   8 +-
 .../resources/hive-exec-log4j-test.properties   |  59 -----
 .../test/resources/hive-exec-log4j2-test.xml    |  86 +++++++
 .../test/resources/hive-log4j-test.properties   |  71 ------
 common/src/test/resources/hive-log4j2-test.xml  |  95 ++++++++
 data/conf/hive-log4j-old.properties             |  82 -------
 data/conf/hive-log4j.properties                 |  97 --------
 data/conf/hive-log4j2.xml                       | 148 ++++++++++++
 data/conf/spark/log4j.properties                |  24 --
 data/conf/spark/log4j2.xml                      |  74 ++++++
 docs/xdocs/language_manual/cli.xml              |   2 +-
 hcatalog/bin/hcat_server.sh                     |   2 +-
 hcatalog/bin/templeton.cmd                      |   4 +-
 hcatalog/scripts/hcat_server_start.sh           |   2 +-
 .../content/xdocs/configuration.xml             |   2 +-
 .../src/documentation/content/xdocs/install.xml |   2 +-
 .../deployers/config/hive/hive-log4j.properties |  88 --------
 .../deployers/config/hive/hive-log4j2.xml       | 111 +++++++++
 .../templeton/deployers/start_hive_services.sh  |   2 +-
 .../webhcat/svr/src/main/bin/webhcat_server.sh  |   4 +-
 .../src/main/config/webhcat-log4j.properties    |  45 ----
 .../svr/src/main/config/webhcat-log4j2.xml      |  75 +++++++
 .../main/java/org/apache/hive/hplsql/Exec.java  |   2 +
 .../operation/TestOperationLoggingLayout.java   | 136 +++++++++++
 itests/pom.xml                                  |   2 +-
 .../hadoop/hive/metastore/HiveMetaStore.java    |   2 +-
 .../metastore/txn/TestCompactionTxnHandler.java |  40 +++-
 .../hive/metastore/txn/TestTxnHandler.java      |  66 ++++--
 packaging/src/main/assembly/bin.xml             |  14 +-
 pom.xml                                         |  37 ++-
 ql/pom.xml                                      |  17 +-
 .../hadoop/hive/ql/exec/mr/ExecDriver.java      |  29 ++-
 .../hive/ql/exec/mr/HadoopJobExecHelper.java    |  20 +-
 .../ql/io/rcfile/stats/PartialScanTask.java     |  20 +-
 .../hadoop/hive/ql/log/HiveEventCounter.java    | 135 +++++++++++
 .../apache/hadoop/hive/ql/log/NullAppender.java |  63 ++++++
 .../ql/log/PidDailyRollingFileAppender.java     |  33 ---
 .../hive/ql/log/PidFilePatternConverter.java    |  62 ++++++
 .../main/resources/hive-exec-log4j.properties   |  77 -------
 ql/src/main/resources/hive-exec-log4j2.xml      | 110 +++++++++
 .../hadoop/hive/ql/log/TestLog4j2Appenders.java |  95 ++++++++
 .../hadoop/hive/ql/metadata/StringAppender.java | 128 +++++++++++
 .../hadoop/hive/ql/metadata/TestHive.java       |  50 +++--
 .../hive/service/cli/CLIServiceUtils.java       |   7 -
 .../cli/operation/LogDivertAppender.java        | 223 +++++++++++--------
 .../service/cli/operation/OperationManager.java |  17 +-
 shims/common/pom.xml                            |  17 +-
 .../hadoop/hive/shims/HiveEventCounter.java     | 102 ---------
 .../src/test/resources/log4j.properties         |  23 --
 spark-client/src/test/resources/log4j2.xml      |  39 ++++
 storage-api/pom.xml                             |   7 -
 testutils/ptest2/pom.xml                        |  20 ++
 .../ptest2/src/main/resources/log4j.properties  |  37 ---
 testutils/ptest2/src/main/resources/log4j2.xml  |  79 +++++++
 64 files changed, 1989 insertions(+), 1123 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/c93d6c77/accumulo-handler/pom.xml
----------------------------------------------------------------------
diff --git a/accumulo-handler/pom.xml b/accumulo-handler/pom.xml
index ee40f72..4e3a087 100644
--- a/accumulo-handler/pom.xml
+++ b/accumulo-handler/pom.xml
@@ -91,10 +91,6 @@
       <artifactId>slf4j-api</artifactId>
     </dependency>
     <dependency>
-      <groupId>org.slf4j</groupId>
-      <artifactId>slf4j-log4j12</artifactId>
-    </dependency>
-    <dependency>
       <groupId>junit</groupId>
       <artifactId>junit</artifactId>
       <scope>test</scope>

http://git-wip-us.apache.org/repos/asf/hive/blob/c93d6c77/beeline/src/main/resources/beeline-log4j.properties
----------------------------------------------------------------------
diff --git a/beeline/src/main/resources/beeline-log4j.properties b/beeline/src/main/resources/beeline-log4j.properties
deleted file mode 100644
index fe47d94..0000000
--- a/beeline/src/main/resources/beeline-log4j.properties
+++ /dev/null
@@ -1,24 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-log4j.rootLogger=WARN, console
-
-######## console appender ########
-log4j.appender.console=org.apache.log4j.ConsoleAppender
-log4j.appender.console.target=System.err
-log4j.appender.console.layout=org.apache.log4j.PatternLayout
-log4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} [%t]: %p %c{2}: %m%n
-log4j.appender.console.encoding=UTF-8

http://git-wip-us.apache.org/repos/asf/hive/blob/c93d6c77/beeline/src/main/resources/beeline-log4j2.xml
----------------------------------------------------------------------
diff --git a/beeline/src/main/resources/beeline-log4j2.xml b/beeline/src/main/resources/beeline-log4j2.xml
new file mode 100644
index 0000000..5f09741
--- /dev/null
+++ b/beeline/src/main/resources/beeline-log4j2.xml
@@ -0,0 +1,40 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+
+<Configuration status="info" strict="true" name="BeelineLog4j2"
+ packages="org.apache.hadoop.hive.ql.log">
+
+  <Properties>
+    <Property name="hive.log.threshold">ALL</Property>
+    <Property name="hive.log.level">WARN</Property>
+    <Property name="hive.root.logger">console</Property>
+  </Properties>
+
+  <Appenders>
+    <Console name="console" target="SYSTEM_ERR">
+      <PatternLayout pattern="%d{yy/MM/dd HH:mm:ss} [%t]: %p %c{2}: %m%n"/>
+    </Console>
+  </Appenders>
+
+  <Loggers>
+    <Root level="${sys:hive.log.threshold}">
+      <AppenderRef ref="${sys:hive.root.logger}" level="${sys:hive.log.level}"/>
+    </Root>
+  </Loggers>
+
+</Configuration>

http://git-wip-us.apache.org/repos/asf/hive/blob/c93d6c77/bin/ext/beeline.sh
----------------------------------------------------------------------
diff --git a/bin/ext/beeline.sh b/bin/ext/beeline.sh
index ab3dc1a..9de8f6c 100644
--- a/bin/ext/beeline.sh
+++ b/bin/ext/beeline.sh
@@ -31,7 +31,7 @@ beeline () {
     hadoopClasspath="${HADOOP_CLASSPATH}:"
   fi
   export HADOOP_CLASSPATH="${hadoopClasspath}${HIVE_CONF_DIR}:${beelineJarPath}:${superCsvJarPath}:${jlineJarPath}:${jdbcStandaloneJarPath}"
-  export HADOOP_CLIENT_OPTS="$HADOOP_CLIENT_OPTS -Dlog4j.configuration=beeline-log4j.properties "
+  export HADOOP_CLIENT_OPTS="$HADOOP_CLIENT_OPTS -Dlog4j.configurationFile=beeline-log4j2.xml "
 
   exec $HADOOP jar ${beelineJarPath} $CLASS $HIVE_OPTS "$@"
 }
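
Log4j2 ignores the old -Dlog4j.configuration flag and looks for log4j.configurationFile instead, which is why the exported HADOOP_CLIENT_OPTS switches flags here and in bin/hive below. A rough Java equivalent of what the flag does, assuming beeline-log4j2.xml is on the classpath; the property has to be set before the first LogManager call:

import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;

public class ConfigFileSelection {
  public static void main(String[] args) {
    // Same effect as -Dlog4j.configurationFile=beeline-log4j2.xml: Log4j2 reads
    // this property when the logging context is first created.
    System.setProperty("log4j.configurationFile", "beeline-log4j2.xml");
    Logger log = LogManager.getLogger(ConfigFileSelection.class);
    log.warn("configured from beeline-log4j2.xml");
  }
}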

http://git-wip-us.apache.org/repos/asf/hive/blob/c93d6c77/bin/hive
----------------------------------------------------------------------
diff --git a/bin/hive b/bin/hive
index 5dc93fb..ad7139e 100755
--- a/bin/hive
+++ b/bin/hive
@@ -193,6 +193,9 @@ if [ "$HADOOP_HOME" == "" ]; then
   exit 4;
 fi
 
+# to avoid errors from log4j2 automatic configuration loading
+export HADOOP_CLIENT_OPTS="$HADOOP_CLIENT_OPTS -Dlog4j.configurationFile=hive-log4j2.xml "
+
 HADOOP=$HADOOP_HOME/bin/hadoop
 if [ ! -f ${HADOOP} ]; then
   echo "Cannot find hadoop installation: \$HADOOP_HOME or \$HADOOP_PREFIX must be set or hadoop must be in the path";

http://git-wip-us.apache.org/repos/asf/hive/blob/c93d6c77/common/pom.xml
----------------------------------------------------------------------
diff --git a/common/pom.xml b/common/pom.xml
index a7997e2..dba814d 100644
--- a/common/pom.xml
+++ b/common/pom.xml
@@ -66,14 +66,24 @@
       <version>${joda.version}</version>
     </dependency>
     <dependency>
-      <groupId>log4j</groupId>
-      <artifactId>log4j</artifactId>
-      <version>${log4j.version}</version>
+      <groupId>org.apache.logging.log4j</groupId>
+      <artifactId>log4j-1.2-api</artifactId>
+      <version>${log4j2.version}</version>
     </dependency>
     <dependency>
-      <groupId>log4j</groupId>
-      <artifactId>apache-log4j-extras</artifactId>
-      <version>${log4j-extras.version}</version>
+      <groupId>org.apache.logging.log4j</groupId>
+      <artifactId>log4j-web</artifactId>
+      <version>${log4j2.version}</version>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.logging.log4j</groupId>
+      <artifactId>log4j-slf4j-impl</artifactId>
+      <version>${log4j2.version}</version>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.logging.log4j</groupId>
+      <artifactId>log4j-jcl</artifactId>
+      <version>${log4j2.version}</version>
     </dependency>
     <dependency>
       <groupId>org.apache.commons</groupId>
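
The new dependencies are the Log4j2 bridge artifacts: log4j-1.2-api keeps existing org.apache.log4j call sites compiling, while log4j-slf4j-impl and log4j-jcl route SLF4J and commons-logging calls into Log4j2. A small sketch of the effect (no Hive-specific code involved; both loggers end up writing through the same Log4j2 configuration):

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class BridgeCheck {
  // With log4j-slf4j-impl and log4j-jcl on the classpath, both of these loggers
  // are backed by Log4j2, so existing call sites keep working unchanged.
  private static final Logger SLF4J_LOG = LoggerFactory.getLogger(BridgeCheck.class);
  private static final Log JCL_LOG = LogFactory.getLog(BridgeCheck.class);

  public static void main(String[] args) {
    SLF4J_LOG.info("routed through log4j-slf4j-impl");
    JCL_LOG.info("routed through log4j-jcl");
  }
}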

http://git-wip-us.apache.org/repos/asf/hive/blob/c93d6c77/common/src/java/org/apache/hadoop/hive/common/LogUtils.java
----------------------------------------------------------------------
diff --git a/common/src/java/org/apache/hadoop/hive/common/LogUtils.java b/common/src/java/org/apache/hadoop/hive/common/LogUtils.java
index 9118675..3ca5c0f 100644
--- a/common/src/java/org/apache/hadoop/hive/common/LogUtils.java
+++ b/common/src/java/org/apache/hadoop/hive/common/LogUtils.java
@@ -18,26 +18,22 @@
 
 package org.apache.hadoop.hive.common;
 
-import java.net.URL;
 import java.io.File;
-import java.io.IOException;
-import java.io.FileNotFoundException;
+import java.net.URL;
 
-import org.apache.commons.lang.StringUtils;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
-import org.apache.log4j.LogManager;
-import org.apache.log4j.PropertyConfigurator;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
+import org.apache.logging.log4j.core.config.Configurator;
 
 /**
  * Utilities common to logging operations.
  */
 public class LogUtils {
 
-  private static final String HIVE_L4J = "hive-log4j.properties";
-  private static final String HIVE_EXEC_L4J = "hive-exec-log4j.properties";
+  private static final String HIVE_L4J = "hive-log4j2.xml";
+  private static final String HIVE_EXEC_L4J = "hive-exec-log4j2.xml";
   private static final Log l4j = LogFactory.getLog(LogUtils.class);
 
   @SuppressWarnings("serial")
@@ -95,8 +91,7 @@ public class LogUtils {
           }
           System.setProperty(HiveConf.ConfVars.HIVEQUERYID.toString(), queryId);
         }
-        LogManager.resetConfiguration();
-        PropertyConfigurator.configure(log4jFileName);
+        Configurator.initialize(null, log4jFileName);
         logConfigLocation(conf);
         return ("Logging initialized using configuration in " + log4jConfigFile);
       }
@@ -123,8 +118,7 @@ public class LogUtils {
         break;
     }
     if (hive_l4j != null) {
-      LogManager.resetConfiguration();
-      PropertyConfigurator.configure(hive_l4j);
+      Configurator.initialize(null, hive_l4j.toString());
       logConfigLocation(conf);
       return (logMessage + "\n" + "Logging initialized using configuration in " + hive_l4j);
     } else {
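
Programmatic initialization moves from Log4j 1.x's LogManager.resetConfiguration() plus PropertyConfigurator.configure() to Log4j2's Configurator.initialize(name, configLocation). A minimal standalone sketch of that call, using a hypothetical file path (the real code passes the file resolved from hive.log4j.file or the bundled hive-log4j2.xml):

import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.core.config.Configurator;

public class ManualLog4j2Init {
  public static void main(String[] args) {
    // Hand an explicit configuration file to Log4j2; the path is a placeholder.
    Configurator.initialize(null, "/tmp/my-hive-log4j2.xml");
    Logger log = LogManager.getLogger(ManualLog4j2Init.class);
    log.info("logging initialized from an explicit configuration file");
  }
}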

http://git-wip-us.apache.org/repos/asf/hive/blob/c93d6c77/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
----------------------------------------------------------------------
diff --git a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
index 11b9f78..730f5be 100644
--- a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
+++ b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
@@ -1696,13 +1696,13 @@ public class HiveConf extends Configuration {
     // logging configuration
     HIVE_LOG4J_FILE("hive.log4j.file", "",
         "Hive log4j configuration file.\n" +
-        "If the property is not set, then logging will be initialized using hive-log4j.properties found on the classpath.\n" +
-        "If the property is set, the value must be a valid URI (java.net.URI, e.g. \"file:///tmp/my-logging.properties\"), \n" +
+        "If the property is not set, then logging will be initialized using hive-log4j2.xml found on the classpath.\n" +
+        "If the property is set, the value must be a valid URI (java.net.URI, e.g. \"file:///tmp/my-logging.xml\"), \n" +
         "which you can then extract a URL from and pass to PropertyConfigurator.configure(URL)."),
     HIVE_EXEC_LOG4J_FILE("hive.exec.log4j.file", "",
         "Hive log4j configuration file for execution mode(sub command).\n" +
-        "If the property is not set, then logging will be initialized using hive-exec-log4j.properties found on the classpath.\n" +
-        "If the property is set, the value must be a valid URI (java.net.URI, e.g. \"file:///tmp/my-logging.properties\"), \n" +
+        "If the property is not set, then logging will be initialized using hive-exec-log4j2.xml found on the classpath.\n" +
+        "If the property is set, the value must be a valid URI (java.net.URI, e.g. \"file:///tmp/my-logging.xml\"), \n" +
         "which you can then extract a URL from and pass to PropertyConfigurator.configure(URL)."),
 
     HIVE_LOG_EXPLAIN_OUTPUT("hive.log.explain.output", false,
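
Only the descriptions change here: the classpath defaults are now hive-log4j2.xml and hive-exec-log4j2.xml. A hedged example of pointing hive.log4j.file at a custom configuration from Java; the URI is a placeholder, and as the description notes the value must be a valid java.net.URI:

import org.apache.hadoop.hive.conf.HiveConf;

public class LoggingConfOverride {
  public static void main(String[] args) {
    HiveConf conf = new HiveConf();
    // Point hive.log4j.file at a custom Log4j2 XML file (placeholder URI).
    conf.setVar(HiveConf.ConfVars.HIVE_LOG4J_FILE, "file:///tmp/my-logging.xml");
    System.out.println(conf.getVar(HiveConf.ConfVars.HIVE_LOG4J_FILE));
  }
}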

http://git-wip-us.apache.org/repos/asf/hive/blob/c93d6c77/common/src/main/resources/hive-log4j.properties
----------------------------------------------------------------------
diff --git a/common/src/main/resources/hive-log4j.properties b/common/src/main/resources/hive-log4j.properties
deleted file mode 100644
index 14fa725..0000000
--- a/common/src/main/resources/hive-log4j.properties
+++ /dev/null
@@ -1,88 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# Define some default values that can be overridden by system properties
-hive.log.threshold=ALL
-hive.root.logger=INFO,DRFA
-hive.log.dir=${java.io.tmpdir}/${user.name}
-hive.log.file=hive.log
-
-# Define the root logger to the system property "hadoop.root.logger".
-log4j.rootLogger=${hive.root.logger}, EventCounter
-
-# Logging Threshold
-log4j.threshold=${hive.log.threshold}
-
-#
-# Daily Rolling File Appender
-#
-# Use the PidDailyerRollingFileAppend class instead if you want to use separate log files
-# for different CLI session.
-#
-# log4j.appender.DRFA=org.apache.hadoop.hive.ql.log.PidDailyRollingFileAppender
-
-log4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender
-
-log4j.appender.DRFA.File=${hive.log.dir}/${hive.log.file}
-
-# Rollver at midnight
-log4j.appender.DRFA.DatePattern=.yyyy-MM-dd
-
-# 30-day backup
-#log4j.appender.DRFA.MaxBackupIndex=30
-log4j.appender.DRFA.layout=org.apache.log4j.PatternLayout
-
-# Pattern format: Date LogLevel LoggerName LogMessage
-#log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
-# Debugging Pattern format
-log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %-5p [%t]: %c{2} (%F:%M(%L)) - %m%n
-
-
-#
-# console
-# Add "console" to rootlogger above if you want to use this
-#
-
-log4j.appender.console=org.apache.log4j.ConsoleAppender
-log4j.appender.console.target=System.err
-log4j.appender.console.layout=org.apache.log4j.PatternLayout
-log4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} [%t]: %p %c{2}: %m%n
-log4j.appender.console.encoding=UTF-8
-
-#custom logging levels
-#log4j.logger.xxx=DEBUG
-
-#
-# Event Counter Appender
-# Sends counts of logging messages at different severity levels to Hadoop Metrics.
-#
-log4j.appender.EventCounter=org.apache.hadoop.hive.shims.HiveEventCounter
-
-
-log4j.category.DataNucleus=ERROR,DRFA
-log4j.category.Datastore=ERROR,DRFA
-log4j.category.Datastore.Schema=ERROR,DRFA
-log4j.category.JPOX.Datastore=ERROR,DRFA
-log4j.category.JPOX.Plugin=ERROR,DRFA
-log4j.category.JPOX.MetaData=ERROR,DRFA
-log4j.category.JPOX.Query=ERROR,DRFA
-log4j.category.JPOX.General=ERROR,DRFA
-log4j.category.JPOX.Enhancer=ERROR,DRFA
-
-
-# Silence useless ZK logs
-log4j.logger.org.apache.zookeeper.server.NIOServerCnxn=WARN,DRFA
-log4j.logger.org.apache.zookeeper.ClientCnxnSocketNIO=WARN,DRFA

http://git-wip-us.apache.org/repos/asf/hive/blob/c93d6c77/common/src/main/resources/hive-log4j2.xml
----------------------------------------------------------------------
diff --git a/common/src/main/resources/hive-log4j2.xml b/common/src/main/resources/hive-log4j2.xml
new file mode 100644
index 0000000..31b8fcc
--- /dev/null
+++ b/common/src/main/resources/hive-log4j2.xml
@@ -0,0 +1,111 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+
+<Configuration status="info" strict="true" name="HiveLog4j2"
+ packages="org.apache.hadoop.hive.ql.log">
+
+  <Properties>
+    <Property name="hive.log.threshold">ALL</Property>
+    <Property name="hive.log.level">INFO</Property>
+    <Property name="hive.root.logger">DRFA</Property>
+    <Property name="hive.log.dir">${sys:java.io.tmpdir}/${sys:user.name}</Property>
+    <Property name="hive.log.file">hive.log</Property>
+  </Properties>
+
+  <Appenders>
+    <Console name="console" target="SYSTEM_ERR">
+      <PatternLayout pattern="%d{yy/MM/dd HH:mm:ss} [%t]: %p %c{2}: %m%n"/>
+    </Console>
+
+    <!-- Regular File Appender -->
+    <!-- NOTE: if enabling multiple file appender make sure to use different file names -->
+    <!-- <File name="FA" fileName="${sys:hive.log.dir}/${sys:hive.log.file}">
+      <PatternLayout pattern="%d{ISO8601} %-5p [%t]: %c{2} (%F:%M(%L)) - %m%n" />
+    </File> -->
+
+    <!-- Daily Rolling File Appender -->
+    <!-- NOTE: if enabling multiple file appender make sure to use different file names -->
+    <!-- Use %pid in the filePattern to append <process-id>@<host-name> to the filename if you want separate log files for different CLI session -->
+    <RollingFile name="DRFA" fileName="${sys:hive.log.dir}/${sys:hive.log.file}"
+     filePattern="${sys:hive.log.dir}/${sys:hive.log.file}.%d{yyyy-MM-dd}">
+      <PatternLayout pattern="%d{ISO8601} %-5p [%t]: %c{2} (%F:%M(%L)) - %m%n" />
+      <Policies>
+        <!-- Rollover at midnight (interval = 1 means daily) -->
+        <TimeBasedTriggeringPolicy interval="1" modulate="true"/>
+      </Policies>
+      <!-- 30-day backup -->
+      <!-- <DefaultRolloverStrategy max="30"/> -->
+    </RollingFile>
+
+    <!-- Size based Rolling File Appender -->
+    <!-- NOTE: if enabling multiple file appender make sure to use different file names -->
+    <!-- <RollingFile name="RFA" fileName="${sys:hive.log.dir}/${sys:hive.log.file}"
+     filePattern="${sys:hive.log.dir}/${sys:hive.log.file}.%i">
+      <PatternLayout pattern="%d{ISO8601} %-5p [%t]: %c{2} (%F:%M(%L)) - %m%n" />
+      <Policies>
+        <SizeBasedTriggeringPolicy size="256 MB" />
+      </Policies>
+      <DefaultRolloverStrategy max="10"/>
+    </RollingFile> -->
+
+    <!-- HiveEventCounter appender is loaded from Configuration packages attribute. Sends counts of logging messages at different severity levels to Hadoop Metrics. -->
+    <HiveEventCounter name="EventCounter"/>
+  </Appenders>
+
+  <Loggers>
+    <Root level="${sys:hive.log.threshold}">
+      <AppenderRef ref="${sys:hive.root.logger}" level="${sys:hive.log.level}"/>
+      <AppenderRef ref="EventCounter" />
+    </Root>
+
+    <Logger name="org.apache.zookeeper.server.NIOServerCnxn" level="WARN">
+      <AppenderRef ref="${sys:hive.root.logger}"/>
+    </Logger>
+    <Logger name="org.apache.zookeeper.ClientCnxnSocketNIO" level="WARN">
+      <AppenderRef ref="${sys:hive.root.logger}"/>
+    </Logger>
+    <Logger name="DataNucleus" level="ERROR">
+      <AppenderRef ref="${sys:hive.root.logger}"/>
+    </Logger>
+    <Logger name="Datastore" level="ERROR">
+      <AppenderRef ref="${sys:hive.root.logger}"/>
+    </Logger>
+    <Logger name="Datastore.Schema" level="ERROR">
+      <AppenderRef ref="${sys:hive.root.logger}"/>
+    </Logger>
+    <Logger name="JPOX.Datastore" level="ERROR">
+      <AppenderRef ref="${sys:hive.root.logger}"/>
+    </Logger>
+    <Logger name="JPOX.Plugin" level="ERROR">
+      <AppenderRef ref="${sys:hive.root.logger}"/>
+    </Logger>
+    <Logger name="JPOX.Metadata" level="ERROR">
+      <AppenderRef ref="${sys:hive.root.logger}"/>
+    </Logger>
+    <Logger name="JPOX.Query" level="ERROR">
+      <AppenderRef ref="${sys:hive.root.logger}"/>
+    </Logger>
+    <Logger name="JPOX.General" level="ERROR">
+      <AppenderRef ref="${sys:hive.root.logger}"/>
+    </Logger>
+    <Logger name="JPOX.Enhancer" level="ERROR">
+      <AppenderRef ref="${sys:hive.root.logger}"/>
+    </Logger>
+  </Loggers>
+
+</Configuration>
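
Every value in the Properties block above is referenced through a ${sys:...} lookup, so the packaged defaults can be overridden per process with plain JVM system properties (for example -Dhive.root.logger=console -Dhive.log.level=DEBUG), much like the old hive.root.logger convention. A small sketch, assuming the properties are set before Log4j2 initializes:

import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;

public class HiveLogOverrides {
  public static void main(String[] args) {
    // Override the defaults declared in hive-log4j2.xml; must happen before the
    // first LogManager call or the packaged values are used instead.
    System.setProperty("hive.root.logger", "console");
    System.setProperty("hive.log.level", "DEBUG");
    Logger log = LogManager.getLogger(HiveLogOverrides.class);
    log.debug("now visible on the console appender");
  }
}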

http://git-wip-us.apache.org/repos/asf/hive/blob/c93d6c77/common/src/test/org/apache/hadoop/hive/conf/TestHiveLogging.java
----------------------------------------------------------------------
diff --git a/common/src/test/org/apache/hadoop/hive/conf/TestHiveLogging.java b/common/src/test/org/apache/hadoop/hive/conf/TestHiveLogging.java
index d5cedb1..92269e7 100644
--- a/common/src/test/org/apache/hadoop/hive/conf/TestHiveLogging.java
+++ b/common/src/test/org/apache/hadoop/hive/conf/TestHiveLogging.java
@@ -21,12 +21,12 @@ import java.io.BufferedReader;
 import java.io.File;
 import java.io.InputStreamReader;
 
-import junit.framework.TestCase;
-
 import org.apache.hadoop.hive.common.LogUtils;
 import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
 import org.apache.hive.common.util.HiveTestUtils;
 
+import junit.framework.TestCase;
+
 /**
  * TestHiveLogging
  *
@@ -104,9 +104,9 @@ public class TestHiveLogging extends TestCase {
     // customized log4j config log file to be: /${test.tmp.dir}/TestHiveLogging/hiveLog4jTest.log
     File customLogPath = new File(new File(System.getProperty("test.tmp.dir")),
         System.getProperty("user.name") + "-TestHiveLogging/");
-    String customLogName = "hiveLog4jTest.log";
+    String customLogName = "hiveLog4j2Test.log";
     File customLogFile = new File(customLogPath, customLogName);
     RunTest(customLogFile,
-      "hive-log4j-test.properties", "hive-exec-log4j-test.properties");
+      "hive-log4j2-test.xml", "hive-exec-log4j2-test.xml");
   }
 }

http://git-wip-us.apache.org/repos/asf/hive/blob/c93d6c77/common/src/test/resources/hive-exec-log4j-test.properties
----------------------------------------------------------------------
diff --git a/common/src/test/resources/hive-exec-log4j-test.properties b/common/src/test/resources/hive-exec-log4j-test.properties
deleted file mode 100644
index 1e53f26..0000000
--- a/common/src/test/resources/hive-exec-log4j-test.properties
+++ /dev/null
@@ -1,59 +0,0 @@
-# Define some default values that can be overridden by system properties
-hive.root.logger=INFO,FA
-hive.log.dir=/${test.tmp.dir}/${user.name}-TestHiveLogging
-hive.log.file=hiveExecLog4jTest.log
-
-# Define the root logger to the system property "hadoop.root.logger".
-log4j.rootLogger=${hive.root.logger}, EventCounter
-
-# Logging Threshold
-log4j.threshhold=WARN
-
-#
-# File Appender
-#
-
-log4j.appender.FA=org.apache.log4j.FileAppender
-log4j.appender.FA.File=${hive.log.dir}/${hive.log.file}
-log4j.appender.FA.layout=org.apache.log4j.PatternLayout
-
-# Pattern format: Date LogLevel LoggerName LogMessage
-#log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
-# Debugging Pattern format
-log4j.appender.FA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n
-
-
-#
-# console
-# Add "console" to rootlogger above if you want to use this
-#
-
-log4j.appender.console=org.apache.log4j.ConsoleAppender
-log4j.appender.console.target=System.err
-log4j.appender.console.layout=org.apache.log4j.PatternLayout
-log4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n
-
-#custom logging levels
-#log4j.logger.xxx=DEBUG
-
-#
-# Event Counter Appender
-# Sends counts of logging messages at different severity levels to Hadoop Metrics.
-#
-log4j.appender.EventCounter=org.apache.hadoop.metrics.jvm.EventCounter
-
-
-log4j.category.DataNucleus=ERROR,FA
-log4j.category.Datastore=ERROR,FA
-log4j.category.Datastore.Schema=ERROR,FA
-log4j.category.JPOX.Datastore=ERROR,FA
-log4j.category.JPOX.Plugin=ERROR,FA
-log4j.category.JPOX.MetaData=ERROR,FA
-log4j.category.JPOX.Query=ERROR,FA
-log4j.category.JPOX.General=ERROR,FA
-log4j.category.JPOX.Enhancer=ERROR,FA
-
-
-# Silence useless ZK logs
-log4j.logger.org.apache.zookeeper.server.NIOServerCnxn=WARN,FA
-log4j.logger.org.apache.zookeeper.ClientCnxnSocketNIO=WARN,FA

http://git-wip-us.apache.org/repos/asf/hive/blob/c93d6c77/common/src/test/resources/hive-exec-log4j2-test.xml
----------------------------------------------------------------------
diff --git a/common/src/test/resources/hive-exec-log4j2-test.xml b/common/src/test/resources/hive-exec-log4j2-test.xml
new file mode 100644
index 0000000..b5f2cb4
--- /dev/null
+++ b/common/src/test/resources/hive-exec-log4j2-test.xml
@@ -0,0 +1,86 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+
+<Configuration status="info" strict="true" name="HiveExecLog4j2Test"
+ packages="org.apache.hadoop.hive.ql.log">
+
+  <Properties>
+    <Property name="hive.log.threshold">ALL</Property>
+    <Property name="hive.log.level">INFO</Property>
+    <Property name="hive.root.logger">FA</Property>
+    <Property name="hive.log.dir">${sys:test.tmp.dir}/${sys:user.name}-TestHiveLogging</Property>
+    <Property name="hive.log.file">hiveExecLog4j2Test.log</Property>
+  </Properties>
+
+  <Appenders>
+    <Console name="console" target="SYSTEM_ERR">
+      <PatternLayout pattern="%d{yy/MM/dd HH:mm:ss} [%t]: %p %c{2}: %m%n"/>
+    </Console>
+
+    <File name="FA" fileName="${sys:hive.log.dir}/${sys:hive.log.file}">
+      <PatternLayout pattern="%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n" />
+    </File>
+
+    <!-- HiveEventCounter appender is loaded from Configuration packages attribute. Sends counts of logging messages at different severity levels to Hadoop Metrics. -->
+    <HiveEventCounter name="EventCounter"/>
+  </Appenders>
+
+  <Loggers>
+    <Root level="${sys:hive.log.threshold}">
+      <AppenderRef ref="${sys:hive.root.logger}" level="${sys:hive.log.level}"/>
+      <AppenderRef ref="EventCounter" />
+    </Root>
+
+    <!-- Silence useless ZK logs -->
+    <Logger name="org.apache.zookeeper.server.NIOServerCnxn" level="WARN">
+      <AppenderRef ref="${sys:hive.root.logger}"/>
+    </Logger>
+    <Logger name="org.apache.zookeeper.ClientCnxnSocketNIO" level="WARN">
+      <AppenderRef ref="${sys:hive.root.logger}"/>
+    </Logger>
+
+    <Logger name="DataNucleus" level="ERROR">
+      <AppenderRef ref="${sys:hive.root.logger}"/>
+    </Logger>
+    <Logger name="Datastore" level="ERROR">
+      <AppenderRef ref="${sys:hive.root.logger}"/>
+    </Logger>
+    <Logger name="Datastore.Schema" level="ERROR">
+      <AppenderRef ref="${sys:hive.root.logger}"/>
+    </Logger>
+    <Logger name="JPOX.Datastore" level="ERROR">
+      <AppenderRef ref="${sys:hive.root.logger}"/>
+    </Logger>
+    <Logger name="JPOX.Plugin" level="ERROR">
+      <AppenderRef ref="${sys:hive.root.logger}"/>
+    </Logger>
+    <Logger name="JPOX.Metadata" level="ERROR">
+      <AppenderRef ref="${sys:hive.root.logger}"/>
+    </Logger>
+    <Logger name="JPOX.Query" level="ERROR">
+      <AppenderRef ref="${sys:hive.root.logger}"/>
+    </Logger>
+    <Logger name="JPOX.General" level="ERROR">
+      <AppenderRef ref="${sys:hive.root.logger}"/>
+    </Logger>
+    <Logger name="JPOX.Enhancer" level="ERROR">
+      <AppenderRef ref="${sys:hive.root.logger}"/>
+    </Logger>
+  </Loggers>
+
+</Configuration>
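
The test configurations (and the main hive-log4j2.xml) reference the <HiveEventCounter> appender by its plugin name, which Log4j2 discovers through the packages="org.apache.hadoop.hive.ql.log" attribute rather than a fully qualified class name. The skeleton below is a hypothetical CountingAppender, not the HiveEventCounter added by this commit; it only shows the plugin wiring (@Plugin name, @PluginFactory, append()) that such an appender needs:

import java.io.Serializable;
import java.util.concurrent.atomic.AtomicLong;

import org.apache.logging.log4j.core.Filter;
import org.apache.logging.log4j.core.Layout;
import org.apache.logging.log4j.core.LogEvent;
import org.apache.logging.log4j.core.appender.AbstractAppender;
import org.apache.logging.log4j.core.config.plugins.Plugin;
import org.apache.logging.log4j.core.config.plugins.PluginAttribute;
import org.apache.logging.log4j.core.config.plugins.PluginElement;
import org.apache.logging.log4j.core.config.plugins.PluginFactory;

// The plugin name below is what a configuration element (e.g. <CountingAppender
// name="EventCounter"/>) would refer to once the class's package is listed in
// the Configuration packages attribute.
@Plugin(name = "CountingAppender", category = "Core", elementType = "appender", printObject = true)
public final class CountingAppender extends AbstractAppender {

  private final AtomicLong events = new AtomicLong();

  private CountingAppender(String name, Filter filter, Layout<? extends Serializable> layout) {
    super(name, filter, layout);
  }

  @Override
  public void append(LogEvent event) {
    // This skeleton only counts events; the real HiveEventCounter additionally
    // reports the per-level counts to Hadoop metrics.
    events.incrementAndGet();
  }

  @PluginFactory
  public static CountingAppender createAppender(@PluginAttribute("name") String name,
                                                @PluginElement("Filter") Filter filter,
                                                @PluginElement("Layout") Layout<? extends Serializable> layout) {
    return new CountingAppender(name, filter, layout);
  }
}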

http://git-wip-us.apache.org/repos/asf/hive/blob/c93d6c77/common/src/test/resources/hive-log4j-test.properties
----------------------------------------------------------------------
diff --git a/common/src/test/resources/hive-log4j-test.properties b/common/src/test/resources/hive-log4j-test.properties
deleted file mode 100644
index 0348325..0000000
--- a/common/src/test/resources/hive-log4j-test.properties
+++ /dev/null
@@ -1,71 +0,0 @@
-# Define some default values that can be overridden by system properties
-hive.root.logger=WARN,DRFA
-hive.log.dir=${test.tmp.dir}/${user.name}-TestHiveLogging
-hive.log.file=hiveLog4jTest.log
-
-# Define the root logger to the system property "hadoop.root.logger".
-log4j.rootLogger=${hive.root.logger}, EventCounter
-
-# Logging Threshold
-log4j.threshhold=WARN
-
-#
-# Daily Rolling File Appender
-#
-# Use the PidDailyerRollingFileAppend class instead if you want to use separate log files
-# for different CLI session.
-#
-# log4j.appender.DRFA=org.apache.hadoop.hive.ql.log.PidDailyRollingFileAppender
-
-log4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender
-
-log4j.appender.DRFA.File=${hive.log.dir}/${hive.log.file}
-
-# Rollver at midnight
-log4j.appender.DRFA.DatePattern=.yyyy-MM-dd
-
-# 30-day backup
-#log4j.appender.DRFA.MaxBackupIndex=30
-log4j.appender.DRFA.layout=org.apache.log4j.PatternLayout
-
-# Pattern format: Date LogLevel LoggerName LogMessage
-#log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
-# Debugging Pattern format
-log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n
-
-
-#
-# console
-# Add "console" to rootlogger above if you want to use this
-#
-
-log4j.appender.console=org.apache.log4j.ConsoleAppender
-log4j.appender.console.target=System.err
-log4j.appender.console.layout=org.apache.log4j.PatternLayout
-log4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n
-log4j.appender.console.encoding=UTF-8
-
-#custom logging levels
-#log4j.logger.xxx=DEBUG
-
-#
-# Event Counter Appender
-# Sends counts of logging messages at different severity levels to Hadoop Metrics.
-#
-log4j.appender.EventCounter=org.apache.hadoop.metrics.jvm.EventCounter
-
-
-log4j.category.DataNucleus=ERROR,DRFA
-log4j.category.Datastore=ERROR,DRFA
-log4j.category.Datastore.Schema=ERROR,DRFA
-log4j.category.JPOX.Datastore=ERROR,DRFA
-log4j.category.JPOX.Plugin=ERROR,DRFA
-log4j.category.JPOX.MetaData=ERROR,DRFA
-log4j.category.JPOX.Query=ERROR,DRFA
-log4j.category.JPOX.General=ERROR,DRFA
-log4j.category.JPOX.Enhancer=ERROR,DRFA
-
-
-# Silence useless ZK logs
-log4j.logger.org.apache.zookeeper.server.NIOServerCnxn=WARN,DRFA
-log4j.logger.org.apache.zookeeper.ClientCnxnSocketNIO=WARN,DRFA

http://git-wip-us.apache.org/repos/asf/hive/blob/c93d6c77/common/src/test/resources/hive-log4j2-test.xml
----------------------------------------------------------------------
diff --git a/common/src/test/resources/hive-log4j2-test.xml b/common/src/test/resources/hive-log4j2-test.xml
new file mode 100644
index 0000000..63b46c8
--- /dev/null
+++ b/common/src/test/resources/hive-log4j2-test.xml
@@ -0,0 +1,95 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+
+<Configuration status="info" strict="true" name="HiveLog4j2Test"
+ packages="org.apache.hadoop.hive.ql.log">
+
+  <Properties>
+    <Property name="hive.log.threshold">ALL</Property>
+    <Property name="hive.log.level">WARN</Property>
+    <Property name="hive.root.logger">DRFA</Property>
+    <Property name="hive.log.dir">${sys:test.tmp.dir}/${sys:user.name}-TestHiveLogging</Property>
+    <Property name="hive.log.file">hiveLog4j2Test.log</Property>
+  </Properties>
+
+  <Appenders>
+    <Console name="console" target="SYSTEM_ERR">
+      <PatternLayout pattern="%d{yy/MM/dd HH:mm:ss} [%t]: %p %c{2}: %m%n"/>
+    </Console>
+
+    <!-- Daily Rolling File Appender -->
+    <!-- Use %pid in the filePattern to append <process-id>@<host-name> to the filename if you want separate log files for different CLI session -->
+    <RollingFile name="DRFA" fileName="${sys:hive.log.dir}/${sys:hive.log.file}"
+     filePattern="${sys:hive.log.dir}/${sys:hive.log.file}.%d{yyyy-MM-dd}">
+      <PatternLayout pattern="%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n" />
+      <Policies>
+        <!-- Rollover at midnight (interval = 1 means daily) -->
+        <TimeBasedTriggeringPolicy interval="1" modulate="true"/>
+      </Policies>
+      <!-- 30-day backup -->
+      <!-- <DefaultRolloverStrategy max="30"/> -->
+    </RollingFile>
+
+    <!-- HiveEventCounter appender is loaded from Configuration packages attribute. Sends counts of logging messages at different severity levels to Hadoop Metrics. -->
+    <HiveEventCounter name="EventCounter"/>
+  </Appenders>
+
+  <Loggers>
+    <Root level="${sys:hive.log.threshold}">
+      <AppenderRef ref="${sys:hive.root.logger}" level="${sys:hive.log.level}"/>
+      <AppenderRef ref="EventCounter" />
+    </Root>
+
+    <!-- Silence useless ZK logs -->
+    <Logger name="org.apache.zookeeper.server.NIOServerCnxn" level="WARN">
+      <AppenderRef ref="${sys:hive.root.logger}"/>
+    </Logger>
+    <Logger name="org.apache.zookeeper.ClientCnxnSocketNIO" level="WARN">
+      <AppenderRef ref="${sys:hive.root.logger}"/>
+    </Logger>
+
+    <Logger name="DataNucleus" level="ERROR">
+      <AppenderRef ref="${sys:hive.root.logger}"/>
+    </Logger>
+    <Logger name="Datastore" level="ERROR">
+      <AppenderRef ref="${sys:hive.root.logger}"/>
+    </Logger>
+    <Logger name="Datastore.Schema" level="ERROR">
+      <AppenderRef ref="${sys:hive.root.logger}"/>
+    </Logger>
+    <Logger name="JPOX.Datastore" level="ERROR">
+      <AppenderRef ref="${sys:hive.root.logger}"/>
+    </Logger>
+    <Logger name="JPOX.Plugin" level="ERROR">
+      <AppenderRef ref="${sys:hive.root.logger}"/>
+    </Logger>
+    <Logger name="JPOX.Metadata" level="ERROR">
+      <AppenderRef ref="${sys:hive.root.logger}"/>
+    </Logger>
+    <Logger name="JPOX.Query" level="ERROR">
+      <AppenderRef ref="${sys:hive.root.logger}"/>
+    </Logger>
+    <Logger name="JPOX.General" level="ERROR">
+      <AppenderRef ref="${sys:hive.root.logger}"/>
+    </Logger>
+    <Logger name="JPOX.Enhancer" level="ERROR">
+      <AppenderRef ref="${sys:hive.root.logger}"/>
+    </Logger>
+  </Loggers>
+
+</Configuration>

http://git-wip-us.apache.org/repos/asf/hive/blob/c93d6c77/data/conf/hive-log4j-old.properties
----------------------------------------------------------------------
diff --git a/data/conf/hive-log4j-old.properties b/data/conf/hive-log4j-old.properties
deleted file mode 100644
index f274b8c..0000000
--- a/data/conf/hive-log4j-old.properties
+++ /dev/null
@@ -1,82 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# Define some default values that can be overridden by system properties
-hive.root.logger=DEBUG,DRFA
-hive.log.dir=${build.dir.hive}/ql/tmp/
-hive.log.file=hive.log
-
-# Define the root logger to the system property "hadoop.root.logger".
-log4j.rootLogger=${hive.root.logger}, EventCounter
-
-# Logging Threshold
-log4j.threshhold=WARN
-
-#
-# Daily Rolling File Appender
-#
-
-log4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender
-log4j.appender.DRFA.File=${hive.log.dir}/${hive.log.file}
-
-# Rollver at midnight
-log4j.appender.DRFA.DatePattern=.yyyy-MM-dd
-
-# 30-day backup
-#log4j.appender.DRFA.MaxBackupIndex=30
-log4j.appender.DRFA.layout=org.apache.log4j.PatternLayout
-
-# Pattern format: Date LogLevel LoggerName LogMessage
-#log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
-# Debugging Pattern format
-log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n
-
-
-#
-# console
-# Add "console" to rootlogger above if you want to use this
-#
-
-log4j.appender.console=org.apache.log4j.ConsoleAppender
-log4j.appender.console.target=System.err
-log4j.appender.console.layout=org.apache.log4j.PatternLayout
-log4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n
-
-#custom logging levels
-#log4j.logger.xxx=DEBUG
-
-#
-# Event Counter Appender
-# Sends counts of logging messages at different severity levels to Hadoop Metrics.
-#
-log4j.appender.EventCounter=org.apache.hadoop.hive.shims.HiveEventCounter
-
-
-log4j.category.DataNucleus=ERROR,DRFA
-log4j.category.Datastore=ERROR,DRFA
-log4j.category.Datastore.Schema=ERROR,DRFA
-log4j.category.JPOX.Datastore=ERROR,DRFA
-log4j.category.JPOX.Plugin=ERROR,DRFA
-log4j.category.JPOX.MetaData=ERROR,DRFA
-log4j.category.JPOX.Query=ERROR,DRFA
-log4j.category.JPOX.General=ERROR,DRFA
-log4j.category.JPOX.Enhancer=ERROR,DRFA
-log4j.logger.org.apache.hadoop.conf.Configuration=ERROR,DRFA
-
-
-# Silence useless ZK logs
-log4j.logger.org.apache.zookeeper.server.NIOServerCnxn=WARN,DRFA
-log4j.logger.org.apache.zookeeper.ClientCnxnSocketNIO=WARN,DRFA

http://git-wip-us.apache.org/repos/asf/hive/blob/c93d6c77/data/conf/hive-log4j.properties
----------------------------------------------------------------------
diff --git a/data/conf/hive-log4j.properties b/data/conf/hive-log4j.properties
deleted file mode 100644
index 023e3c2..0000000
--- a/data/conf/hive-log4j.properties
+++ /dev/null
@@ -1,97 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# Define some default values that can be overridden by system properties
-hive.root.logger=DEBUG,DRFA
-hive.log.dir=${test.tmp.dir}/log/
-hive.log.file=hive.log
-
-# Define the root logger to the system property "hadoop.root.logger".
-log4j.rootLogger=${hive.root.logger}, EventCounter
-
-# Logging Threshold
-log4j.threshhold=WARN
-
-#
-# Daily Rolling File Appender
-#
-
-log4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender
-log4j.appender.DRFA.File=${hive.log.dir}/${hive.log.file}
-
-# Rollver at midnight
-log4j.appender.DRFA.DatePattern=.yyyy-MM-dd
-
-# 30-day backup
-#log4j.appender.DRFA.MaxBackupIndex=30
-log4j.appender.DRFA.layout=org.apache.log4j.PatternLayout
-
-# Pattern format: Date LogLevel LoggerName LogMessage
-#log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
-# Debugging Pattern format
-log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %-5p [%t]: %c{2} (%F:%M(%L)) - %m%n
-
-# Warning: If you enable the following appender it will fill up your disk if you don't have a cleanup job!
-# This uses the updated rolling file appender from log4j-extras that supports a reliable time-based rolling policy.
-# See http://logging.apache.org/log4j/companions/extras/apidocs/org/apache/log4j/rolling/TimeBasedRollingPolicy.html
-# Add "DAILY" to hive.root.logger above if you want to use this.
-log4j.appender.DAILY=org.apache.log4j.rolling.RollingFileAppender
-log4j.appender.DAILY.rollingPolicy=org.apache.log4j.rolling.TimeBasedRollingPolicy
-log4j.appender.DAILY.rollingPolicy.ActiveFileName=${hive.log.dir}/${hive.log.file}
-log4j.appender.DAILY.rollingPolicy.FileNamePattern=${hive.log.dir}/${hive.log.file}.%d{yyyy-MM-dd}
-log4j.appender.DAILY.layout=org.apache.log4j.PatternLayout
-log4j.appender.DAILY.layout.ConversionPattern=%d{dd MMM yyyy HH:mm:ss,SSS} %-5p [%t] (%C.%M:%L) %x - %m%n
-
-#
-# console
-# Add "console" to rootlogger above if you want to use this
-#
-
-log4j.appender.console=org.apache.log4j.ConsoleAppender
-log4j.appender.console.target=System.err
-log4j.appender.console.layout=org.apache.log4j.PatternLayout
-log4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n
-
-#custom logging levels
-#log4j.logger.xxx=DEBUG
-
-#
-# Event Counter Appender
-# Sends counts of logging messages at different severity levels to Hadoop Metrics.
-#
-log4j.appender.EventCounter=org.apache.hadoop.hive.shims.HiveEventCounter
-
-
-log4j.category.DataNucleus=ERROR,DRFA
-log4j.category.Datastore=ERROR,DRFA
-log4j.category.Datastore.Schema=ERROR,DRFA
-log4j.category.JPOX.Datastore=ERROR,DRFA
-log4j.category.JPOX.Plugin=ERROR,DRFA
-log4j.category.JPOX.MetaData=ERROR,DRFA
-log4j.category.JPOX.Query=ERROR,DRFA
-log4j.category.JPOX.General=ERROR,DRFA
-log4j.category.JPOX.Enhancer=ERROR,DRFA
-log4j.logger.org.apache.hadoop.conf.Configuration=ERROR,DRFA
-log4j.logger.org.apache.zookeeper=INFO,DRFA
-log4j.logger.org.apache.zookeeper.server.ServerCnxn=WARN,DRFA
-log4j.logger.org.apache.zookeeper.server.NIOServerCnxn=WARN,DRFA
-log4j.logger.org.apache.zookeeper.ClientCnxn=WARN,DRFA
-log4j.logger.org.apache.zookeeper.ClientCnxnSocket=WARN,DRFA
-log4j.logger.org.apache.zookeeper.ClientCnxnSocketNIO=WARN,DRFA
-log4j.logger.org.apache.hadoop.hive.ql.log.PerfLogger=${hive.ql.log.PerfLogger.level}
-log4j.logger.org.apache.hadoop.hive.ql.exec.Operator=INFO,DRFA
-log4j.logger.org.apache.hadoop.hive.serde2.lazy=INFO,DRFA
-log4j.logger.org.apache.hadoop.hive.metastore.ObjectStore=INFO,DRFA

http://git-wip-us.apache.org/repos/asf/hive/blob/c93d6c77/data/conf/hive-log4j2.xml
----------------------------------------------------------------------
diff --git a/data/conf/hive-log4j2.xml b/data/conf/hive-log4j2.xml
new file mode 100644
index 0000000..c9adfa2
--- /dev/null
+++ b/data/conf/hive-log4j2.xml
@@ -0,0 +1,139 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+
+<Configuration status="info" strict="true" name="HiveLog4j2"
+ packages="org.apache.hadoop.hive.ql.log">
+
+  <Properties>
+    <Property name="hive.log.threshold">ALL</Property>
+    <Property name="hive.log.level">DEBUG</Property>
+    <Property name="hive.root.logger">DRFA</Property>
+    <Property name="hive.log.dir">${sys:test.tmp.dir}/log</Property>
+    <Property name="hive.log.file">hive.log</Property>
+    <Property name="hive.ql.log.PerfLogger.level">INFO</Property>
+  </Properties>
+
+  <Appenders>
+    <Console name="console" target="SYSTEM_ERR">
+      <PatternLayout pattern="%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n"/>
+    </Console>
+
+    <!-- Regular File Appender -->
+    <!-- NOTE: if enabling multiple file appenders make sure to use different file names -->
+    <!-- <File name="FA" fileName="${sys:hive.log.dir}/${sys:hive.log.file}">
+      <PatternLayout pattern="%d{ISO8601} %-5p [%t]: %c{2} (%F:%M(%L)) - %m%n" />
+    </File> -->
+
+    <!-- Daily Rolling File Appender -->
+    <!-- NOTE: if enabling multiple file appenders make sure to use different file names -->
+    <!-- Use %pid in the filePattern to append <process-id>@<host-name> to the filename if you want separate log files for different CLI sessions -->
+    <RollingFile name="DRFA" fileName="${sys:hive.log.dir}/${sys:hive.log.file}"
+     filePattern="${sys:hive.log.dir}/${sys:hive.log.file}.%d{yyyy-MM-dd}">
+      <PatternLayout pattern="%d{ISO8601} %-5p [%t]: %c{2} (%F:%M(%L)) - %m%n" />
+      <Policies>
+        <!-- Rollover at midnight (interval = 1 means daily) -->
+        <TimeBasedTriggeringPolicy interval="1" modulate="true"/>
+      </Policies>
+      <!-- 30-day backup -->
+      <!-- <DefaultRolloverStrategy max="30"/> -->
+    </RollingFile>
+
+    <!-- Size based Rolling File Appender -->
+    <!-- NOTE: if enabling multiple file appenders make sure to use different file names -->
+    <!-- <RollingFile name="RFA" fileName="${sys:hive.log.dir}/${sys:hive.log.file}"
+     filePattern="${sys:hive.log.dir}/${sys:hive.log.file}.%i">
+      <PatternLayout pattern="%d{ISO8601} %-5p [%t]: %c{2} (%F:%M(%L)) - %m%n" />
+      <Policies>
+        <SizeBasedTriggeringPolicy size="256 MB" />
+      </Policies>
+      <DefaultRolloverStrategy max="10"/>
+    </RollingFile> -->
+
+    <!-- HiveEventCounter appender is loaded from Configuration packages attribute. Sends counts of logging messages at different severity levels to Hadoop Metrics. -->
+    <HiveEventCounter name="EventCounter"/>
+  </Appenders>
+
+  <Loggers>
+    <Root level="${sys:hive.log.threshold}">
+      <AppenderRef ref="${sys:hive.root.logger}" level="${sys:hive.log.level}"/>
+      <AppenderRef ref="EventCounter" />
+    </Root>
+
+    <Logger name="org.apache.hadoop.conf.Configuration" level="ERROR">
+      <AppenderRef ref="${sys:hive.root.logger}"/>
+    </Logger>
+    <Logger name="org.apache.zookeeper" level="INFO">
+      <AppenderRef ref="${sys:hive.root.logger}"/>
+    </Logger>
+    <Logger name="org.apache.zookeeper.server.ServerCnxn" level="WARN">
+      <AppenderRef ref="${sys:hive.root.logger}"/>
+    </Logger>
+    <Logger name="org.apache.zookeeper.server.NIOServerCnxn" level="WARN">
+      <AppenderRef ref="${sys:hive.root.logger}"/>
+    </Logger>
+    <Logger name="org.apache.zookeeper.ClientCnxn" level="WARN">
+      <AppenderRef ref="${sys:hive.root.logger}"/>
+    </Logger>
+    <Logger name="org.apache.zookeeper.ClientCnxnSocket" level="WARN">
+      <AppenderRef ref="${sys:hive.root.logger}"/>
+    </Logger>
+    <Logger name="org.apache.zookeeper.ClientCnxnSocketNIO" level="WARN">
+      <AppenderRef ref="${sys:hive.root.logger}"/>
+    </Logger>
+    <Logger name="org.apache.hadoop.hive.ql.log.PerfLogger" level="${sys:hive.ql.log.PerfLogger.level}">
+      <AppenderRef ref="${sys:hive.root.logger}"/>
+    </Logger>
+    <Logger name="org.apache.hadoop.hive.ql.exec.Operator" level="INFO">
+      <AppenderRef ref="${sys:hive.root.logger}"/>
+    </Logger>
+    <Logger name="org.apache.hadoop.hive.serde2.lazy" level="INFO">
+      <AppenderRef ref="${sys:hive.root.logger}"/>
+    </Logger>
+    <Logger name="org.apache.hadoop.hive.metastore.ObjectStore" level="INFO">
+      <AppenderRef ref="${sys:hive.root.logger}"/>
+    </Logger>
+    <Logger name="DataNucleus" level="ERROR">
+      <AppenderRef ref="${sys:hive.root.logger}"/>
+    </Logger>
+    <Logger name="Datastore" level="ERROR">
+      <AppenderRef ref="${sys:hive.root.logger}"/>
+    </Logger>
+    <Logger name="Datastore.Schema" level="ERROR">
+      <AppenderRef ref="${sys:hive.root.logger}"/>
+    </Logger>
+    <Logger name="JPOX.Datastore" level="ERROR">
+      <AppenderRef ref="${sys:hive.root.logger}"/>
+    </Logger>
+    <Logger name="JPOX.Plugin" level="ERROR">
+      <AppenderRef ref="${sys:hive.root.logger}"/>
+    </Logger>
+    <Logger name="JPOX.Metadata" level="ERROR">
+      <AppenderRef ref="${sys:hive.root.logger}"/>
+    </Logger>
+    <Logger name="JPOX.Query" level="ERROR">
+      <AppenderRef ref="${sys:hive.root.logger}"/>
+    </Logger>
+    <Logger name="JPOX.General" level="ERROR">
+      <AppenderRef ref="${sys:hive.root.logger}"/>
+    </Logger>
+    <Logger name="JPOX.Enhancer" level="ERROR">
+      <AppenderRef ref="${sys:hive.root.logger}"/>
+    </Logger>
+    <Logger name="JPOX.Enhancer" level="ERROR">
+      <AppenderRef ref="${sys:hive.root.logger}"/>
+    </Logger>
+    <Logger name="JPOX.Enhancer" level="ERROR">
+      <AppenderRef ref="${sys:hive.root.logger}"/>
+    </Logger>
+    <Logger name="JPOX.Enhancer" level="ERROR">
+      <AppenderRef ref="${sys:hive.root.logger}"/>
+    </Logger>
+  </Loggers>
+
+</Configuration>
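
Every value in the new hive-log4j2.xml above is resolved through a ${sys:...} lookup, so the defaults declared in the Properties block can be overridden with Java system properties at JVM launch. A minimal sketch of such an override, assuming the launcher forwards HADOOP_CLIENT_OPTS to the JVM; the config path and log directory below are placeholders, not values taken from this patch:

  # override the lookups defined in the Properties block above
  export HADOOP_CLIENT_OPTS="$HADOOP_CLIENT_OPTS \
    -Dlog4j.configurationFile=file:///opt/hive/conf/hive-log4j2.xml \
    -Dhive.root.logger=console \
    -Dhive.log.dir=/tmp/$USER"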

http://git-wip-us.apache.org/repos/asf/hive/blob/c93d6c77/data/conf/spark/log4j.properties
----------------------------------------------------------------------
diff --git a/data/conf/spark/log4j.properties b/data/conf/spark/log4j.properties
deleted file mode 100644
index 8838c0e..0000000
--- a/data/conf/spark/log4j.properties
+++ /dev/null
@@ -1,24 +0,0 @@
-log4j.rootCategory=DEBUG, DRFA
-
-hive.spark.log.dir=target/tmp/log
-# Settings to quiet third party logs that are too verbose
-log4j.logger.org.eclipse.jetty=WARN
-log4j.logger.org.eclipse.jetty.util.component.AbstractLifeCycle=ERROR
-log4j.logger.org.apache.spark.repl.SparkIMain$exprTyper=INFO
-log4j.logger.org.apache.spark.repl.SparkILoop$SparkILoopInterpreter=INFO
-
-log4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender
-
-log4j.appender.DRFA.File=${hive.spark.log.dir}/spark.log
-
-# Rollver at midnight
-log4j.appender.DRFA.DatePattern=.yyyy-MM-dd
-
-# 30-day backup
-#log4j.appender.DRFA.MaxBackupIndex=30
-log4j.appender.DRFA.layout=org.apache.log4j.PatternLayout
-
-# Pattern format: Date LogLevel LoggerName LogMessage
-#log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
-# Debugging Pattern format
-log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %-5p [%t]: %c{2} (%F:%M(%L)) - %m%n

http://git-wip-us.apache.org/repos/asf/hive/blob/c93d6c77/data/conf/spark/log4j2.xml
----------------------------------------------------------------------
diff --git a/data/conf/spark/log4j2.xml b/data/conf/spark/log4j2.xml
new file mode 100644
index 0000000..395a2bf
--- /dev/null
+++ b/data/conf/spark/log4j2.xml
@@ -0,0 +1,74 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+
+<Configuration status="info" strict="true" name="SparkLog4j2"
+ packages="org.apache.hadoop.hive.ql.log">
+
+  <Properties>
+    <Property name="spark.log.level">DEBUG</Property>
+    <Property name="spark.root.logger">DRFA</Property>
+    <Property name="spark.log.dir">target/tmp/log</Property>
+    <Property name="spark.log.file">spark.log</Property>
+  </Properties>
+
+  <Appenders>
+    <Console name="console" target="SYSTEM_ERR">
+      <PatternLayout pattern="%d{yy/MM/dd HH:mm:ss} [%t]: %p %c{2}: %m%n"/>
+    </Console>
+
+    <!-- Regular File Appender -->
+    <!-- NOTE: if enabling multiple file appenders make sure to use different file names -->
+    <!-- <File name="FA" fileName="${sys:spark.log.dir}/${sys:spark.log.file}">
+      <PatternLayout pattern="%d{ISO8601} %-5p [%t]: %c{2} (%F:%M(%L)) - %m%n" />
+    </File> -->
+
+    <!-- Daily Rolling File Appender -->
+    <!-- Use %pid in the filePattern to append <process-id>@<host-name> to the filename if you want separate log files for different CLI sessions -->
+    <RollingFile name="DRFA" fileName="${sys:spark.log.dir}/${sys:spark.log.file}"
+     filePattern="${sys:spark.log.dir}/${sys:spark.log.file}.%d{yyyy-MM-dd}">
+      <PatternLayout pattern="%d{ISO8601} %-5p [%t]: %c{2} (%F:%M(%L)) - %m%n" />
+      <Policies>
+        <!-- Rollover at midnight (interval = 1 means daily) -->
+        <TimeBasedTriggeringPolicy interval="1" modulate="true"/>
+      </Policies>
+      <!-- 30-day backup -->
+      <!-- <DefaultRolloverStrategy max="30"/> -->
+    </RollingFile>
+
+  </Appenders>
+
+  <Loggers>
+    <Root level="DEBUG">
+      <AppenderRef ref="${sys:spark.root.logger}" level="${sys:spark.log.level}"/>
+    </Root>
+
+    <Logger name="org.apache.spark.repl.SparkIMain$exprTyper" level="INFO">
+      <AppenderRef ref="${sys:spark.root.logger}"/>
+    </Logger>
+    <Logger name="org.apache.spark.repl.SparkILoop$SparkILoopInterpreter" level="INFO">
+      <AppenderRef ref="${sys:spark.root.logger}"/>
+    </Logger>
+    <Logger name="org.eclipse.jetty" level="WARN">
+      <AppenderRef ref="${sys:spark.root.logger}"/>
+    </Logger>
+    <Logger name="org.eclipse.jetty.util.component.AbstractLifeCycle" level="ERROR">
+      <AppenderRef ref="${sys:spark.root.logger}"/>
+    </Logger>
+  </Loggers>
+
+</Configuration>

http://git-wip-us.apache.org/repos/asf/hive/blob/c93d6c77/docs/xdocs/language_manual/cli.xml
----------------------------------------------------------------------
diff --git a/docs/xdocs/language_manual/cli.xml b/docs/xdocs/language_manual/cli.xml
index a293382..eb91e44 100644
--- a/docs/xdocs/language_manual/cli.xml
+++ b/docs/xdocs/language_manual/cli.xml
@@ -163,7 +163,7 @@ Sample Usage:
 
 <section name="Logging" href="logging">
 <p>
-Hive uses log4j for logging. These logs are not emitted to the standard output by default but are instead captured to a log file specified by Hive's log4j properties file. By default Hive will use <i>hive-log4j.default</i> in the <i>conf/</i> directory of the hive installation which writes out logs to <i>/tmp/$USER/hive.log</i> and uses the <i>WARN</i> level.
+Hive uses log4j for logging. These logs are not emitted to the standard output by default but are instead captured to a log file specified by Hive's log4j2 configuration file. By default Hive will use <i>hive-log4j2.xml</i> in the <i>conf/</i> directory of the hive installation, which writes out logs to <i>/tmp/$USER/hive.log</i> and uses the <i>WARN</i> level.
 </p>
 <p>
 It is often desirable to emit the logs to the standard output and/or change the logging level for debugging purposes. These can be done from the command line as follows: </p>
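
The command-line override the paragraph above refers to normally goes through hive.root.logger. A rough sketch, assuming the CLI keeps accepting the classic -hiveconf form after the switch to log4j2; the appender name "console" matches the one defined in hive-log4j2.xml:

  # emit logs to stderr at INFO for this invocation only
  bin/hive -hiveconf hive.root.logger=INFO,console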

http://git-wip-us.apache.org/repos/asf/hive/blob/c93d6c77/hcatalog/bin/hcat_server.sh
----------------------------------------------------------------------
diff --git a/hcatalog/bin/hcat_server.sh b/hcatalog/bin/hcat_server.sh
index 6b09d3e..d1aecb8 100644
--- a/hcatalog/bin/hcat_server.sh
+++ b/hcatalog/bin/hcat_server.sh
@@ -84,7 +84,7 @@ function start_hcat() {
   export AUX_CLASSPATH=${AUX_CLASSPATH}
 
   export HADOOP_HOME=$HADOOP_HOME
-  #export HADOOP_OPTS="-Dlog4j.configuration=file://${HCAT_PREFIX}/conf/log4j.properties"
+  #export HADOOP_OPTS="-Dlog4j.configurationFile=file://${HCAT_PREFIX}/conf/log4j2.xml"
   export HADOOP_OPTS="${HADOOP_OPTS} -server -XX:+UseConcMarkSweepGC -XX:ErrorFile=${HCAT_LOG_DIR}/hcat_err_pid%p.log -Xloggc:${HCAT_LOG_DIR}/hcat_gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps"
   export HADOOP_HEAPSIZE=${HADOOP_HEAPSIZE:-2048} # 8G is better if you have it
   export METASTORE_PORT=${METASTORE_PORT:-9083}
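
For log4j2 the JVM flag is -Dlog4j.configurationFile instead of -Dlog4j.configuration, which is all the updated comment above changes. If that line were ever uncommented, appending to HADOOP_OPTS rather than overwriting it keeps the GC and heap flags the script adds next; a sketch using the same path as the commented example:

  export HADOOP_OPTS="${HADOOP_OPTS} -Dlog4j.configurationFile=file://${HCAT_PREFIX}/conf/log4j2.xml"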

http://git-wip-us.apache.org/repos/asf/hive/blob/c93d6c77/hcatalog/bin/templeton.cmd
----------------------------------------------------------------------
diff --git a/hcatalog/bin/templeton.cmd b/hcatalog/bin/templeton.cmd
index e9a735d..759f654 100644
--- a/hcatalog/bin/templeton.cmd
+++ b/hcatalog/bin/templeton.cmd
@@ -59,9 +59,9 @@ setlocal enabledelayedexpansion
 
   if not defined TEMPLETON_LOG4J (
     @rem must be prefixed with file: otherwise config is not picked up
-    set TEMPLETON_LOG4J=file:%WEBHCAT_CONF_DIR%\webhcat-log4j.properties
+    set TEMPLETON_LOG4J=file:%WEBHCAT_CONF_DIR%\webhcat-log4j2.xml
   )
-  set TEMPLETON_OPTS=-Dtempleton.log.dir=%TEMPLETON_LOG_DIR% -Dlog4j.configuration=%TEMPLETON_LOG4J% %HADOOP_OPTS%
+  set TEMPLETON_OPTS=-Dtempleton.log.dir=%TEMPLETON_LOG_DIR% -Dlog4j.configurationFile=%TEMPLETON_LOG4J% %HADOOP_OPTS%
   set arguments=%JAVA_HEAP_MAX% %TEMPLETON_OPTS% -classpath %CLASSPATH% org.apache.hive.hcatalog.templeton.Main
   
   if defined service_entry (

http://git-wip-us.apache.org/repos/asf/hive/blob/c93d6c77/hcatalog/scripts/hcat_server_start.sh
----------------------------------------------------------------------
diff --git a/hcatalog/scripts/hcat_server_start.sh b/hcatalog/scripts/hcat_server_start.sh
index 1670b70..872d1b5 100755
--- a/hcatalog/scripts/hcat_server_start.sh
+++ b/hcatalog/scripts/hcat_server_start.sh
@@ -70,7 +70,7 @@ export AUX_CLASSPATH=${AUX_CLASSPATH}
 
 
 export HADOOP_HOME=$HADOOP_HOME
-#export HADOOP_OPTS="-Dlog4j.configuration=file://${ROOT}/conf/log4j.properties"
+#export HADOOP_OPTS="-Dlog4j.configurationFile=file://${ROOT}/conf/log4j2.xml"
 export HADOOP_OPTS="${HADOOP_OPTS} -server -XX:+UseConcMarkSweepGC -XX:ErrorFile=${ROOT}/var/log/hcat_err_pid%p.log -Xloggc:${ROOT}/var/log/hcat_gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps"
 export HADOOP_HEAPSIZE=2048 # 8G is better if you have it
 

http://git-wip-us.apache.org/repos/asf/hive/blob/c93d6c77/hcatalog/src/docs/src/documentation/content/xdocs/configuration.xml
----------------------------------------------------------------------
diff --git a/hcatalog/src/docs/src/documentation/content/xdocs/configuration.xml b/hcatalog/src/docs/src/documentation/content/xdocs/configuration.xml
index 9757b9c..6385e40 100644
--- a/hcatalog/src/docs/src/documentation/content/xdocs/configuration.xml
+++ b/hcatalog/src/docs/src/documentation/content/xdocs/configuration.xml
@@ -66,7 +66,7 @@ ${env.PIG_HOME}/bin/pig
      uncertainty.</p>
 
   <p><strong>Note:</strong> The location of the log files created by Templeton and some other properties
-     of the logging system are set in the webhcat-log4j.properties file.</p>
+     of the logging system are set in the webhcat-log4j2.xml file.</p>
 
   <section>
   <title>Variables</title>

http://git-wip-us.apache.org/repos/asf/hive/blob/c93d6c77/hcatalog/src/docs/src/documentation/content/xdocs/install.xml
----------------------------------------------------------------------
diff --git a/hcatalog/src/docs/src/documentation/content/xdocs/install.xml b/hcatalog/src/docs/src/documentation/content/xdocs/install.xml
index 16da248..e2953a9 100644
--- a/hcatalog/src/docs/src/documentation/content/xdocs/install.xml
+++ b/hcatalog/src/docs/src/documentation/content/xdocs/install.xml
@@ -241,7 +241,7 @@
 
     <p>Server activity logs are located in
     <em>root</em><code>/var/log/hcat_server</code>.  Logging configuration is located at
-    <em>root</em><code>/conf/log4j.properties</code>.  Server logging uses
+    <em>root</em><code>/conf/log4j2.xml</code>.  Server logging uses
     <code>DailyRollingFileAppender</code> by default. It will generate a new
     file per day and does not expire old log files automatically.</p>
 

http://git-wip-us.apache.org/repos/asf/hive/blob/c93d6c77/hcatalog/src/test/e2e/templeton/deployers/config/hive/hive-log4j.properties
----------------------------------------------------------------------
diff --git a/hcatalog/src/test/e2e/templeton/deployers/config/hive/hive-log4j.properties b/hcatalog/src/test/e2e/templeton/deployers/config/hive/hive-log4j.properties
deleted file mode 100644
index 82684b3..0000000
--- a/hcatalog/src/test/e2e/templeton/deployers/config/hive/hive-log4j.properties
+++ /dev/null
@@ -1,88 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# Define some default values that can be overridden by system properties
-hive.log.threshold=ALL
-hive.root.logger=DEBUG,DRFA
-hive.log.dir=/tmp/ekoifman
-hive.log.file=hive.log
-
-# Define the root logger to the system property "hadoop.root.logger".
-log4j.rootLogger=${hive.root.logger}, EventCounter
-
-# Logging Threshold
-log4j.threshold=${hive.log.threshold}
-
-#
-# Daily Rolling File Appender
-#
-# Use the PidDailyerRollingFileAppend class instead if you want to use separate log files
-# for different CLI session.
-#
-# log4j.appender.DRFA=org.apache.hadoop.hive.ql.log.PidDailyRollingFileAppender
-
-log4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender
-
-log4j.appender.DRFA.File=${hive.log.dir}/${hive.log.file}
-
-# Rollver at midnight
-log4j.appender.DRFA.DatePattern=.yyyy-MM-dd
-
-# 30-day backup
-#log4j.appender.DRFA.MaxBackupIndex=30
-log4j.appender.DRFA.layout=org.apache.log4j.PatternLayout
-
-# Pattern format: Date LogLevel LoggerName LogMessage
-#log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
-# Debugging Pattern format
-log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %-5p [%t]: %c{2} (%F:%M(%L)) - %m%n
-
-
-#
-# console
-# Add "console" to rootlogger above if you want to use this
-#
-
-log4j.appender.console=org.apache.log4j.ConsoleAppender
-log4j.appender.console.target=System.err
-log4j.appender.console.layout=org.apache.log4j.PatternLayout
-log4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} [%t]: %p %c{2}: %m%n
-log4j.appender.console.encoding=UTF-8
-
-#custom logging levels
-#log4j.logger.xxx=DEBUG
-
-#
-# Event Counter Appender
-# Sends counts of logging messages at different severity levels to Hadoop Metrics.
-#
-log4j.appender.EventCounter=org.apache.hadoop.hive.shims.HiveEventCounter
-
-
-log4j.category.DataNucleus=ERROR,DRFA
-log4j.category.Datastore=ERROR,DRFA
-log4j.category.Datastore.Schema=ERROR,DRFA
-log4j.category.JPOX.Datastore=ERROR,DRFA
-log4j.category.JPOX.Plugin=ERROR,DRFA
-log4j.category.JPOX.MetaData=ERROR,DRFA
-log4j.category.JPOX.Query=ERROR,DRFA
-log4j.category.JPOX.General=ERROR,DRFA
-log4j.category.JPOX.Enhancer=ERROR,DRFA
-
-
-# Silence useless ZK logs
-log4j.logger.org.apache.zookeeper.server.NIOServerCnxn=WARN,DRFA
-log4j.logger.org.apache.zookeeper.ClientCnxnSocketNIO=WARN,DRFA

http://git-wip-us.apache.org/repos/asf/hive/blob/c93d6c77/hcatalog/src/test/e2e/templeton/deployers/config/hive/hive-log4j2.xml
----------------------------------------------------------------------
diff --git a/hcatalog/src/test/e2e/templeton/deployers/config/hive/hive-log4j2.xml b/hcatalog/src/test/e2e/templeton/deployers/config/hive/hive-log4j2.xml
new file mode 100644
index 0000000..30f7603
--- /dev/null
+++ b/hcatalog/src/test/e2e/templeton/deployers/config/hive/hive-log4j2.xml
@@ -0,0 +1,111 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+
+<Configuration status="info" strict="true" name="HCatE2ELog4j2"
+ packages="org.apache.hadoop.hive.ql.log">
+
+  <Properties>
+    <Property name="hive.log.threshold">ALL</Property>
+    <Property name="hive.log.level">DEBUG</Property>
+    <Property name="hive.root.logger">DRFA</Property>
+    <Property name="hive.log.dir">${sys:java.io.tmpdir}/${sys:user.name}</Property>
+    <Property name="hive.log.file">hive.log</Property>
+  </Properties>
+
+  <Appenders>
+    <Console name="console" target="SYSTEM_ERR">
+      <PatternLayout pattern="%d{yy/MM/dd HH:mm:ss} [%t]: %p %c{2}: %m%n"/>
+    </Console>
+
+    <!-- Regular File Appender -->
+    <!-- NOTE: if enabling multiple file appenders make sure to use different file names -->
+    <!-- <File name="FA" fileName="${sys:hive.log.dir}/${sys:hive.log.file}">
+      <PatternLayout pattern="%d{ISO8601} %-5p [%t]: %c{2} (%F:%M(%L)) - %m%n" />
+    </File> -->
+
+    <!-- Daily Rolling File Appender -->
+    <!-- NOTE: if enabling multiple file appenders make sure to use different file names -->
+    <!-- Use %pid in the filePattern to append <process-id>@<host-name> to the filename if you want separate log files for different CLI sessions -->
+    <RollingFile name="DRFA" fileName="${sys:hive.log.dir}/${sys:hive.log.file}"
+     filePattern="${sys:hive.log.dir}/${sys:hive.log.file}.%d{yyyy-MM-dd}">
+      <PatternLayout pattern="%d{ISO8601} %-5p [%t]: %c{2} (%F:%M(%L)) - %m%n" />
+      <Policies>
+        <!-- Rollover at midnight (interval = 1 means daily) -->
+        <TimeBasedTriggeringPolicy interval="1" modulate="true"/>
+      </Policies>
+      <!-- 30-day backup -->
+      <!-- <DefaultRolloverStrategy max="30"/> -->
+    </RollingFile>
+
+    <!-- Size based Rolling File Appender -->
+    <!-- NOTE: if enabling multiple file appenders make sure to use different file names -->
+    <!-- <RollingFile name="RFA" fileName="${sys:hive.log.dir}/${sys:hive.log.file}"
+     filePattern="${sys:hive.log.dir}/${sys:hive.log.file}.%i">
+      <PatternLayout pattern="%d{ISO8601} %-5p [%t]: %c{2} (%F:%M(%L)) - %m%n" />
+      <Policies>
+        <SizeBasedTriggeringPolicy size="256 MB" />
+      </Policies>
+      <DefaultRolloverStrategy max="10"/>
+    </RollingFile> -->
+
+    <!-- HiveEventCounter appender is loaded from Configuration packages attribute. Sends counts of logging messages at different severity levels to Hadoop Metrics. -->
+    <HiveEventCounter name="EventCounter"/>
+  </Appenders>
+
+  <Loggers>
+    <Root level="${sys:hive.log.threshold}">
+      <AppenderRef ref="${sys:hive.root.logger}" level="${sys:hive.log.level}"/>
+      <AppenderRef ref="EventCounter" />
+    </Root>
+
+    <Logger name="org.apache.zookeeper.server.NIOServerCnxn" level="WARN">
+      <AppenderRef ref="${sys:hive.root.logger}"/>
+    </Logger>
+    <Logger name="org.apache.zookeeper.ClientCnxnSocketNIO" level="WARN">
+      <AppenderRef ref="${sys:hive.root.logger}"/>
+    </Logger>
+    <Logger name="DataNucleus" level="ERROR">
+      <AppenderRef ref="${sys:hive.root.logger}"/>
+    </Logger>
+    <Logger name="Datastore" level="ERROR">
+      <AppenderRef ref="${sys:hive.root.logger}"/>
+    </Logger>
+    <Logger name="Datastore.Schema" level="ERROR">
+      <AppenderRef ref="${sys:hive.root.logger}"/>
+    </Logger>
+    <Logger name="JPOX.Datastore" level="ERROR">
+      <AppenderRef ref="${sys:hive.root.logger}"/>
+    </Logger>
+    <Logger name="JPOX.Plugin" level="ERROR">
+      <AppenderRef ref="${sys:hive.root.logger}"/>
+    </Logger>
+    <Logger name="JPOX.Metadata" level="ERROR">
+      <AppenderRef ref="${sys:hive.root.logger}"/>
+    </Logger>
+    <Logger name="JPOX.Query" level="ERROR">
+      <AppenderRef ref="${sys:hive.root.logger}"/>
+    </Logger>
+    <Logger name="JPOX.General" level="ERROR">
+      <AppenderRef ref="${sys:hive.root.logger}"/>
+    </Logger>
+    <Logger name="JPOX.Enhancer" level="ERROR">
+      <AppenderRef ref="${sys:hive.root.logger}"/>
+    </Logger>
+  </Loggers>
+
+</Configuration>

http://git-wip-us.apache.org/repos/asf/hive/blob/c93d6c77/hcatalog/src/test/e2e/templeton/deployers/start_hive_services.sh
----------------------------------------------------------------------
diff --git a/hcatalog/src/test/e2e/templeton/deployers/start_hive_services.sh b/hcatalog/src/test/e2e/templeton/deployers/start_hive_services.sh
index 8cc9353..e59177c 100755
--- a/hcatalog/src/test/e2e/templeton/deployers/start_hive_services.sh
+++ b/hcatalog/src/test/e2e/templeton/deployers/start_hive_services.sh
@@ -31,7 +31,7 @@ cp ${PROJ_HOME}/hcatalog/src/test/e2e/templeton/deployers/config/hive/hive-site.
 #cp ${PROJ_HOME}/hcatalog/src/test/e2e/templeton/deployers/config/hive/hive-site.mssql.xml ${HIVE_HOME}/conf/hive-site.xml
 
 cp ${PROJ_HOME}/hcatalog/src/test/e2e/templeton/deployers/config/webhcat/webhcat-site.xml ${HIVE_HOME}/hcatalog/etc/webhcat/webhcat-site.xml
-cp ${PROJ_HOME}/hcatalog/src/test/e2e/templeton/deployers/config/hive/hive-log4j.properties ${HIVE_HOME}/conf/hive-log4j.properties
+cp ${PROJ_HOME}/hcatalog/src/test/e2e/templeton/deployers/config/hive/hive-log4j2.xml ${HIVE_HOME}/conf/hive-log4j2.xml
 
 if [ -f ${MYSQL_CLIENT_JAR} ]; then
   cp ${MYSQL_CLIENT_JAR} ${HIVE_HOME}/lib

http://git-wip-us.apache.org/repos/asf/hive/blob/c93d6c77/hcatalog/webhcat/svr/src/main/bin/webhcat_server.sh
----------------------------------------------------------------------
diff --git a/hcatalog/webhcat/svr/src/main/bin/webhcat_server.sh b/hcatalog/webhcat/svr/src/main/bin/webhcat_server.sh
index 0be8dde..c80fdd5 100644
--- a/hcatalog/webhcat/svr/src/main/bin/webhcat_server.sh
+++ b/hcatalog/webhcat/svr/src/main/bin/webhcat_server.sh
@@ -215,11 +215,11 @@ else
 fi
 
 if [[ -z "$WEBHCAT_LOG4J" ]]; then
-  WEBHCAT_LOG4J="file://$base_dir/etc/webhcat/webhcat-log4j.properties";
+  WEBHCAT_LOG4J="file://$base_dir/etc/webhcat/webhcat-log4j2.xml";
 fi
 
 export HADOOP_USER_CLASSPATH_FIRST=true
-export HADOOP_OPTS="${HADOOP_OPTS} -Dwebhcat.log.dir=$WEBHCAT_LOG_DIR -Dlog4j.configuration=$WEBHCAT_LOG4J"
+export HADOOP_OPTS="${HADOOP_OPTS} -Dwebhcat.log.dir=$WEBHCAT_LOG_DIR -Dlog4j.configurationFile=$WEBHCAT_LOG4J"
 
 start_cmd="$HADOOP_PREFIX/bin/hadoop jar $JAR org.apache.hive.hcatalog.templeton.Main  "
 

http://git-wip-us.apache.org/repos/asf/hive/blob/c93d6c77/hcatalog/webhcat/svr/src/main/config/webhcat-log4j.properties
----------------------------------------------------------------------
diff --git a/hcatalog/webhcat/svr/src/main/config/webhcat-log4j.properties b/hcatalog/webhcat/svr/src/main/config/webhcat-log4j.properties
deleted file mode 100644
index 866052c..0000000
--- a/hcatalog/webhcat/svr/src/main/config/webhcat-log4j.properties
+++ /dev/null
@@ -1,45 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-
-# Define some default values that can be overridden by system properties
-webhcat.root.logger = INFO, standard
-webhcat.log.dir = .
-webhcat.log.file = webhcat.log
-
-log4j.rootLogger = ${webhcat.root.logger}
-
-# Logging Threshold
-log4j.threshhold = DEBUG
-
-log4j.appender.standard  =  org.apache.log4j.DailyRollingFileAppender
-log4j.appender.standard.File = ${webhcat.log.dir}/${webhcat.log.file}
-
-# Rollver at midnight
-log4j.appender.DRFA.DatePattern = .yyyy-MM-dd
-
-log4j.appender.DRFA.layout = org.apache.log4j.PatternLayout
-
-log4j.appender.standard.layout = org.apache.log4j.PatternLayout
-log4j.appender.standard.layout.conversionPattern = %-5p | %d{DATE} | %c | %m%n
-
-# Class logging settings
-log4j.logger.com.sun.jersey = DEBUG
-log4j.logger.com.sun.jersey.spi.container.servlet.WebComponent = ERROR
-log4j.logger.org.apache.hadoop = INFO
-log4j.logger.org.apache.hadoop.conf = WARN
-log4j.logger.org.apache.zookeeper = WARN
-log4j.logger.org.eclipse.jetty = INFO

http://git-wip-us.apache.org/repos/asf/hive/blob/c93d6c77/hcatalog/webhcat/svr/src/main/config/webhcat-log4j2.xml
----------------------------------------------------------------------
diff --git a/hcatalog/webhcat/svr/src/main/config/webhcat-log4j2.xml b/hcatalog/webhcat/svr/src/main/config/webhcat-log4j2.xml
new file mode 100644
index 0000000..40da974
--- /dev/null
+++ b/hcatalog/webhcat/svr/src/main/config/webhcat-log4j2.xml
@@ -0,0 +1,75 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+
+<Configuration status="info" strict="true" name="WebhcatLog4j2"
+ packages="org.apache.hadoop.hive.ql.log">
+
+  <Properties>
+    <Property name="webhcat.log.threshold">ALL</Property>
+    <Property name="webhcat.log.level">INFO</Property>
+    <Property name="webhcat.root.logger">standard</Property>
+    <Property name="webhcat.log.dir">.</Property>
+    <Property name="webhcat.log.file">webhcat.log</Property>
+  </Properties>
+
+  <Appenders>
+    <Console name="console" target="SYSTEM_ERR">
+      <PatternLayout pattern="%d{yy/MM/dd HH:mm:ss} [%t]: %p %c{2}: %m%n"/>
+    </Console>
+
+    <!-- Daily Rolling File Appender -->
+    <!-- Use %pid in the filePattern to append <process-id>@<host-name> to the filename if you want separate log files for different CLI sessions -->
+    <RollingFile name="standard" fileName="${sys:webhcat.log.dir}/${sys:webhcat.log.file}"
+     filePattern="${sys:webhcat.log.dir}/${sys:webhcat.log.file}.%d{yyyy-MM-dd}">
+      <PatternLayout pattern="%-5p | %d{DATE} | %c | %m%n" />
+      <Policies>
+        <!-- Rollover at midnight (interval = 1 means daily) -->
+        <TimeBasedTriggeringPolicy interval="1" modulate="true"/>
+      </Policies>
+      <!-- 30-day backup -->
+      <!-- <DefaultRolloverStrategy max="30"/> -->
+    </RollingFile>
+
+  </Appenders>
+
+  <Loggers>
+    <Root level="${sys:webhcat.log.threshold}">
+      <AppenderRef ref="${sys:webhcat.root.logger}" level="${sys:webhcat.log.level}"/>
+    </Root>
+
+    <Logger name="com.sun.jersey" level="DEBUG">
+      <AppenderRef ref="${sys:webhcat.root.logger}"/>
+    </Logger>
+    <Logger name="com.sun.jersey.spi.container.servlet.WebComponent" level="ERROR">
+      <AppenderRef ref="${sys:webhcat.root.logger}"/>
+    </Logger>
+    <Logger name="org.apache.hadoop" level="INFO">
+      <AppenderRef ref="${sys:webhcat.root.logger}"/>
+    </Logger>
+    <Logger name="org.apache.hadoop.conf" level="WARN">
+      <AppenderRef ref="${sys:webhcat.root.logger}"/>
+    </Logger>
+    <Logger name="org.apache.zookeeper" level="WARN">
+      <AppenderRef ref="${sys:webhcat.root.logger}"/>
+    </Logger>
+    <Logger name="org.eclipse.jetty" level="INFO">
+      <AppenderRef ref="${sys:webhcat.root.logger}"/>
+    </Logger>
+  </Loggers>
+
+</Configuration>

http://git-wip-us.apache.org/repos/asf/hive/blob/c93d6c77/hplsql/src/main/java/org/apache/hive/hplsql/Exec.java
----------------------------------------------------------------------
diff --git a/hplsql/src/main/java/org/apache/hive/hplsql/Exec.java b/hplsql/src/main/java/org/apache/hive/hplsql/Exec.java
index 73f470c..268c218 100644
--- a/hplsql/src/main/java/org/apache/hive/hplsql/Exec.java
+++ b/hplsql/src/main/java/org/apache/hive/hplsql/Exec.java
@@ -670,6 +670,8 @@ public class Exec extends HplsqlBaseVisitor<Integer> {
     if (!parseArguments(args)) {
       return 1;
     }
+    // specify the default log4j2 configuration file.
+    System.setProperty("log4j.configurationFile", "hive-log4j2.xml");
     conf = new Conf();
     conf.init();    
     conn = new Conn(this);
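
Setting log4j.configurationFile from code only takes effect because it happens before the first logger in the process is created; once log4j2 has initialized, changing the property does nothing. The same result can be had at launch time. A sketch, assuming the hplsql wrapper forwards HADOOP_CLIENT_OPTS to the JVM; the config path is illustrative:

  export HADOOP_CLIENT_OPTS="$HADOOP_CLIENT_OPTS -Dlog4j.configurationFile=file:///opt/hive/conf/hive-log4j2.xml"
  bin/hplsql -e "SELECT 1"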

http://git-wip-us.apache.org/repos/asf/hive/blob/c93d6c77/itests/hive-unit/src/test/java/org/apache/hive/service/cli/operation/TestOperationLoggingLayout.java
----------------------------------------------------------------------
diff --git a/itests/hive-unit/src/test/java/org/apache/hive/service/cli/operation/TestOperationLoggingLayout.java b/itests/hive-unit/src/test/java/org/apache/hive/service/cli/operation/TestOperationLoggingLayout.java
new file mode 100644
index 0000000..93c16de
--- /dev/null
+++ b/itests/hive-unit/src/test/java/org/apache/hive/service/cli/operation/TestOperationLoggingLayout.java
@@ -0,0 +1,136 @@
+package org.apache.hive.service.cli.operation;
+
+import java.io.File;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.Map;
+
+import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hive.jdbc.miniHS2.MiniHS2;
+import org.apache.hive.service.cli.CLIServiceClient;
+import org.apache.hive.service.cli.FetchOrientation;
+import org.apache.hive.service.cli.FetchType;
+import org.apache.hive.service.cli.OperationHandle;
+import org.apache.hive.service.cli.RowSet;
+import org.apache.hive.service.cli.SessionHandle;
+import org.junit.After;
+import org.junit.AfterClass;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+/**
+ * Tests to verify operation logging layout for different modes.
+ */
+public class TestOperationLoggingLayout {
+  protected static HiveConf hiveConf;
+  protected static String tableName;
+  private File dataFile;
+  protected CLIServiceClient client;
+  protected static MiniHS2 miniHS2 = null;
+  protected static Map<String, String> confOverlay;
+  protected SessionHandle sessionHandle;
+  protected final String sql = "select * from " + tableName;
+  private final String sqlCntStar = "select count(*) from " + tableName;
+
+  @BeforeClass
+  public static void setUpBeforeClass() throws Exception {
+    tableName = "TestOperationLoggingLayout_table";
+    hiveConf = new HiveConf();
+    hiveConf.set(HiveConf.ConfVars.HIVE_SERVER2_LOGGING_OPERATION_LEVEL.varname, "execution");
+    // We need to set the below parameter to test performance level logging
+    hiveConf.set("hive.ql.log.PerfLogger.level", "INFO,DRFA");
+    miniHS2 = new MiniHS2(hiveConf);
+    confOverlay = new HashMap<String, String>();
+    confOverlay.put(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY.varname, "false");
+    miniHS2.start(confOverlay);
+  }
+
+  /**
+   * Open a session and create a table for the test cases to use.
+   *
+   * @throws Exception
+   */
+  @Before
+  public void setUp() throws Exception {
+    dataFile = new File(hiveConf.get("test.data.files"), "kv1.txt");
+    client = miniHS2.getServiceClient();
+    sessionHandle = setupSession();
+  }
+
+  @After
+  public void tearDown() throws Exception {
+    // Cleanup
+    String queryString = "DROP TABLE " + tableName;
+    client.executeStatement(sessionHandle, queryString, null);
+
+    client.closeSession(sessionHandle);
+  }
+
+  @AfterClass
+  public static void tearDownAfterClass() throws Exception {
+    miniHS2.stop();
+  }
+
+  @Test
+  public void testSwitchLogLayout() throws Exception {
+    // verify whether the sql operation log is generated and fetch correctly.
+    OperationHandle operationHandle = client.executeStatement(sessionHandle, sqlCntStar, null);
+    RowSet rowSetLog = client.fetchResults(operationHandle, FetchOrientation.FETCH_FIRST, 1000,
+        FetchType.LOG);
+    Iterator<Object[]> iter = rowSetLog.iterator();
+    // non-verbose pattern is %-5p : %m%n; check that each line starts with a log level
+    while (iter.hasNext()) {
+      String row = iter.next()[0].toString();
+      Assert.assertEquals(true, row.matches("^(FATAL|ERROR|WARN|INFO|DEBUG|TRACE).*$"));
+    }
+
+    String queryString = "set hive.server2.logging.operation.level=verbose";
+    client.executeStatement(sessionHandle, queryString, null);
+    operationHandle = client.executeStatement(sessionHandle, sqlCntStar, null);
+    // just check the first few lines; some log lines are multi-line strings which can break the
+    // format checks below
+    rowSetLog = client.fetchResults(operationHandle, FetchOrientation.FETCH_FIRST, 10,
+        FetchType.LOG);
+    iter = rowSetLog.iterator();
+    // verbose pattern is "%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n"
+    while (iter.hasNext()) {
+      String row = iter.next()[0].toString();
+      // just check if the log line starts with a date
+      Assert.assertEquals(true,
+          row.matches("^\\d{2}[/](0[1-9]|1[012])[/](0[1-9]|[12][0-9]|3[01]).*$"));
+    }
+  }
+
+  private SessionHandle setupSession() throws Exception {
+    // Open a session
+    SessionHandle sessionHandle = client.openSession(null, null, null);
+
+    // Change lock manager to embedded mode
+    String queryString = "SET hive.lock.manager=" +
+        "org.apache.hadoop.hive.ql.lockmgr.EmbeddedLockManager";
+    client.executeStatement(sessionHandle, queryString, null);
+
+    // Drop the table if it exists
+    queryString = "DROP TABLE IF EXISTS " + tableName;
+    client.executeStatement(sessionHandle, queryString, null);
+
+    // Create a test table
+    queryString = "create table " + tableName + " (key int, value string)";
+    client.executeStatement(sessionHandle, queryString, null);
+
+    // Load data
+    queryString = "load data local inpath '" + dataFile + "' into table " + tableName;
+    client.executeStatement(sessionHandle, queryString, null);
+
+    // Precondition check: verify whether the table is created and data is fetched correctly.
+    OperationHandle operationHandle = client.executeStatement(sessionHandle, sql, null);
+    RowSet rowSetResult = client.fetchResults(operationHandle);
+    Assert.assertEquals(500, rowSetResult.numRows());
+    Assert.assertEquals(238, rowSetResult.iterator().next()[0]);
+    Assert.assertEquals("val_238", rowSetResult.iterator().next()[1]);
+
+    return sessionHandle;
+  }
+}


[44/50] [abbrv] hive git commit: HIVE-11568 : merge master into branch (Sergey Shelukhin)

Posted by se...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/c528294b/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseStore.java
----------------------------------------------------------------------
diff --cc metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseStore.java
index f8042fc,0000000..0204f37
mode 100644,000000..100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseStore.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseStore.java
@@@ -1,2225 -1,0 +1,2241 @@@
 +/**
 + * Licensed to the Apache Software Foundation (ASF) under one
 + * or more contributor license agreements.  See the NOTICE file
 + * distributed with this work for additional information
 + * regarding copyright ownership.  The ASF licenses this file
 + * to you under the Apache License, Version 2.0 (the
 + * "License"); you may not use this file except in compliance
 + * with the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing,
 + * software distributed under the License is distributed on an
 + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 + * KIND, either express or implied.  See the License for the
 + * specific language governing permissions and limitations
 + * under the License.
 + */
 +package org.apache.hadoop.hive.metastore.hbase;
 +
 +import com.google.common.annotations.VisibleForTesting;
 +import com.google.common.cache.CacheLoader;
 +import org.apache.commons.lang.StringUtils;
 +import org.apache.commons.logging.Log;
 +import org.apache.commons.logging.LogFactory;
 +import org.apache.hadoop.conf.Configuration;
 +import org.apache.hadoop.hive.common.FileUtils;
 +import org.apache.hadoop.hive.metastore.HiveMetaStore;
 +import org.apache.hadoop.hive.metastore.PartFilterExprUtil;
 +import org.apache.hadoop.hive.metastore.PartitionExpressionProxy;
 +import org.apache.hadoop.hive.metastore.RawStore;
 +import org.apache.hadoop.hive.metastore.api.AggrStats;
 +import org.apache.hadoop.hive.metastore.api.ColumnStatistics;
 +import org.apache.hadoop.hive.metastore.api.CurrentNotificationEventId;
 +import org.apache.hadoop.hive.metastore.api.Database;
 +import org.apache.hadoop.hive.metastore.api.FieldSchema;
 +import org.apache.hadoop.hive.metastore.api.Function;
 +import org.apache.hadoop.hive.metastore.api.HiveObjectPrivilege;
 +import org.apache.hadoop.hive.metastore.api.HiveObjectRef;
 +import org.apache.hadoop.hive.metastore.api.HiveObjectType;
 +import org.apache.hadoop.hive.metastore.api.Index;
 +import org.apache.hadoop.hive.metastore.api.InvalidInputException;
 +import org.apache.hadoop.hive.metastore.api.InvalidObjectException;
 +import org.apache.hadoop.hive.metastore.api.InvalidPartitionException;
 +import org.apache.hadoop.hive.metastore.api.MetaException;
 +import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
 +import org.apache.hadoop.hive.metastore.api.NotificationEvent;
 +import org.apache.hadoop.hive.metastore.api.NotificationEventRequest;
 +import org.apache.hadoop.hive.metastore.api.NotificationEventResponse;
 +import org.apache.hadoop.hive.metastore.api.Partition;
 +import org.apache.hadoop.hive.metastore.api.PartitionEventType;
 +import org.apache.hadoop.hive.metastore.api.PrincipalPrivilegeSet;
 +import org.apache.hadoop.hive.metastore.api.PrincipalType;
 +import org.apache.hadoop.hive.metastore.api.PrivilegeBag;
 +import org.apache.hadoop.hive.metastore.api.PrivilegeGrantInfo;
 +import org.apache.hadoop.hive.metastore.api.Role;
 +import org.apache.hadoop.hive.metastore.api.RolePrincipalGrant;
 +import org.apache.hadoop.hive.metastore.api.Table;
 +import org.apache.hadoop.hive.metastore.api.Type;
 +import org.apache.hadoop.hive.metastore.api.UnknownDBException;
 +import org.apache.hadoop.hive.metastore.api.UnknownPartitionException;
 +import org.apache.hadoop.hive.metastore.api.UnknownTableException;
 +import org.apache.hadoop.hive.metastore.hbase.HBaseFilterPlanUtil.PlanResult;
 +import org.apache.hadoop.hive.metastore.hbase.HBaseFilterPlanUtil.ScanPlan;
 +import org.apache.hadoop.hive.metastore.parser.ExpressionTree;
 +import org.apache.hadoop.hive.metastore.partition.spec.PartitionSpecProxy;
 +import org.apache.thrift.TException;
 +
 +import java.io.IOException;
 +import java.util.ArrayList;
 +import java.util.HashMap;
 +import java.util.HashSet;
 +import java.util.List;
 +import java.util.Map;
 +import java.util.Map.Entry;
 +import java.util.Set;
 +
 +/**
 + * Implementation of RawStore that stores data in HBase
 + */
 +public class HBaseStore implements RawStore {
 +  static final private Log LOG = LogFactory.getLog(HBaseStore.class.getName());
 +
 +  // Do not access this directly, call getHBase to make sure it is initialized.
 +  private HBaseReadWrite hbase = null;
 +  private Configuration conf;
 +  private int txnNestLevel = 0;
 +  private PartitionExpressionProxy expressionProxy = null;
 +
 +  public HBaseStore() {
 +  }
 +
 +  @Override
 +  public void shutdown() {
 +    try {
 +      if (txnNestLevel != 0) rollbackTransaction();
 +      getHBase().close();
 +    } catch (IOException e) {
 +      throw new RuntimeException(e);
 +    }
 +  }
 +
 +  @Override
 +  public boolean openTransaction() {
 +    if (txnNestLevel++ <= 0) {
 +      LOG.debug("Opening HBase transaction");
 +      getHBase().begin();
 +      txnNestLevel = 1;
 +    }
 +    return true;
 +  }
 +
 +  @Override
 +  public boolean commitTransaction() {
 +    if (--txnNestLevel == 0) {
 +      LOG.debug("Committing HBase transaction");
 +      getHBase().commit();
 +    }
 +    return true;
 +  }
 +
 +  @Override
 +  public void rollbackTransaction() {
 +    txnNestLevel = 0;
 +    LOG.debug("Rolling back HBase transaction");
 +    getHBase().rollback();
 +  }
 +
 +  @Override
 +  public void createDatabase(Database db) throws InvalidObjectException, MetaException {
 +    boolean commit = false;
 +    openTransaction();
 +    try {
 +
 +      // HiveMetaStore already checks for existence of the database, don't recheck
 +      getHBase().putDb(db);
 +      commit = true;
 +    } catch (IOException e) {
 +      LOG.error("Unable to create database ", e);
 +      throw new MetaException("Unable to read from or write to hbase " + e.getMessage());
 +    } finally {
 +      commitOrRoleBack(commit);
 +    }
 +
 +  }
 +
 +  @Override
 +  public Database getDatabase(String name) throws NoSuchObjectException {
 +    boolean commit = false;
 +    openTransaction();
 +    try {
 +      Database db = getHBase().getDb(name);
 +      if (db == null) {
 +        throw new NoSuchObjectException("Unable to find db " + name);
 +      }
 +      commit = true;
 +      return db;
 +    } catch (IOException e) {
 +      LOG.error("Unable to get db", e);
 +      throw new NoSuchObjectException("Error reading db " + e.getMessage());
 +    } finally {
 +      commitOrRoleBack(commit);
 +    }
 +  }
 +
 +  @Override
 +  public boolean dropDatabase(String dbname) throws NoSuchObjectException, MetaException {
 +    boolean commit = false;
 +    openTransaction();
 +    try {
 +      getHBase().deleteDb(dbname);
 +      commit = true;
 +      return true;
 +    } catch (IOException e) {
 +      LOG.error("Unable to delete db" + e);
 +      throw new MetaException("Unable to drop database " + e.getMessage());
 +    } finally {
 +      commitOrRoleBack(commit);
 +    }
 +  }
 +
 +  @Override
 +  public boolean alterDatabase(String dbname, Database db) throws NoSuchObjectException,
 +      MetaException {
 +    // ObjectStore fetches the old db before updating it, but I can't see the possible value of
 +    // that since the caller will have needed to call getDatabase to have the db object.
 +    boolean commit = false;
 +    openTransaction();
 +    try {
 +      getHBase().putDb(db);
 +      commit = true;
 +      return true;
 +    } catch (IOException e) {
 +      LOG.error("Unable to alter database ", e);
 +      throw new MetaException("Unable to read from or write to hbase " + e.getMessage());
 +    } finally {
 +      commitOrRoleBack(commit);
 +    }
 +  }
 +
 +  @Override
 +  public List<String> getDatabases(String pattern) throws MetaException {
 +    boolean commit = false;
 +    openTransaction();
 +    try {
 +      List<Database> dbs = getHBase().scanDatabases(likeToRegex(pattern));
 +      List<String> dbNames = new ArrayList<String>(dbs.size());
 +      for (Database db : dbs) dbNames.add(db.getName());
 +      commit = true;
 +      return dbNames;
 +    } catch (IOException e) {
 +      LOG.error("Unable to get databases ", e);
 +      throw new MetaException("Unable to get databases, " + e.getMessage());
 +    } finally {
 +      commitOrRoleBack(commit);
 +    }
 +  }
 +
 +  @Override
 +  public List<String> getAllDatabases() throws MetaException {
 +    return getDatabases(null);
 +  }
 +
 +  @Override
 +  public boolean createType(Type type) {
 +    throw new UnsupportedOperationException();
 +  }
 +
 +  @Override
 +  public Type getType(String typeName) {
 +    throw new UnsupportedOperationException();
 +  }
 +
 +  @Override
 +  public boolean dropType(String typeName) {
 +    throw new UnsupportedOperationException();
 +  }
 +
 +  @Override
 +  public void createTable(Table tbl) throws InvalidObjectException, MetaException {
 +    boolean commit = false;
 +    openTransaction();
 +    // HiveMetaStore above us checks if the table already exists, so we can blindly store it here.
 +    try {
 +      getHBase().putTable(tbl);
 +      commit = true;
 +    } catch (IOException e) {
 +      LOG.error("Unable to create table ", e);
 +      throw new MetaException("Unable to read from or write to hbase " + e.getMessage());
 +    } finally {
 +      commitOrRoleBack(commit);
 +    }
 +  }
 +
 +  @Override
 +  public boolean dropTable(String dbName, String tableName) throws MetaException,
 +      NoSuchObjectException, InvalidObjectException, InvalidInputException {
 +    boolean commit = false;
 +    openTransaction();
 +    try {
 +      getHBase().deleteTable(dbName, tableName);
 +      commit = true;
 +      return true;
 +    } catch (IOException e) {
 +      LOG.error("Unable to delete db" + e);
 +      throw new MetaException("Unable to drop table " + tableNameForErrorMsg(dbName, tableName));
 +    } finally {
 +      commitOrRoleBack(commit);
 +    }
 +  }
 +
 +  @Override
 +  public Table getTable(String dbName, String tableName) throws MetaException {
 +    boolean commit = false;
 +    openTransaction();
 +    try {
 +      Table table = getHBase().getTable(dbName, tableName);
 +      if (table == null) {
 +        LOG.debug("Unable to find table " + tableNameForErrorMsg(dbName, tableName));
 +      }
 +      commit = true;
 +      return table;
 +    } catch (IOException e) {
 +      LOG.error("Unable to get table", e);
 +      throw new MetaException("Error reading table " + e.getMessage());
 +    } finally {
 +      commitOrRoleBack(commit);
 +    }
 +  }
 +
 +  @Override
 +  public boolean addPartition(Partition part) throws InvalidObjectException, MetaException {
 +    boolean commit = false;
 +    openTransaction();
 +    try {
 +      getHBase().putPartition(part);
 +      commit = true;
 +      return true;
 +    } catch (IOException e) {
 +      LOG.error("Unable to add partition", e);
 +      throw new MetaException("Unable to read from or write to hbase " + e.getMessage());
 +    } finally {
 +      commitOrRoleBack(commit);
 +    }
 +  }
 +
 +  @Override
 +  public boolean addPartitions(String dbName, String tblName, List<Partition> parts)
 +      throws InvalidObjectException, MetaException {
 +    boolean commit = false;
 +    openTransaction();
 +    try {
 +      getHBase().putPartitions(parts);
 +      commit = true;
 +      return true;
 +    } catch (IOException e) {
 +      LOG.error("Unable to add partitions", e);
 +      throw new MetaException("Unable to read from or write to hbase " + e.getMessage());
 +    } finally {
 +      commitOrRoleBack(commit);
 +    }
 +  }
 +
 +  @Override
 +  public boolean addPartitions(String dbName, String tblName, PartitionSpecProxy partitionSpec,
 +                               boolean ifNotExists) throws InvalidObjectException, MetaException {
 +    throw new UnsupportedOperationException();
 +  }
 +
 +  @Override
 +  public Partition getPartition(String dbName, String tableName, List<String> part_vals) throws
 +      MetaException, NoSuchObjectException {
 +    boolean commit = false;
 +    openTransaction();
 +    try {
 +      Partition part = getHBase().getPartition(dbName, tableName, part_vals);
 +      if (part == null) {
 +        throw new NoSuchObjectException("Unable to find partition " +
 +            partNameForErrorMsg(dbName, tableName, part_vals));
 +      }
 +      commit = true;
 +      return part;
 +    } catch (IOException e) {
 +      LOG.error("Unable to get partition", e);
 +      throw new MetaException("Error reading partition " + e.getMessage());
 +    } finally {
 +      commitOrRoleBack(commit);
 +    }
 +  }
 +
 +  @Override
 +  public boolean doesPartitionExist(String dbName, String tableName, List<String> part_vals) throws
 +      MetaException, NoSuchObjectException {
 +    boolean commit = false;
 +    openTransaction();
 +    try {
 +      boolean exists = getHBase().getPartition(dbName, tableName, part_vals) != null;
 +      commit = true;
 +      return exists;
 +    } catch (IOException e) {
 +      LOG.error("Unable to get partition", e);
 +      throw new MetaException("Error reading partition " + e.getMessage());
 +    } finally {
 +      commitOrRoleBack(commit);
 +    }
 +  }
 +
 +  @Override
 +  public boolean dropPartition(String dbName, String tableName, List<String> part_vals) throws
 +      MetaException, NoSuchObjectException, InvalidObjectException, InvalidInputException {
 +    boolean commit = false;
 +    openTransaction();
 +    try {
 +      getHBase().deletePartition(dbName, tableName, part_vals);
+      // Drop any cached stats that reference this partition
 +      getHBase().getStatsCache().invalidate(dbName, tableName,
 +          buildExternalPartName(dbName, tableName, part_vals));
 +      commit = true;
 +      return true;
 +    } catch (IOException e) {
 +      LOG.error("Unable to delete db" + e);
 +      throw new MetaException("Unable to drop partition " + partNameForErrorMsg(dbName, tableName,
 +          part_vals));
 +    } finally {
 +      commitOrRoleBack(commit);
 +    }
 +  }
 +
 +  @Override
 +  public List<Partition> getPartitions(String dbName, String tableName, int max) throws
 +      MetaException, NoSuchObjectException {
 +    boolean commit = false;
 +    openTransaction();
 +    try {
 +      List<Partition> parts = getHBase().scanPartitionsInTable(dbName, tableName, max);
 +      commit = true;
 +      return parts;
 +    } catch (IOException e) {
 +      LOG.error("Unable to get partitions", e);
 +      throw new MetaException("Error scanning partitions");
 +    } finally {
 +      commitOrRoleBack(commit);
 +    }
 +  }
 +
 +  @Override
 +  public void alterTable(String dbname, String name, Table newTable) throws InvalidObjectException,
 +      MetaException {
 +    boolean commit = false;
 +    openTransaction();
 +    try {
 +      getHBase().replaceTable(getHBase().getTable(dbname, name), newTable);
 +      if (newTable.getPartitionKeys() != null && newTable.getPartitionKeys().size() > 0
 +          && !name.equals(newTable.getTableName())) {
 +        // They renamed the table, so we need to change each partition as well, since it changes
 +        // the key.
 +        try {
 +          List<Partition> oldParts = getPartitions(dbname, name, -1);
 +          List<Partition> newParts = new ArrayList<>(oldParts.size());
 +          for (Partition oldPart : oldParts) {
 +            Partition newPart = oldPart.deepCopy();
 +            newPart.setTableName(newTable.getTableName());
 +            newParts.add(newPart);
 +          }
 +          getHBase().replacePartitions(oldParts, newParts);
 +        } catch (NoSuchObjectException e) {
 +          LOG.debug("No partitions found for old table so not worrying about it");
 +        }
 +
 +      }
 +      commit = true;
 +    } catch (IOException e) {
 +      LOG.error("Unable to alter table " + tableNameForErrorMsg(dbname, name), e);
 +      throw new MetaException("Unable to alter table " + tableNameForErrorMsg(dbname, name));
 +    } finally {
 +      commitOrRoleBack(commit);
 +    }
 +  }
 +
 +  @Override
 +  public List<String> getTables(String dbName, String pattern) throws MetaException {
 +    boolean commit = false;
 +    openTransaction();
 +    try {
 +      List<Table> tables = getHBase().scanTables(dbName, likeToRegex(pattern));
 +      List<String> tableNames = new ArrayList<String>(tables.size());
 +      for (Table table : tables) tableNames.add(table.getTableName());
 +      commit = true;
 +      return tableNames;
 +    } catch (IOException e) {
 +      LOG.error("Unable to get tables ", e);
 +      throw new MetaException("Unable to get tables, " + e.getMessage());
 +    } finally {
 +      commitOrRoleBack(commit);
 +    }
 +  }
 +
 +  @Override
 +  public List<Table> getTableObjectsByName(String dbname, List<String> tableNames) throws
 +      MetaException, UnknownDBException {
 +    boolean commit = false;
 +    openTransaction();
 +    try {
 +      List<Table> tables = getHBase().getTables(dbname, tableNames);
 +      commit = true;
 +      return tables;
 +    } catch (IOException e) {
 +      LOG.error("Unable to get tables ", e);
 +      throw new MetaException("Unable to get tables, " + e.getMessage());
 +    } finally {
 +      commitOrRoleBack(commit);
 +    }
 +  }
 +
 +  @Override
 +  public List<String> getAllTables(String dbName) throws MetaException {
 +    return getTables(dbName, null);
 +  }
 +
 +  @Override
 +  public List<String> listTableNamesByFilter(String dbName, String filter, short max_tables) throws
 +      MetaException, UnknownDBException {
 +    // TODO needs to wait until we support pushing filters into HBase.
 +    throw new UnsupportedOperationException();
 +  }
 +
 +  @Override
 +  public List<String> listPartitionNames(String db_name, String tbl_name, short max_parts) throws
 +      MetaException {
 +    boolean commit = false;
 +    openTransaction();
 +    try {
 +      List<Partition> parts = getHBase().scanPartitionsInTable(db_name, tbl_name, max_parts);
 +      if (parts == null) return null;
 +      List<String> names = new ArrayList<String>(parts.size());
 +      Table table = getHBase().getTable(db_name, tbl_name);
 +      for (Partition p : parts) {
 +        names.add(buildExternalPartName(table, p));
 +      }
 +      commit = true;
 +      return names;
 +    } catch (IOException e) {
 +      LOG.error("Unable to get partitions", e);
 +      throw new MetaException("Error scanning partitions");
 +    } finally {
 +      commitOrRoleBack(commit);
 +    }
 +  }
 +
 +  @Override
 +  public List<String> listPartitionNamesByFilter(String db_name, String tbl_name, String filter,
 +                                                 short max_parts) throws MetaException {
 +    // TODO needs to wait until we support pushing filters into HBase.
 +    throw new UnsupportedOperationException();
 +  }
 +
 +  @Override
 +  public void alterPartition(String db_name, String tbl_name, List<String> part_vals,
 +                             Partition new_part) throws InvalidObjectException, MetaException {
 +    boolean commit = false;
 +    openTransaction();
 +    try {
 +      Partition oldPart = getHBase().getPartition(db_name, tbl_name, part_vals);
 +      getHBase().replacePartition(oldPart, new_part);
+      // Drop any cached stats that reference this partition
 +      getHBase().getStatsCache().invalidate(db_name, tbl_name,
 +          buildExternalPartName(db_name, tbl_name, part_vals));
 +      commit = true;
 +    } catch (IOException e) {
 +      LOG.error("Unable to add partition", e);
 +      throw new MetaException("Unable to read from or write to hbase " + e.getMessage());
 +    } finally {
 +      commitOrRoleBack(commit);
 +    }
 +  }
 +
 +  @Override
 +  public void alterPartitions(String db_name, String tbl_name, List<List<String>> part_vals_list,
 +                              List<Partition> new_parts) throws InvalidObjectException,
 +      MetaException {
 +    boolean commit = false;
 +    openTransaction();
 +    try {
 +      List<Partition> oldParts = getHBase().getPartitions(db_name, tbl_name, part_vals_list);
 +      getHBase().replacePartitions(oldParts, new_parts);
 +      for (List<String> part_vals : part_vals_list) {
 +        getHBase().getStatsCache().invalidate(db_name, tbl_name,
 +            buildExternalPartName(db_name, tbl_name, part_vals));
 +      }
 +      commit = true;
 +    } catch (IOException e) {
 +      LOG.error("Unable to add partition", e);
 +      throw new MetaException("Unable to read from or write to hbase " + e.getMessage());
 +    } finally {
 +      commitOrRoleBack(commit);
 +    }
 +  }
 +
 +  @Override
 +  public boolean addIndex(Index index) throws InvalidObjectException, MetaException {
 +    throw new UnsupportedOperationException();
 +  }
 +
 +  @Override
 +  public Index getIndex(String dbName, String origTableName, String indexName) throws
 +      MetaException {
 +    throw new UnsupportedOperationException();
 +  }
 +
 +  @Override
 +  public boolean dropIndex(String dbName, String origTableName, String indexName) throws
 +      MetaException {
 +    throw new UnsupportedOperationException();
 +  }
 +
 +  @Override
 +  public List<Index> getIndexes(String dbName, String origTableName, int max) throws MetaException {
+    // TODO - Indexes are not currently supported, but we need to return an empty list here or
+    // else drop table crashes.
 +    return new ArrayList<Index>();
 +  }
 +
 +  @Override
 +  public List<String> listIndexNames(String dbName, String origTableName, short max) throws
 +      MetaException {
 +    throw new UnsupportedOperationException();
 +  }
 +
 +  @Override
 +  public void alterIndex(String dbname, String baseTblName, String name, Index newIndex) throws
 +      InvalidObjectException, MetaException {
 +    throw new UnsupportedOperationException();
 +  }
 +
 +  @Override
 +  public List<Partition> getPartitionsByFilter(String dbName, String tblName, String filter,
 +                                               short maxParts) throws MetaException,
 +      NoSuchObjectException {
 +    final ExpressionTree exprTree = (filter != null && !filter.isEmpty()) ? PartFilterExprUtil
 +        .getFilterParser(filter).tree : ExpressionTree.EMPTY_TREE;
 +    List<Partition> result = new ArrayList<Partition>();
 +    boolean commit = false;
 +    openTransaction();
+    try {
+      getPartitionsByExprInternal(dbName, tblName, exprTree, maxParts, result);
+      commit = true;
+      return result;
 +    } finally {
 +      commitOrRoleBack(commit);
 +    }
 +  }
 +
 +  @Override
 +  public boolean getPartitionsByExpr(String dbName, String tblName, byte[] expr,
 +                                     String defaultPartitionName, short maxParts,
 +                                     List<Partition> result) throws TException {
 +    final ExpressionTree exprTree = PartFilterExprUtil.makeExpressionTree(expressionProxy, expr);
 +    // TODO: investigate if there should be any role for defaultPartitionName in this
 +    // implementation. direct sql code path in ObjectStore does not use it.
 +
 +    boolean commit = false;
 +    openTransaction();
+    try {
+      boolean ret = getPartitionsByExprInternal(dbName, tblName, exprTree, maxParts, result);
+      commit = true;
+      return ret;
 +    } finally {
 +      commitOrRoleBack(commit);
 +    }
 +  }
 +
 +  private boolean getPartitionsByExprInternal(String dbName, String tblName,
 +      ExpressionTree exprTree, short maxParts, List<Partition> result) throws MetaException,
 +      NoSuchObjectException {
 +
 +    Table table = getTable(dbName, tblName);
 +    if (table == null) {
 +      throw new NoSuchObjectException("Unable to find table " + dbName + "." + tblName);
 +    }
 +    String firstPartitionColumn = table.getPartitionKeys().get(0).getName();
+    // Generate the HBase filter plan from the expression tree.
 +    PlanResult planRes = HBaseFilterPlanUtil.getFilterPlan(exprTree, firstPartitionColumn);
 +
 +    if (LOG.isDebugEnabled()) {
 +      LOG.debug("Hbase Filter Plan generated : " + planRes.plan);
 +    }
 +
 +    // results from scans need to be merged as there can be overlapping results between
 +    // the scans. Use a map of list of partition values to partition for this.
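+    // For example (hypothetical filter, not taken from this patch): "p >= 'a' OR p <= 'c'" on the
+    // first partition column can produce two ScanPlans whose key ranges overlap, so the same
+    // partition row may come back from both scans; keying the map on the partition values
+    // dedupes it.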
 +    Map<List<String>, Partition> mergedParts = new HashMap<List<String>, Partition>();
 +    for (ScanPlan splan : planRes.plan.getPlans()) {
 +      try {
 +        List<Partition> parts = getHBase().scanPartitions(dbName, tblName,
 +            splan.getStartRowSuffix(), splan.getEndRowSuffix(), null, -1);
 +        boolean reachedMax = false;
 +        for (Partition part : parts) {
 +          mergedParts.put(part.getValues(), part);
 +          if (mergedParts.size() == maxParts) {
 +            reachedMax = true;
 +            break;
 +          }
 +        }
 +        if (reachedMax) {
 +          break;
 +        }
 +      } catch (IOException e) {
 +        LOG.error("Unable to get partitions", e);
 +        throw new MetaException("Error scanning partitions" + tableNameForErrorMsg(dbName, tblName)
 +            + ": " + e);
 +      }
 +    }
 +    for (Entry<List<String>, Partition> mp : mergedParts.entrySet()) {
 +      result.add(mp.getValue());
 +    }
 +    if (LOG.isDebugEnabled()) {
 +      LOG.debug("Matched partitions " + result);
 +    }
 +
+    // Return whether every filter condition could be pushed into the scan plan; when this is
+    // false, the returned partitions may include some that do not match the filter conditions.
 +    return !planRes.hasUnsupportedCondition;
 +  }
 +
 +  @Override
 +  public List<Partition> getPartitionsByNames(String dbName, String tblName,
 +                                              List<String> partNames) throws MetaException,
 +      NoSuchObjectException {
 +    List<Partition> parts = new ArrayList<Partition>();
 +    for (String partName : partNames) {
 +      parts.add(getPartition(dbName, tblName, partNameToVals(partName)));
 +    }
 +    return parts;
 +  }
 +
 +  @Override
 +  public Table markPartitionForEvent(String dbName, String tblName, Map<String, String> partVals,
 +                                     PartitionEventType evtType) throws MetaException,
 +      UnknownTableException, InvalidPartitionException, UnknownPartitionException {
 +    throw new UnsupportedOperationException();
 +  }
 +
 +  @Override
 +  public boolean isPartitionMarkedForEvent(String dbName, String tblName,
 +                                           Map<String, String> partName,
 +                                           PartitionEventType evtType) throws MetaException,
 +      UnknownTableException, InvalidPartitionException, UnknownPartitionException {
 +    throw new UnsupportedOperationException();
 +  }
 +
 +  /*
 +   * The design for roles.  Roles are a pain because of their hierarchical nature.  When a user
 +   * comes in and we need to be able to determine all roles he is a part of, we do not want to
 +   * have to walk the hierarchy in the database.  This means we need to flatten the role map for
 +   * each user.  But we also have to track how the roles are connected for each user, in case one
 +   * role is revoked from another (e.g. if role1 is included in role2 but then revoked
+   * from it, and user1 was granted both role2 and role1, we cannot remove user1 from role1
+   * because he was granted that role separately).
 +   *
 +   * We want to optimize for the read case and put the cost on grant and revoke of roles, since
 +   * we assume that is the less common case.  So we lay out the roles data as follows:
 +   *
 +   * There is a ROLES table that records each role, plus what other principals have been granted
 +   * into it, along with the info on grantor, etc.
 +   *
 +   * There is a USER_TO_ROLES table that contains the mapping of each user to every role he is a
 +   * part of.
 +   *
 +   * This makes determining what roles a user participates in very quick, as USER_TO_ROLE is a
 +   * simple list for each user.  It makes granting users into roles expensive, and granting roles
 +   * into roles very expensive.  Each time a user is granted into a role, we need to walk the
 +   * hierarchy in the role table (which means moving through that table multiple times) to
+   * determine every role the user participates in.  Each time a role is granted into another role
+   * this hierarchical walk must be done for every principal in the role being granted into.  To
+   * mitigate this pain somewhat, whenever doing these mappings we cache the entire ROLES table in
 +   * memory since we assume it is not large.
 +   *
 +   * On a related note, whenever a role is dropped we must walk not only all these role tables
+   * above (equivalent to a role being revoked from another role, since we have to rebuild
 +   * mappings for any users in roles that contained that role and any users directly in that
 +   * role), but we also have to remove all the privileges associated with that role directly.
 +   * That means a walk of the DBS table and of the TBLS table.
 +   */
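+
+  /*
+   * A schematic example of the layout above (row contents shown here are illustrative only, not
+   * the serialized form).  Suppose role1 is included in role2 and user1 has been granted role2:
+   *
+   *   ROLES          role1 -> grant of role2 into it (plus grantor info, etc.)
+   *                  role2 -> grant of user1 into it
+   *   USER_TO_ROLES  user1 -> [role2, role1]
+   *
+   * Reading user1's roles is a single lookup, while revoking role1 from role2 or dropping role1
+   * means rebuilding the user-to-role entry for every affected user (see removeRole, grantRole
+   * and revokeRole below, which do this via findAllUsersInRole / findUsersToRemapRolesFor and
+   * buildRoleMapForUser).
+   */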
 +
 +  @Override
 +  public boolean addRole(String roleName, String ownerName) throws InvalidObjectException,
 +      MetaException, NoSuchObjectException {
 +    int now = (int)(System.currentTimeMillis()/1000);
 +    Role role = new Role(roleName, now, ownerName);
 +    boolean commit = false;
 +    openTransaction();
 +    try {
 +      if (getHBase().getRole(roleName) != null) {
 +        throw new InvalidObjectException("Role " + roleName + " already exists");
 +      }
 +      getHBase().putRole(role);
 +      commit = true;
 +      return true;
 +    } catch (IOException e) {
 +      LOG.error("Unable to create role ", e);
 +      throw new MetaException("Unable to read from or write to hbase " + e.getMessage());
 +    } finally {
 +      commitOrRoleBack(commit);
 +    }
 +  }
 +
 +  @Override
 +  public boolean removeRole(String roleName) throws MetaException, NoSuchObjectException {
 +    boolean commit = false;
 +    openTransaction();
 +    try {
 +      Set<String> usersInRole = getHBase().findAllUsersInRole(roleName);
 +      getHBase().deleteRole(roleName);
 +      getHBase().removeRoleGrants(roleName);
 +      for (String user : usersInRole) {
 +        getHBase().buildRoleMapForUser(user);
 +      }
 +      commit = true;
 +      return true;
 +    } catch (IOException e) {
 +      LOG.error("Unable to delete role" + e);
 +      throw new MetaException("Unable to drop role " + roleName);
 +    } finally {
 +      commitOrRoleBack(commit);
 +    }
 +  }
 +
 +  @Override
 +  public boolean grantRole(Role role, String userName, PrincipalType principalType, String grantor,
 +                           PrincipalType grantorType, boolean grantOption)
 +      throws MetaException, NoSuchObjectException, InvalidObjectException {
 +    boolean commit = false;
 +    openTransaction();
 +    try {
 +      Set<String> usersToRemap = findUsersToRemapRolesFor(role, userName, principalType);
 +      HbaseMetastoreProto.RoleGrantInfo.Builder builder =
 +          HbaseMetastoreProto.RoleGrantInfo.newBuilder();
 +      if (userName != null) builder.setPrincipalName(userName);
 +      if (principalType != null) {
 +        builder.setPrincipalType(HBaseUtils.convertPrincipalTypes(principalType));
 +      }
 +      builder.setAddTime((int)(System.currentTimeMillis() / 1000));
 +      if (grantor != null) builder.setGrantor(grantor);
 +      if (grantorType != null) {
 +        builder.setGrantorType(HBaseUtils.convertPrincipalTypes(grantorType));
 +      }
 +      builder.setGrantOption(grantOption);
 +
 +      getHBase().addPrincipalToRole(role.getRoleName(), builder.build());
 +      for (String user : usersToRemap) {
 +        getHBase().buildRoleMapForUser(user);
 +      }
 +      commit = true;
 +      return true;
 +    } catch (IOException e) {
 +      LOG.error("Unable to grant role", e);
 +      throw new MetaException("Unable to grant role " + e.getMessage());
 +    } finally {
 +      commitOrRoleBack(commit);
 +    }
 +  }
 +
 +  @Override
 +  public boolean revokeRole(Role role, String userName, PrincipalType principalType,
 +                            boolean grantOption) throws MetaException, NoSuchObjectException {
 +    boolean commit = false;
 +    openTransaction();
 +    // This can have a couple of different meanings.  If grantOption is true, then this is only
 +    // revoking the grant option, the role itself doesn't need to be removed.  If it is false
 +    // then we need to remove the userName from the role altogether.
 +    try {
 +      if (grantOption) {
 +        // If this is a grant only change, we don't need to rebuild the user mappings.
 +        getHBase().dropPrincipalFromRole(role.getRoleName(), userName, principalType, grantOption);
 +      } else {
 +        Set<String> usersToRemap = findUsersToRemapRolesFor(role, userName, principalType);
 +        getHBase().dropPrincipalFromRole(role.getRoleName(), userName, principalType, grantOption);
 +        for (String user : usersToRemap) {
 +          getHBase().buildRoleMapForUser(user);
 +        }
 +      }
 +      commit = true;
 +      return true;
 +    } catch (IOException e) {
 +      LOG.error("Unable to revoke role " + role.getRoleName() + " from " + userName, e);
 +      throw new MetaException("Unable to revoke role " + e.getMessage());
 +    } finally {
 +      commitOrRoleBack(commit);
 +    }
 +  }
 +
 +  @Override
 +  public PrincipalPrivilegeSet getUserPrivilegeSet(String userName, List<String> groupNames)
 +      throws InvalidObjectException, MetaException {
 +    boolean commit = false;
 +    openTransaction();
 +    try {
 +      PrincipalPrivilegeSet pps = new PrincipalPrivilegeSet();
 +      PrincipalPrivilegeSet global = getHBase().getGlobalPrivs();
 +      if (global == null) return null;
 +      List<PrivilegeGrantInfo> pgi;
 +      if (global.getUserPrivileges() != null) {
 +        pgi = global.getUserPrivileges().get(userName);
 +        if (pgi != null) {
 +          pps.putToUserPrivileges(userName, pgi);
 +        }
 +      }
 +
 +      if (global.getRolePrivileges() != null) {
 +        List<String> roles = getHBase().getUserRoles(userName);
 +        if (roles != null) {
 +          for (String role : roles) {
 +            pgi = global.getRolePrivileges().get(role);
 +            if (pgi != null) {
 +              pps.putToRolePrivileges(role, pgi);
 +            }
 +          }
 +        }
 +      }
 +      commit = true;
 +      return pps;
 +    } catch (IOException e) {
 +      LOG.error("Unable to get db privileges for user", e);
 +      throw new MetaException("Unable to get db privileges for user, " + e.getMessage());
 +    } finally {
 +      commitOrRoleBack(commit);
 +    }
 +  }
 +
 +  @Override
 +  public PrincipalPrivilegeSet getDBPrivilegeSet(String dbName, String userName,
 +                                                 List<String> groupNames)
 +      throws InvalidObjectException, MetaException {
 +    boolean commit = false;
 +    openTransaction();
 +    try {
 +      PrincipalPrivilegeSet pps = new PrincipalPrivilegeSet();
 +      Database db = getHBase().getDb(dbName);
 +      if (db.getPrivileges() != null) {
 +        List<PrivilegeGrantInfo> pgi;
 +        // Find the user privileges for this db
 +        if (db.getPrivileges().getUserPrivileges() != null) {
 +          pgi = db.getPrivileges().getUserPrivileges().get(userName);
 +          if (pgi != null) {
 +            pps.putToUserPrivileges(userName, pgi);
 +          }
 +        }
 +
 +        if (db.getPrivileges().getRolePrivileges() != null) {
 +          List<String> roles = getHBase().getUserRoles(userName);
 +          if (roles != null) {
 +            for (String role : roles) {
 +              pgi = db.getPrivileges().getRolePrivileges().get(role);
 +              if (pgi != null) {
 +                pps.putToRolePrivileges(role, pgi);
 +              }
 +            }
 +          }
 +        }
 +      }
 +      commit = true;
 +      return pps;
 +    } catch (IOException e) {
 +      LOG.error("Unable to get db privileges for user", e);
 +      throw new MetaException("Unable to get db privileges for user, " + e.getMessage());
 +    } finally {
 +      commitOrRoleBack(commit);
 +    }
 +  }
 +
 +  @Override
 +  public PrincipalPrivilegeSet getTablePrivilegeSet(String dbName, String tableName,
 +                                                    String userName, List<String> groupNames)
 +      throws InvalidObjectException, MetaException {
 +    boolean commit = false;
 +    openTransaction();
 +    try {
 +      PrincipalPrivilegeSet pps = new PrincipalPrivilegeSet();
 +      Table table = getHBase().getTable(dbName, tableName);
 +      List<PrivilegeGrantInfo> pgi;
 +      if (table.getPrivileges() != null) {
 +        if (table.getPrivileges().getUserPrivileges() != null) {
 +          pgi = table.getPrivileges().getUserPrivileges().get(userName);
 +          if (pgi != null) {
 +            pps.putToUserPrivileges(userName, pgi);
 +          }
 +        }
 +
 +        if (table.getPrivileges().getRolePrivileges() != null) {
 +          List<String> roles = getHBase().getUserRoles(userName);
 +          if (roles != null) {
 +            for (String role : roles) {
 +              pgi = table.getPrivileges().getRolePrivileges().get(role);
 +              if (pgi != null) {
 +                pps.putToRolePrivileges(role, pgi);
 +              }
 +            }
 +          }
 +        }
 +      }
 +      commit = true;
 +      return pps;
 +    } catch (IOException e) {
 +      LOG.error("Unable to get db privileges for user", e);
 +      throw new MetaException("Unable to get db privileges for user, " + e.getMessage());
 +    } finally {
 +      commitOrRoleBack(commit);
 +    }
 +  }
 +
 +  @Override
 +  public PrincipalPrivilegeSet getPartitionPrivilegeSet(String dbName, String tableName,
 +                                                        String partition, String userName,
 +                                                        List<String> groupNames) throws
 +      InvalidObjectException, MetaException {
 +    // We don't support partition privileges
 +    return null;
 +  }
 +
 +  @Override
 +  public PrincipalPrivilegeSet getColumnPrivilegeSet(String dbName, String tableName,
 +                                                     String partitionName, String columnName,
 +                                                     String userName,
 +                                                     List<String> groupNames) throws
 +      InvalidObjectException, MetaException {
 +    // We don't support column level privileges
 +    return null;
 +  }
 +
 +  @Override
 +  public List<HiveObjectPrivilege> listPrincipalGlobalGrants(String principalName,
 +                                                             PrincipalType principalType) {
 +    List<PrivilegeGrantInfo> grants;
 +    List<HiveObjectPrivilege> privileges = new ArrayList<HiveObjectPrivilege>();
 +    boolean commit = false;
 +    openTransaction();
 +    try {
 +      PrincipalPrivilegeSet pps = getHBase().getGlobalPrivs();
 +      if (pps == null) return privileges;
 +      Map<String, List<PrivilegeGrantInfo>> map;
 +      switch (principalType) {
 +        case USER:
 +          map = pps.getUserPrivileges();
 +          break;
 +
 +        case ROLE:
 +          map = pps.getRolePrivileges();
 +          break;
 +
 +        default:
 +          throw new RuntimeException("Unknown or unsupported principal type " +
 +              principalType.toString());
 +      }
 +      if (map == null) return privileges;
 +      grants = map.get(principalName);
 +
 +      if (grants == null || grants.size() == 0) return privileges;
 +      for (PrivilegeGrantInfo pgi : grants) {
 +        privileges.add(new HiveObjectPrivilege(new HiveObjectRef(HiveObjectType.GLOBAL, null,
 +            null, null, null), principalName, principalType, pgi));
 +      }
 +      commit = true;
 +      return privileges;
 +    } catch (IOException e) {
 +      throw new RuntimeException(e);
 +    } finally {
 +      commitOrRoleBack(commit);
 +    }
 +  }
 +
 +  @Override
 +  public List<HiveObjectPrivilege> listPrincipalDBGrants(String principalName,
 +                                                         PrincipalType principalType,
 +                                                         String dbName) {
 +    List<PrivilegeGrantInfo> grants;
 +    List<HiveObjectPrivilege> privileges = new ArrayList<HiveObjectPrivilege>();
 +    boolean commit = false;
 +    openTransaction();
 +    try {
 +      Database db = getHBase().getDb(dbName);
 +      if (db == null) return privileges;
 +      PrincipalPrivilegeSet pps = db.getPrivileges();
 +      if (pps == null) return privileges;
 +      Map<String, List<PrivilegeGrantInfo>> map;
 +      switch (principalType) {
 +        case USER:
 +          map = pps.getUserPrivileges();
 +          break;
 +
 +        case ROLE:
 +          map = pps.getRolePrivileges();
 +          break;
 +
 +        default:
 +          throw new RuntimeException("Unknown or unsupported principal type " +
 +              principalType.toString());
 +      }
 +      if (map == null) return privileges;
 +      grants = map.get(principalName);
 +
 +      if (grants == null || grants.size() == 0) return privileges;
 +      for (PrivilegeGrantInfo pgi : grants) {
 +        privileges.add(new HiveObjectPrivilege(new HiveObjectRef(HiveObjectType.DATABASE, dbName,
 +         null, null, null), principalName, principalType, pgi));
 +      }
 +      commit = true;
 +      return privileges;
 +    } catch (IOException e) {
 +      throw new RuntimeException(e);
 +    } finally {
 +      commitOrRoleBack(commit);
 +    }
 +  }
 +
 +  @Override
 +  public List<HiveObjectPrivilege> listAllTableGrants(String principalName,
 +                                                      PrincipalType principalType,
 +                                                      String dbName,
 +                                                      String tableName) {
 +    List<PrivilegeGrantInfo> grants;
 +    List<HiveObjectPrivilege> privileges = new ArrayList<HiveObjectPrivilege>();
 +    boolean commit = false;
 +    openTransaction();
 +    try {
 +      Table table = getHBase().getTable(dbName, tableName);
 +      if (table == null) return privileges;
 +      PrincipalPrivilegeSet pps = table.getPrivileges();
 +      if (pps == null) return privileges;
 +      Map<String, List<PrivilegeGrantInfo>> map;
 +      switch (principalType) {
 +        case USER:
 +          map = pps.getUserPrivileges();
 +          break;
 +
 +        case ROLE:
 +          map = pps.getRolePrivileges();
 +          break;
 +
 +        default:
 +          throw new RuntimeException("Unknown or unsupported principal type " +
 +              principalType.toString());
 +      }
 +      if (map == null) return privileges;
 +      grants = map.get(principalName);
 +
 +      if (grants == null || grants.size() == 0) return privileges;
 +      for (PrivilegeGrantInfo pgi : grants) {
 +        privileges.add(new HiveObjectPrivilege(new HiveObjectRef(HiveObjectType.TABLE, dbName,
 +            tableName, null, null), principalName, principalType, pgi));
 +      }
 +      commit = true;
 +      return privileges;
 +    } catch (IOException e) {
 +      throw new RuntimeException(e);
 +    } finally {
 +      commitOrRoleBack(commit);
 +    }
 +  }
 +
 +  @Override
 +  public List<HiveObjectPrivilege> listPrincipalPartitionGrants(String principalName,
 +                                                                PrincipalType principalType,
 +                                                                String dbName,
 +                                                                String tableName,
 +                                                                List<String> partValues,
 +                                                                String partName) {
 +    // We don't support partition grants
 +    return new ArrayList<HiveObjectPrivilege>();
 +  }
 +
 +  @Override
 +  public List<HiveObjectPrivilege> listPrincipalTableColumnGrants(String principalName,
 +                                                                  PrincipalType principalType,
 +                                                                  String dbName, String tableName,
 +                                                                  String columnName) {
 +    // We don't support column grants
 +    return new ArrayList<HiveObjectPrivilege>();
 +  }
 +
 +  @Override
 +  public List<HiveObjectPrivilege> listPrincipalPartitionColumnGrants(String principalName,
 +                                                                      PrincipalType principalType,
 +                                                                      String dbName,
 +                                                                      String tableName,
 +                                                                      List<String> partVals,
 +                                                                      String partName,
 +                                                                      String columnName) {
 +    // We don't support column grants
 +    return new ArrayList<HiveObjectPrivilege>();
 +  }
 +
 +  @Override
 +  public boolean grantPrivileges(PrivilegeBag privileges)
 +      throws InvalidObjectException, MetaException, NoSuchObjectException {
 +    boolean commit = false;
 +    openTransaction();
 +    try {
 +      for (HiveObjectPrivilege priv : privileges.getPrivileges()) {
 +        // Locate the right object to deal with
 +        PrivilegeInfo privilegeInfo = findPrivilegeToGrantOrRevoke(priv);
 +
 +        // Now, let's see if we've already got this privilege
 +        for (PrivilegeGrantInfo info : privilegeInfo.grants) {
 +          if (info.getPrivilege().equals(priv.getGrantInfo().getPrivilege())) {
 +            throw new InvalidObjectException(priv.getPrincipalName() + " already has " +
 +                priv.getGrantInfo().getPrivilege() + " on " + privilegeInfo.typeErrMsg);
 +          }
 +        }
 +        privilegeInfo.grants.add(priv.getGrantInfo());
 +
 +        writeBackGrantOrRevoke(priv, privilegeInfo);
 +      }
 +      commit = true;
 +      return true;
 +    } finally {
 +      commitOrRoleBack(commit);
 +    }
 +  }
 +
 +  @Override
 +  public boolean revokePrivileges(PrivilegeBag privileges, boolean grantOption) throws
 +      InvalidObjectException, MetaException, NoSuchObjectException {
 +    boolean commit = false;
 +    openTransaction();
 +    try {
 +      for (HiveObjectPrivilege priv : privileges.getPrivileges()) {
 +        PrivilegeInfo privilegeInfo = findPrivilegeToGrantOrRevoke(priv);
 +
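+        // As in revokeRole, grantOption == true means only the grant option is being revoked;
+        // otherwise the privilege itself is removed.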
 +        for (int i = 0; i < privilegeInfo.grants.size(); i++) {
 +          if (privilegeInfo.grants.get(i).getPrivilege().equals(
 +              priv.getGrantInfo().getPrivilege())) {
 +            if (grantOption) privilegeInfo.grants.get(i).setGrantOption(false);
 +            else privilegeInfo.grants.remove(i);
 +            break;
 +          }
 +        }
 +        writeBackGrantOrRevoke(priv, privilegeInfo);
 +      }
 +      commit = true;
 +      return true;
 +    } finally {
 +      commitOrRoleBack(commit);
 +    }
 +  }
 +
 +  private static class PrivilegeInfo {
 +    Database db;
 +    Table table;
 +    List<PrivilegeGrantInfo> grants;
 +    String typeErrMsg;
 +    PrincipalPrivilegeSet privSet;
 +  }
 +
 +  private PrivilegeInfo findPrivilegeToGrantOrRevoke(HiveObjectPrivilege privilege)
 +      throws MetaException, NoSuchObjectException, InvalidObjectException {
 +    PrivilegeInfo result = new PrivilegeInfo();
 +    switch (privilege.getHiveObject().getObjectType()) {
 +      case GLOBAL:
 +        try {
 +          result.privSet = createOnNull(getHBase().getGlobalPrivs());
 +        } catch (IOException e) {
 +          LOG.error("Unable to fetch global privileges", e);
 +          throw new MetaException("Unable to fetch global privileges, " + e.getMessage());
 +        }
 +        result.typeErrMsg = "global";
 +        break;
 +
 +      case DATABASE:
 +        result.db = getDatabase(privilege.getHiveObject().getDbName());
 +        result.typeErrMsg = "database " + result.db.getName();
 +        result.privSet = createOnNull(result.db.getPrivileges());
 +        break;
 +
 +      case TABLE:
 +        result.table = getTable(privilege.getHiveObject().getDbName(),
 +            privilege.getHiveObject().getObjectName());
 +        result.typeErrMsg = "table " + result.table.getTableName();
 +        result.privSet = createOnNull(result.table.getPrivileges());
 +        break;
 +
 +      case PARTITION:
 +      case COLUMN:
 +        throw new RuntimeException("HBase metastore does not support partition or column " +
 +            "permissions");
 +
 +      default:
 +        throw new RuntimeException("Woah bad, unknown object type " +
 +            privilege.getHiveObject().getObjectType());
 +    }
 +
 +    // Locate the right PrivilegeGrantInfo
 +    Map<String, List<PrivilegeGrantInfo>> grantInfos;
 +    switch (privilege.getPrincipalType()) {
 +      case USER:
+        grantInfos = result.privSet.getUserPrivileges();
 +        break;
 +
 +      case GROUP:
 +        throw new RuntimeException("HBase metastore does not support group permissions");
 +
 +      case ROLE:
+        grantInfos = result.privSet.getRolePrivileges();
 +        break;
 +
 +      default:
 +        throw new RuntimeException("Woah bad, unknown principal type " +
 +            privilege.getPrincipalType());
 +    }
 +
 +    // Find the requested name in the grantInfo
 +    result.grants = grantInfos.get(privilege.getPrincipalName());
 +    if (result.grants == null) {
 +      // Means we don't have any grants for this user yet.
 +      result.grants = new ArrayList<PrivilegeGrantInfo>();
 +      grantInfos.put(privilege.getPrincipalName(), result.grants);
 +    }
 +    return result;
 +  }
 +
 +  private PrincipalPrivilegeSet createOnNull(PrincipalPrivilegeSet pps) {
 +    // If this is the first time a user has been granted a privilege set will be null.
 +    if (pps == null) {
 +      pps = new PrincipalPrivilegeSet();
 +    }
 +    if (pps.getUserPrivileges() == null) {
 +      pps.setUserPrivileges(new HashMap<String, List<PrivilegeGrantInfo>>());
 +    }
 +    if (pps.getRolePrivileges() == null) {
 +      pps.setRolePrivileges(new HashMap<String, List<PrivilegeGrantInfo>>());
 +    }
 +    return pps;
 +  }
 +
 +  private void writeBackGrantOrRevoke(HiveObjectPrivilege priv, PrivilegeInfo pi)
 +      throws MetaException, NoSuchObjectException, InvalidObjectException {
 +    // Now write it back
 +    switch (priv.getHiveObject().getObjectType()) {
 +      case GLOBAL:
 +        try {
 +          getHBase().putGlobalPrivs(pi.privSet);
 +        } catch (IOException e) {
 +          LOG.error("Unable to write global privileges", e);
 +          throw new MetaException("Unable to write global privileges, " + e.getMessage());
 +        }
 +        break;
 +
 +      case DATABASE:
 +        pi.db.setPrivileges(pi.privSet);
 +        alterDatabase(pi.db.getName(), pi.db);
 +        break;
 +
 +      case TABLE:
 +        pi.table.setPrivileges(pi.privSet);
 +        alterTable(pi.table.getDbName(), pi.table.getTableName(), pi.table);
 +        break;
 +
 +      default:
 +        throw new RuntimeException("Dude, you missed the second switch!");
 +    }
 +  }
 +
 +  @Override
 +  public Role getRole(String roleName) throws NoSuchObjectException {
 +    boolean commit = false;
 +    openTransaction();
 +    try {
 +      Role role = getHBase().getRole(roleName);
 +      if (role == null) {
 +        throw new NoSuchObjectException("Unable to find role " + roleName);
 +      }
 +      commit = true;
 +      return role;
 +    } catch (IOException e) {
 +      LOG.error("Unable to get role", e);
 +      throw new NoSuchObjectException("Error reading table " + e.getMessage());
 +    } finally {
 +      commitOrRoleBack(commit);
 +    }
 +  }
 +
 +  @Override
 +  public List<String> listRoleNames() {
 +    boolean commit = false;
 +    openTransaction();
 +    try {
 +      List<Role> roles = getHBase().scanRoles();
 +      List<String> roleNames = new ArrayList<String>(roles.size());
 +      for (Role role : roles) roleNames.add(role.getRoleName());
 +      commit = true;
 +      return roleNames;
 +    } catch (IOException e) {
 +      throw new RuntimeException(e);
 +    } finally {
 +      commitOrRoleBack(commit);
 +    }
 +  }
 +
 +  @Override
 +  public List<Role> listRoles(String principalName, PrincipalType principalType) {
 +    List<Role> roles = new ArrayList<Role>();
 +    boolean commit = false;
 +    openTransaction();
 +    try {
 +      try {
 +        roles.addAll(getHBase().getPrincipalDirectRoles(principalName, principalType));
 +      } catch (IOException e) {
 +        throw new RuntimeException(e);
 +      }
 +      // Add the public role if this is a user
 +      if (principalType == PrincipalType.USER) {
 +        roles.add(new Role(HiveMetaStore.PUBLIC, 0, null));
 +      }
 +      commit = true;
 +      return roles;
 +    } finally {
 +      commitOrRoleBack(commit);
 +    }
 +  }
 +
 +  @Override
 +  public List<RolePrincipalGrant> listRolesWithGrants(String principalName,
 +                                                      PrincipalType principalType) {
 +    boolean commit = false;
 +    openTransaction();
 +    try {
 +      List<Role> roles = listRoles(principalName, principalType);
 +      List<RolePrincipalGrant> rpgs = new ArrayList<RolePrincipalGrant>(roles.size());
 +      for (Role role : roles) {
 +        HbaseMetastoreProto.RoleGrantInfoList grants = getHBase().getRolePrincipals(role.getRoleName());
 +        if (grants != null) {
 +          for (HbaseMetastoreProto.RoleGrantInfo grant : grants.getGrantInfoList()) {
 +            if (grant.getPrincipalType() == HBaseUtils.convertPrincipalTypes(principalType) &&
 +                grant.getPrincipalName().equals(principalName)) {
 +              rpgs.add(new RolePrincipalGrant(role.getRoleName(), principalName, principalType,
 +                  grant.getGrantOption(), (int) grant.getAddTime(), grant.getGrantor(),
 +                  HBaseUtils.convertPrincipalTypes(grant.getGrantorType())));
 +            }
 +          }
 +        }
 +      }
 +      commit = true;
 +      return rpgs;
 +    } catch (Exception e) {
 +      throw new RuntimeException(e);
 +    } finally {
 +      commitOrRoleBack(commit);
 +    }
 +  }
 +
 +  @Override
 +  public List<RolePrincipalGrant> listRoleMembers(String roleName) {
 +    boolean commit = false;
 +    openTransaction();
 +    try {
 +      HbaseMetastoreProto.RoleGrantInfoList gil = getHBase().getRolePrincipals(roleName);
 +      List<RolePrincipalGrant> roleMaps = new ArrayList<RolePrincipalGrant>(gil.getGrantInfoList().size());
 +      for (HbaseMetastoreProto.RoleGrantInfo giw : gil.getGrantInfoList()) {
 +        roleMaps.add(new RolePrincipalGrant(roleName, giw.getPrincipalName(),
 +            HBaseUtils.convertPrincipalTypes(giw.getPrincipalType()),
 +            giw.getGrantOption(), (int)giw.getAddTime(), giw.getGrantor(),
 +            HBaseUtils.convertPrincipalTypes(giw.getGrantorType())));
 +      }
 +      commit = true;
 +      return roleMaps;
 +    } catch (Exception e) {
 +      throw new RuntimeException(e);
 +    } finally {
 +      commitOrRoleBack(commit);
 +    }
 +  }
 +
 +  @Override
 +  public Partition getPartitionWithAuth(String dbName, String tblName, List<String> partVals,
 +                                        String user_name, List<String> group_names)
 +      throws MetaException, NoSuchObjectException, InvalidObjectException {
 +    // We don't do authorization checks for partitions.
 +    return getPartition(dbName, tblName, partVals);
 +  }
 +
 +  @Override
 +  public List<Partition> getPartitionsWithAuth(String dbName, String tblName, short maxParts,
 +                                               String userName, List<String> groupNames)
 +      throws MetaException, NoSuchObjectException, InvalidObjectException {
 +    // We don't do authorization checks for partitions.
 +    return getPartitions(dbName, tblName, maxParts);
 +  }
 +
 +  @Override
 +  public List<String> listPartitionNamesPs(String db_name, String tbl_name, List<String> part_vals,
 +                                           short max_parts)
 +      throws MetaException, NoSuchObjectException {
 +    List<Partition> parts =
 +        listPartitionsPsWithAuth(db_name, tbl_name, part_vals, max_parts, null, null);
 +    List<String> partNames = new ArrayList<String>(parts.size());
 +    for (Partition part : parts) {
 +      partNames.add(buildExternalPartName(db_name, tbl_name, part.getValues()));
 +    }
 +    return partNames;
 +  }
 +
 +
 +  @Override
 +  public List<Partition> listPartitionsPsWithAuth(String db_name, String tbl_name,
 +                                                  List<String> part_vals, short max_parts,
 +                                                  String userName, List<String> groupNames)
 +      throws MetaException, NoSuchObjectException {
 +    // We don't handle auth info with partitions
 +    boolean commit = false;
 +    openTransaction();
 +    try {
 +      List<Partition> parts = getHBase().scanPartitions(db_name, tbl_name, part_vals, max_parts);
 +      commit = true;
 +      return parts;
 +    } catch (IOException e) {
 +      LOG.error("Unable to list partition names", e);
 +      throw new MetaException("Failed to list part names, " + e.getMessage());
 +    } finally {
 +      commitOrRoleBack(commit);
 +    }
 +  }
 +
 +  @Override
 +  public boolean updateTableColumnStatistics(ColumnStatistics colStats) throws
 +      NoSuchObjectException, MetaException, InvalidObjectException, InvalidInputException {
 +    boolean commit = false;
 +    openTransaction();
 +    try {
 +      getHBase().updateStatistics(colStats.getStatsDesc().getDbName(),
 +          colStats.getStatsDesc().getTableName(), null, colStats);
 +      commit = true;
 +      return true;
 +    } catch (IOException e) {
 +      LOG.error("Unable to update column statistics", e);
 +      throw new MetaException("Failed to update column statistics, " + e.getMessage());
 +    } finally {
 +      commitOrRoleBack(commit);
 +    }
 +  }
 +
 +  @Override
 +  public boolean updatePartitionColumnStatistics(ColumnStatistics statsObj,
 +                                                 List<String> partVals) throws
 +      NoSuchObjectException, MetaException, InvalidObjectException, InvalidInputException {
 +    boolean commit = false;
 +    openTransaction();
 +    try {
 +      getHBase().updateStatistics(statsObj.getStatsDesc().getDbName(),
 +          statsObj.getStatsDesc().getTableName(), partVals, statsObj);
 +      // We need to invalidate aggregates that include this partition
 +      getHBase().getStatsCache().invalidate(statsObj.getStatsDesc().getDbName(),
 +          statsObj.getStatsDesc().getTableName(), statsObj.getStatsDesc().getPartName());
 +      commit = true;
 +      return true;
 +    } catch (IOException e) {
 +      LOG.error("Unable to update column statistics", e);
 +      throw new MetaException("Failed to update column statistics, " + e.getMessage());
 +    } finally {
 +      commitOrRoleBack(commit);
 +    }
 +  }
 +
 +  @Override
 +  public ColumnStatistics getTableColumnStatistics(String dbName, String tableName,
 +                                                   List<String> colName) throws MetaException,
 +      NoSuchObjectException {
 +    boolean commit = false;
 +    openTransaction();
 +    try {
 +      ColumnStatistics cs = getHBase().getTableStatistics(dbName, tableName, colName);
 +      commit = true;
 +      return cs;
 +    } catch (IOException e) {
 +      LOG.error("Unable to fetch column statistics", e);
 +      throw new MetaException("Failed to fetch column statistics, " + e.getMessage());
 +    } finally {
 +      commitOrRoleBack(commit);
 +    }
 +  }
 +
 +  @Override
 +  public List<ColumnStatistics> getPartitionColumnStatistics(String dbName, String tblName,
 +      List<String> partNames, List<String> colNames) throws MetaException, NoSuchObjectException {
 +    List<List<String>> partVals = new ArrayList<List<String>>(partNames.size());
 +    for (String partName : partNames) {
 +      partVals.add(partNameToVals(partName));
 +    }
 +    boolean commit = false;
 +    openTransaction();
 +    try {
 +      List<ColumnStatistics> cs =
 +          getHBase().getPartitionStatistics(dbName, tblName, partNames,  partVals, colNames);
 +      commit = true;
 +      return cs;
 +    } catch (IOException e) {
 +      LOG.error("Unable to fetch column statistics", e);
 +      throw new MetaException("Failed fetching column statistics, " + e.getMessage());
 +    } finally {
 +      commitOrRoleBack(commit);
 +    }
 +  }
 +
 +  @Override
 +  public boolean deletePartitionColumnStatistics(String dbName, String tableName, String partName,
 +      List<String> partVals, String colName) throws NoSuchObjectException, MetaException,
 +      InvalidObjectException, InvalidInputException {
 +    // NOP, stats will be deleted along with the partition when it is dropped.
 +    return true;
 +  }
 +
 +  @Override
 +  public boolean deleteTableColumnStatistics(String dbName, String tableName, String colName) throws
 +      NoSuchObjectException, MetaException, InvalidObjectException, InvalidInputException {
 +    // NOP, stats will be deleted along with the table when it is dropped.
 +    return true;
 +  }
 +
 +  /**
 +   * Return aggregated statistics for each column in the colNames list aggregated over partitions in
 +   * the partNames list
 +   *
 +   */
 +  @Override
 +  public AggrStats get_aggr_stats_for(String dbName, String tblName, List<String> partNames,
 +      List<String> colNames) throws MetaException, NoSuchObjectException {
 +    List<List<String>> partVals = new ArrayList<List<String>>(partNames.size());
 +    for (String partName : partNames) {
 +      partVals.add(partNameToVals(partName));
 +    }
 +    boolean commit = false;
 +    openTransaction();
 +    try {
 +      AggrStats aggrStats = new AggrStats();
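+      // Each stats cache entry aggregates a single column over the given partition set, so the
+      // overall result is assembled one column at a time.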
 +      for (String colName : colNames) {
 +        try {
 +          AggrStats oneCol =
 +              getHBase().getStatsCache().get(dbName, tblName, partNames, colName);
 +          if (oneCol.getColStatsSize() > 0) {
 +            assert oneCol.getColStatsSize() == 1;
 +            aggrStats.setPartsFound(aggrStats.getPartsFound() + oneCol.getPartsFound());
 +            aggrStats.addToColStats(oneCol.getColStats().get(0));
 +          }
 +        } catch (CacheLoader.InvalidCacheLoadException e) {
 +          LOG.debug("Found no stats for column " + colName);
 +          // This means we have no stats at all for this column for these partitions, so just
 +          // move on.
 +        }
 +      }
 +      commit = true;
 +      return aggrStats;
 +    } catch (IOException e) {
 +      LOG.error("Unable to fetch aggregate column statistics", e);
 +      throw new MetaException("Failed fetching aggregate column statistics, " + e.getMessage());
 +    } finally {
 +      commitOrRoleBack(commit);
 +    }
 +  }
 +
 +  @Override
 +  public long cleanupEvents() {
 +    throw new UnsupportedOperationException();
 +  }
 +
 +  @Override
 +  public boolean addToken(String tokenIdentifier, String delegationToken) {
 +    boolean commit = false;
 +    openTransaction();
 +    try {
 +      getHBase().putDelegationToken(tokenIdentifier, delegationToken);
 +      commit = true;
 +      return commit; // See HIVE-11302, for now always returning true
 +    } catch (IOException e) {
 +      throw new RuntimeException(e);
 +    } finally {
 +      commitOrRoleBack(commit);
 +    }
 +  }
 +
 +  @Override
 +  public boolean removeToken(String tokenIdentifier) {
 +    boolean commit = false;
 +    openTransaction();
 +    try {
 +      getHBase().deleteDelegationToken(tokenIdentifier);
 +      commit = true;
 +      return commit; // See HIVE-11302, for now always returning true
 +    } catch (IOException e) {
 +      throw new RuntimeException(e);
 +    } finally {
 +      commitOrRoleBack(commit);
 +    }
 +  }
 +
 +  @Override
 +  public String getToken(String tokenIdentifier) {
 +    boolean commit = false;
 +    openTransaction();
 +    try {
 +      String token = getHBase().getDelegationToken(tokenIdentifier);
 +      commit = true;
 +      return token;
 +    } catch (IOException e) {
 +      throw new RuntimeException(e);
 +    } finally {
 +      commitOrRoleBack(commit);
 +    }
 +  }
 +
 +  @Override
 +  public List<String> getAllTokenIdentifiers() {
 +    boolean commit = false;
 +    openTransaction();
 +    try {
 +      List<String> ids = getHBase().scanDelegationTokenIdentifiers();
 +      commit = true;
 +      return ids;
 +    } catch (IOException e) {
 +      throw new RuntimeException(e);
 +    } finally {
 +      commitOrRoleBack(commit);
 +    }
 +  }
 +
 +  @Override
 +  public int addMasterKey(String key) throws MetaException {
 +    boolean commit = false;
 +    openTransaction();
 +    try {
 +      long seq = getHBase().getNextSequence(HBaseReadWrite.MASTER_KEY_SEQUENCE);
 +      getHBase().putMasterKey((int) seq, key);
 +      commit = true;
 +      return (int)seq;
 +    } catch (IOException e) {
 +      LOG.error("Unable to add master key", e);
 +      throw new MetaException("Failed adding master key, " + e.getMessage());
 +    } finally {
 +      commitOrRoleBack(commit);
 +    }
 +  }
 +
 +  @Override
 +  public void updateMasterKey(Integer seqNo, String key) throws NoSuchObjectException,
 +      MetaException {
 +    boolean commit = false;
 +    openTransaction();
 +    try {
 +      if (getHBase().getMasterKey(seqNo) == null) {
 +        throw new NoSuchObjectException("No key found with keyId: " + seqNo);
 +      }
 +      getHBase().putMasterKey(seqNo, key);
 +      commit = true;
 +    } catch (IOException e) {
 +      LOG.error("Unable to update master key", e);
 +      throw new MetaException("Failed updating master key, " + e.getMessage());
 +    } finally {
 +      commitOrRoleBack(commit);
 +    }
 +  }
 +
 +  @Override
 +  public boolean removeMasterKey(Integer keySeq) {
 +    boolean commit = false;
 +    openTransaction();
 +    try {
 +      getHBase().deleteMasterKey(keySeq);
 +      commit = true;
 +      return true;  // See HIVE-11302, for now always returning true
 +    } catch (IOException e) {
 +      throw new RuntimeException(e);
 +    } finally {
 +      commitOrRoleBack(commit);
 +    }
 +  }
 +
 +  @Override
 +  public String[] getMasterKeys() {
 +    boolean commit = false;
 +    openTransaction();
 +    try {
 +      List<String> keys = getHBase().scanMasterKeys();
 +      commit = true;
 +      return keys.toArray(new String[keys.size()]);
 +    } catch (IOException e) {
 +      throw new RuntimeException(e);
 +    } finally {
 +      commitOrRoleBack(commit);
 +    }
 +  }
 +
 +  @Override
 +  public void verifySchema() throws MetaException {
 +
 +  }
 +
 +  @Override
 +  public String getMetaStoreSchemaVersion() throws MetaException {
 +    throw new UnsupportedOperationException();
 +  }
 +
 +  @Override
 +  public void setMetaStoreSchemaVersion(String version, String comment) throws MetaException {
 +    throw new UnsupportedOperationException();
 +  }
 +
 +  @Override
 +  public void dropPartitions(String dbName, String tblName, List<String> partNames) throws
 +      MetaException, NoSuchObjectException {
 +    boolean commit = false;
 +    openTransaction();
 +    try {
 +      for (String partName : partNames) {
 +        dropPartition(dbName, tblName, partNameToVals(partName));
 +      }
 +      commit = true;
 +    } catch (Exception e) {
 +      LOG.error("Unable to drop partitions", e);
 +      throw new NoSuchObjectException("Failure dropping partitions, " + e.getMessage());
 +    } finally {
 +      commitOrRoleBack(commit);
 +    }
 +  }
 +
 +  @Override
 +  public List<HiveObjectPrivilege> listPrincipalDBGrantsAll(String principalName,
 +                                                            PrincipalType principalType) {
 +    List<HiveObjectPrivilege> privileges = new ArrayList<HiveObjectPrivilege>();
 +    boolean commit = false;
 +    openTransaction();
 +    try {
 +      List<Database> dbs = getHBase().scanDatabases(null);
 +      for (Database db : dbs) {
 +        List<PrivilegeGrantInfo> grants;
 +
 +        PrincipalPrivilegeSet pps = db.getPrivileges();
 +        if (pps == null) continue;
 +        Map<String, List<PrivilegeGrantInfo>> map;
 +        switch (principalType) {
 +          case USER:
 +            map = pps.getUserPrivileges();
 +            break;
 +
 +          case ROLE:
 +            map = pps.getRolePrivileges();
 +            break;
 +
 +          default:
 +            throw new RuntimeException("Unknown or unsupported principal type " +
 +                principalType.toString());
 +        }
 +
 +        if (map == null) continue;
 +        grants = map.get(principalName);
 +        if (grants == null || grants.size() == 0) continue;
 +        for (PrivilegeGrantInfo pgi : grants) {
 +          privileges.add(new HiveObjectPrivilege(new HiveObjectRef(HiveObjectType.DATABASE,
 +              db.getName(), null, null, null), principalName, principalType, pgi));
 +        }
 +      }
 +      commit = true;
 +      return privileges;
 +    } catch (IOException e) {
 +      throw new RuntimeException(e);
 +    } finally {
 +      commitOrRoleBack(commit);
 +    }
 +  }
 +
 +  @Override
 +  public List<HiveObjectPrivilege> listPrincipalTableGrantsAll(String principalName,
 +                                                               PrincipalType principalType) {
 +    List<HiveObjectPrivilege> privileges = new ArrayList<HiveObjectPrivilege>();
 +    boolean commit = false;
 +    openTransaction();
 +    try {
 +      List<Table> tables = getHBase().scanTables(null, null);
 +      for (Table table : tables) {
 +        List<PrivilegeGrantInfo> grants;
 +
 +        PrincipalPrivilegeSet pps = table.getPrivileges();
 +        if (pps == null) continue;
 +        Map<String, List<PrivilegeGrantInfo>> map;
 +        switch (principalType) {
 +          case USER:
 +            map = pps.getUserPrivileges();
 +            break;
 +
 +          case ROLE:
 +            map = pps.getRolePrivileges();
 +            break;
 +
 +          default:
 +            throw new RuntimeException("Unknown or unsupported principal type " +
 +                principalType.toString());
 +        }
 +
 +        if (map == null) continue;
 +        grants = map.get(principalName);
 +        if (grants == null || grants.size() == 0) continue;
 +        for (PrivilegeGrantInfo pgi : grants) {
 +          privileges.add(new HiveObjectPrivilege(new HiveObjectRef(HiveObjectType.TABLE,
 +              table.getDbName(), table.getTableName(), null, null), principalName, principalType,
 +              pgi));
 +        }
 +      }
 +      commit = true;
 +      return privileges;
 +    } catch (IOException e) {
 +      throw new RuntimeException(e);
 +    } finally {
 +      commitOrRoleBack(commit);
 +    }
 +  }
 +
 +  @Override
 +  public List<HiveObjectPrivilege> listPrincipalPartitionGrantsAll(String principalName,
 +                                                                   PrincipalType principalType) {
 +    return new ArrayList<HiveObjectPrivilege>();
 +  }
 +
 +  @Override
 +  public List<HiveObjectPrivilege> listPrincipalTableColumnGrantsAll(String principalName,
 +                                                                     PrincipalType principalType) {
 +    return new ArrayList<HiveObjectPrivilege>();
 +  }
 +
 +  @Override
 +  public List<HiveObjectPrivilege> listPrincipalPartitionColumnGrantsAll(String principalName,
 +                                                                         PrincipalType principalType) {
 +    return new ArrayList<HiveObjectPrivilege>();
 +  }
 +
 +  @Override
 +  public List<HiveObjectPrivilege> listGlobalGrantsAll() {
 +    List<HiveObjectPrivilege> privileges = new ArrayList<HiveObjectPrivilege>();
 +    boolean commit = false;
 +    openTransaction();
 +    try {
 +      PrincipalPrivilegeSet pps = getHBase().getGlobalPrivs();
 +      if (pps != null) {
 +        for (Map.Entry<String, List<PrivilegeGrantInfo>> e : pps.getUserPrivileges().entrySet()) {
 +          for (PrivilegeGrantInfo pgi : e.getValue()) {
 +            privileges.add(new HiveObjectPrivilege(new HiveObjectRef(HiveObjectType.GLOBAL, null,
 +                null, null, null), e.getKey(), PrincipalType.USER, pgi));
 +          }
 +        }
 +        for (Map.Entry<String, List<PrivilegeGrantInfo>> e : pps.getRolePrivileges().entrySet()) {
 +          for (PrivilegeGrantInfo pgi : e.getValue()) {
 +            privileges.add(new HiveObjectPrivilege(new HiveObjectRef(HiveObjectType.GLOBAL, null,
 +                null, null, null), e.getKey(), PrincipalType.ROLE, pgi));
 +          }
 +        }
 +      }
 +      commit = true;
 +      return privileges;
 +    } catch (IOException e) {
 +      throw new RuntimeException(e);
 +    } finally {
 +      commitOrRoleBack(commit);
 +    }
 +  }
 +
 +  @Override
 +  public List<HiveObjectPrivilege> listDBGrantsAll(String dbName) {
 +    List<HiveObjectPrivilege> privileges = new ArrayList<HiveObjectPrivilege>();
 +    boolean commit = false;
 +    openTransaction();
 +    try {
 +      Database db = getHBase().getDb(dbName);
 +      PrincipalPrivilegeSet pps = db.getPrivileges();
 +      if (pps != null) {
 +        for (Map.Entry<String, List<PrivilegeGrantInfo>> e : pps.getUserPrivileges().entrySet()) {
 +          for (PrivilegeGrantInfo pgi : e.getValue()) {
 +            privileges.add(new HiveObjectPrivilege(new HiveObjectRef(HiveObjectType.DATABASE, dbName,
 +                null, null, null), e.getKey(), PrincipalType.USER, pgi));
 +          }
 +        }
 +        for (Map.Entry<String, List<PrivilegeGrantInfo>> e : pps.getRolePrivileges().entrySet()) {
 +          for (PrivilegeGrantInfo pgi : e.getValue()) {
 +            privileges.add(new HiveObjectPrivilege(new HiveObjectRef(HiveObjectType.DATABASE, dbName,
 +                null, null, null), e.getKey(), PrincipalType.ROLE, pgi));
 +          }
 +        }
 +      }
 +      commit = true;
 +      return privileges;
 +    } catch (IOException e) {
 +      throw new RuntimeException(e);
 +    } finally {
 +      commitOrRoleBack(commit);
 +    }
 +  }
 +
 +  @Override
 +  public List<HiveObjectPrivilege> listPartitionColumnGrantsAll(String dbName, String tableName,
 +                                                                String partitionName,
 +                                                                String columnName) {
 +    return new ArrayList<HiveObjectPrivilege>();
 +  }
 +
 +  @Override
 +  public List<HiveObjectPrivilege> listTableGrantsAll(String dbName, String tableName) {
 +    List<HiveObjectPrivilege> privileges = new ArrayList<HiveObjectPrivilege>();
 +    boolean commit = false;
 +    openTransaction();
 +    try {
 +      Table table = getHBase().getTable(dbName, tableName);
 +      PrincipalPrivilegeSet pps = table.getPrivileges();
 +      if (pps != null) {
 +        for (Map.Entry<String, List<PrivilegeGrantInfo>> e : pps.getUserPrivileges().entrySet()) {
 +          for (PrivilegeGrantInfo pgi : e.getValue()) {
 +            privileges.add(new HiveObjectPrivilege(new HiveObjectRef(HiveObjectType.TABLE, dbName,
 +                tableName, null, null), e.getKey(), PrincipalType.USER, pgi));
 +          }
 +        }
 +        for (Map.Entry<String, List<PrivilegeGrantInfo>> e : pps.getRolePrivileges().entrySet()) {
 +          for (PrivilegeGrantInfo pgi : e.getValue()) {
 +            privileges.add(new HiveObjectPrivilege(new HiveObjectRef(HiveObjectType.TABLE, dbName,
 +                tableName, null, null), e.getKey(), PrincipalType.ROLE, pgi));
 +          }
 +        }
 +      }
 +      commit = true;
 +      return privileges;
 +    } catch (IOException e) {
 +      throw new RuntimeException(e);
 +    } finally {
 +      commitOrRoleBack(commit);
 +    }
 +  }
 +
 +  @Override
 +  public List<HiveObjectPrivilege> listPartitionGrantsAll(String dbName, String tableName,
 +                                                          String partitionName) {
 +    return new ArrayList<HiveObjectPrivilege>();
 +  }
 +
 +  @Override
 +  public List<HiveObjectPrivilege> listTableColumnGrantsAll(String dbName, String tableName,
 +                                                            String columnName) {
 +    return new ArrayList<HiveObjectPrivilege>();
 +  }
 +
 +  @Override
 +  public void createFunction(Function func) throws InvalidObjectException, MetaException {
 +    boolean commit = false;
 +    openTransaction();
 +    try {
 +      getHBase().putFunction(func);
 +      commit = true;
 +    } catch (IOException e) {
 +      LOG.error("Unable to create function", e);
 +      throw new MetaException("Unable to read from or write to hbase " + e.getMessage());
 +    } finally {
 +      commitOrRoleBack(commit);
 +    }
 +  }
 +
 +  @Override
 +  public void alterFunction(String dbName, String funcName, Function newFunction) throws
 +      InvalidObjectException, MetaException {
 +    boolean commit = false;
 +    openTransaction();
 +    try {
 +      getHBase().putFunction(newFunction);
 +      commit = true;
 +    } catch (IOException e) {
 +      LOG.error("Unable to alter function ", e);
 +      throw new MetaException("Unable to read from or write to hbase " + e.getMessage());
 +    } finally {
 +      commitOrRoleBack(commit);
 +    }
 +  }
 +
 +  @Override
 +  public void dropFunction(String dbName, String funcName) throws MetaException,
 +      NoSuchObjectException, InvalidObjectException, InvalidInputException {
 +    boolean commit = false;
 +    openTransaction();
 +    try {
 +      getHBase().deleteFunction(dbName, funcName);
 +      commit = true;
 +    } catch (IOException e) {
 +      LOG.error("Unable to delete function" + e);
 +      throw new MetaException("Unable to read from or write to hbase " + e.getMessage());
 +    } finally {
 +      commitOrRoleBack(commit);
 +    }
 +  }
 +
 +  @Override
 +  public Function getFunction(String dbName, String funcName) throws MetaException {
 +    boolean commit = false;
 +    openTransaction();
 +    try {
 +      Function func = getHBase().getFunction(dbName, funcName);
 +      commit = true;
 +      return func;
 +    } catch (IOException e) {
 +      LOG.error("Unable to get function" + e);
 +      throw new MetaException("Unable to read from or write to hbase " + e.getMessage());
 +    } finally {
 +      commitOrRoleBack(commit);
 +    }
 +  }
 +
 +  @Override
++  public List<Function> getAllFunctions() throws MetaException {
++    boolean commit = false;
++    openTransaction();
++    try {
++      List<Function> funcs = getHBase().scanFunctions(null, ".*");
++      commit = true;
++      return funcs;
++    } catch (IOException e) {
++      LOG.error("Unable to get functions" + e);
++      throw new MetaException("Unable to read from or write to hbase " + e.getMessage());
++    } finally {
++      commitOrRoleBack(commit);
++    }
++  }
++
++  @Override
 +  public List<String> getFunctions(String dbName, String pattern) throws MetaException {
 +    boolean commit = false;
 +    openTransaction();
 +    try {
 +      List<Function> funcs = getHBase().scanFunctions(dbName, likeToRegex(pattern));
 +      List<String> funcNames = new ArrayList<String>(funcs.size());
 +      for (Function func : funcs) funcNames.add(func.getFunctionName());
 +      commit = true;
 +      return funcNames;
 +    } catch (IOException e) {
 +      LOG.error("Unable to get functions" + e);
 +      throw new MetaException("Unable to read from or write to hbase " + e.getMessage());
 +    } finally {
 +      commitOrRoleBack(commit);
 +    }
 +  }
 +
 +  @Override
 +  public NotificationEventResponse getNextNotification(NotificationEventRequest rqst) {
 +    throw new UnsupportedOperationException();
 +  }
 +
 +  @Override
 +  public void addNotificationEvent(NotificationEvent event) {
 +    throw new UnsupportedOperationException();
 +  }
 +
 +  @Override
 +  public void cleanNotificationEvents(int olderThan) {
 +    throw new UnsupportedOperationException();
 +  }
 +
 +  @Override
 +  public CurrentNotificationEventId getCurrentNotificationEventId() {
 +    throw new UnsupportedOperationException();
 +  }
 +
 +  @Override
 +  public void flushCache() {
 +    getHBase().flushCatalogCache();
 +  }
 +
 +  @Override
 +  public void setConf(Configuration configuration) {
 +    // initialize expressionProxy. Also re-initialize it if
 +    // setConf is being called with a new configuration object (though that
 +    // is not expected to happen, doing it just for safety)
 +    if(expressionProxy == null || conf != configuration) {
 +      expressionProxy = PartFilterExprUtil.createExpressionProxy(configuration);
 +    }
 +    conf = configuration;
 +  }
 +
 +  @Override
 +  public Configuration getConf() {
 +    return conf;
 +  }
 +
 +  private HBaseReadWrite getHBase() {
 +    if (hbase == null) hbase = HBaseReadWrite.getInstance(conf);
 +    return hbase;
 +  }
 +
 +  // This is for building error messages only.  It does not look up anything in the metastore.
 +  private String tableNameForErrorMsg(String dbName, String tableName) {
 +    return dbName + "." + tableName;
 +  }
 +
 +  // This is for building error messages only.  It does not look up anything in the metastore,
 +  // since such a lookup may just throw another error.
 +  private String partNameForErrorMsg(String dbName, String tableName, List<String> partVals) {
 +    return tableNameForErrorMsg(dbName, tableName) + "." + StringUtils.join(partVals, ':');
 +  }
 +
 +  private String buildExternalPartName(Table table, Partition part) {
 +    return buildExternalPartName(table, part.getValues());
 +  }
 +
 +  private String buildExternalPartName(String dbName, String tableName, List<String> partVals)
 +      throws MetaException {
 +    return buildExternalPartName(getTable(dbName, tableName), partVals);
 +  }
 +
 +  private Set<String> findUsersToRemapRolesFor(Role role, String principalName, PrincipalType type)
 +      throws IOException, NoSuchObjectException {
 +    Set<String> usersToRemap;
 +    switch (type) {
 +      case USER:
 +        // In this case it's just the user being added to the role that we need to remap for.
 +        usersToRemap = new HashSet<String>();
 +        usersToRemap.add(principalName);
 +        break;
 +
 +      case ROLE:
 +        // In this case we need to remap for all users in the containing role (not the role being
 +        // granted into the containing role).
 +        usersToRemap = getHBase().findAllUsersInRole(role.getRoleName());
 +        break;
 +
 +      default:
 +        throw new RuntimeException("Unknown principal type " + type);
 +
 +    }
 +    return usersToRemap;
 +  }
 +
 +  /**
 +   * Build a partition name for external use.  Necessary since HBase itself doesn't store
 +   * partition names.
 +   * @param table  table object
 +   * @param partVals partition values.
 +   * @return the partition name in key=value/key=value form
 +   */
 +  static String buildExternalPartName(Table table, List<String> partVals) {
 +    List<String> partCols = new ArrayList<String>();
 +    for (FieldSchema pc : table.getPartitionKeys()) partCols.add(pc.getName());
 +    return FileUtils.makePartName(partCols, partVals);
 +  }
 +
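 +  // Converts a partition name such as "ds=2015-08-14/hr=12" into its list of values ("2015-08-14", "12").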
 +  private static List<String> partNameToVals(String name) {
 +    if (name == null) return null;
 +    List<String> vals = new ArrayList<String>();
 +    String[] kvp = name.split("/");
 +    for (String kv : kvp) {
 +      vals.add(kv.substring(kv.indexOf('=') + 1));
 +    }
 +    return vals;
 +  }
 +
 +  static List<List<String>> partNameListToValsList(List<String> partNames) {
 +    List<List<String>> valLists = new ArrayList<List<String>>(partNames.size());
 +    for (String partName : partNames) {
 +      valLists.add(partNameToVals(partName));
 +    }
 +    return valLists;
 +  }
 +
 +  private String likeToRegex(String like) {
 +    if (like == null) return null;
 +    // Convert Hive's strange like syntax to Java regex.  Per
 +    // https://cwiki.apache.org/confluence/display/Hive/LanguageManual+DDL#LanguageManualDDL-Show
 +    // the supported syntax is that * means Java .* and | means 'or'
 +    // This implementation leaves other regular expression syntax alone, which means people can
 +    // use it, even though it wouldn't work on RDBMS backed metastores.
 +    return like.replace("*", ".*");
 +  }
 +
 +  private void commitOrRoleBack(boolean commit) {
 +    if (commit) {
 +      LOG.debug("Committing transaction");
 +      commitTransaction();
 +    } else {
 +      LOG.debug("Rolling back transaction");
 +      rollbackTransaction();
 +    }
 +  }
 +
 +  @VisibleForTesting HBaseReadWrite backdoor() {
 +    return getHBase();
 +  }
 +}
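
For reference, the LIKE-to-regex conversion in likeToRegex() above is the only pattern handling getFunctions() applies: '*' is rewritten to '.*' and everything else, including '|', is passed straight through as Java regex. A small standalone sketch of what that means in practice (hypothetical class and test values, not part of this patch):

  // LikeToRegexDemo.java -- illustrates the pattern handling used by getFunctions()/likeToRegex().
  public class LikeToRegexDemo {
    // Same conversion as the likeToRegex() helper above: only '*' is rewritten.
    static String likeToRegex(String like) {
      return like == null ? null : like.replace("*", ".*");
    }

    public static void main(String[] args) {
      System.out.println("my_upper".matches(likeToRegex("my_*")));                // true: '*' behaves like SQL '%'
      System.out.println("my_upper".matches(likeToRegex("my_upper|my_lower")));   // true: '|' is regex alternation
      System.out.println("my_upper".matches(likeToRegex("other*")));              // false
    }
  }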

http://git-wip-us.apache.org/repos/asf/hive/blob/c528294b/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java
----------------------------------------------------------------------
diff --cc metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java
index 9831eb9,f184c56..6efadba
--- a/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java
+++ b/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java
@@@ -19,8 -19,10 +19,9 @@@
  package org.apache.hadoop.hive.metastore;
  
  import java.util.ArrayList;
+ import java.util.Collections;
  import java.util.List;
  import java.util.Map;
 -import java.util.SortedSet;
  
  import org.apache.hadoop.conf.Configurable;
  import org.apache.hadoop.conf.Configuration;

http://git-wip-us.apache.org/repos/asf/hive/blob/c528294b/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hive/blob/c528294b/pom.xml
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hive/blob/c528294b/ql/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/ql/plan/api/Adjacency.java
----------------------------------------------------------------------
diff --cc ql/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/ql/plan/api/Adjacency.java
index 22ca225,5ea9b6e..b26ab96
--- a/ql/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/ql/plan/api/Adjacency.java
+++ b/ql/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/ql/plan/api/Adjacency.java
@@@ -31,7 -33,9 +33,9 @@@ import javax.annotation.Generated
  import org.slf4j.Logger;
  import org.slf4j.LoggerFactory;
  
- public class Adjacency implements org.apache.thrift.TBase<Adjacency, Adjacency._Fields>, java.io.Serializable, Cloneable {
+ @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
 -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3")
++@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-14")
+ public class Adjacency implements org.apache.thrift.TBase<Adjacency, Adjacency._Fields>, java.io.Serializable, Cloneable, Comparable<Adjacency> {
    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("Adjacency");
  
    private static final org.apache.thrift.protocol.TField NODE_FIELD_DESC = new org.apache.thrift.protocol.TField("node", org.apache.thrift.protocol.TType.STRING, (short)1);


[37/50] [abbrv] hive git commit: HIVE-11567: Some trace logs seeped through with new log4j2 changes (Prasanth Jayachandran reviewed by Sergey Shelukhin)

Posted by se...@apache.org.
HIVE-11567: Some trace logs seeped through with new log4j2 changes (Prasanth Jayachandran reviewed by Sergey Shelukhin)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/cf0481fc
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/cf0481fc
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/cf0481fc

Branch: refs/heads/hbase-metastore
Commit: cf0481fcf26087dc2cd2de8b10bc2b13befa96ac
Parents: 0fab86c
Author: Prasanth Jayachandran <j....@gmail.com>
Authored: Fri Aug 14 14:04:10 2015 -0700
Committer: Prasanth Jayachandran <j....@gmail.com>
Committed: Fri Aug 14 14:04:10 2015 -0700

----------------------------------------------------------------------
 beeline/src/main/resources/beeline-log4j2.xml           | 2 +-
 common/src/test/resources/hive-exec-log4j2-test.xml     | 2 +-
 common/src/test/resources/hive-log4j2-test.xml          | 2 +-
 data/conf/hive-log4j2.xml                               | 2 +-
 hcatalog/webhcat/svr/src/main/config/webhcat-log4j2.xml | 2 +-
 ql/src/main/resources/hive-exec-log4j2.xml              | 2 +-
 testutils/ptest2/src/main/resources/log4j2.xml          | 2 +-
 7 files changed, 7 insertions(+), 7 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/cf0481fc/beeline/src/main/resources/beeline-log4j2.xml
----------------------------------------------------------------------
diff --git a/beeline/src/main/resources/beeline-log4j2.xml b/beeline/src/main/resources/beeline-log4j2.xml
index 5f09741..2349c5a 100644
--- a/beeline/src/main/resources/beeline-log4j2.xml
+++ b/beeline/src/main/resources/beeline-log4j2.xml
@@ -20,7 +20,7 @@
  packages="org.apache.hadoop.hive.ql.log">
 
   <Properties>
-    <Property name="hive.log.threshold">ALL</Property>
+    <Property name="hive.log.threshold">DEBUG</Property>
     <Property name="hive.log.level">WARN</Property>
     <Property name="hive.root.logger">console</Property>
   </Properties>

http://git-wip-us.apache.org/repos/asf/hive/blob/cf0481fc/common/src/test/resources/hive-exec-log4j2-test.xml
----------------------------------------------------------------------
diff --git a/common/src/test/resources/hive-exec-log4j2-test.xml b/common/src/test/resources/hive-exec-log4j2-test.xml
index b5f2cb4..1d91b26 100644
--- a/common/src/test/resources/hive-exec-log4j2-test.xml
+++ b/common/src/test/resources/hive-exec-log4j2-test.xml
@@ -20,7 +20,7 @@
  packages="org.apache.hadoop.hive.ql.log">
 
   <Properties>
-    <Property name="hive.log.threshold">ALL</Property>
+    <Property name="hive.log.threshold">DEBUG</Property>
     <Property name="hive.log.level">INFO</Property>
     <Property name="hive.root.logger">FA</Property>
     <Property name="hive.log.dir">${sys:test.tmp.dir}/${sys:user.name}-TestHiveLogging</Property>

http://git-wip-us.apache.org/repos/asf/hive/blob/cf0481fc/common/src/test/resources/hive-log4j2-test.xml
----------------------------------------------------------------------
diff --git a/common/src/test/resources/hive-log4j2-test.xml b/common/src/test/resources/hive-log4j2-test.xml
index 63b46c8..98ca6f8 100644
--- a/common/src/test/resources/hive-log4j2-test.xml
+++ b/common/src/test/resources/hive-log4j2-test.xml
@@ -20,7 +20,7 @@
  packages="org.apache.hadoop.hive.ql.log">
 
   <Properties>
-    <Property name="hive.log.threshold">ALL</Property>
+    <Property name="hive.log.threshold">DEBUG</Property>
     <Property name="hive.log.level">WARN</Property>
     <Property name="hive.root.logger">DRFA</Property>
     <Property name="hive.log.dir">${sys:test.tmp.dir}/${sys:user.name}-TestHiveLogging</Property>

http://git-wip-us.apache.org/repos/asf/hive/blob/cf0481fc/data/conf/hive-log4j2.xml
----------------------------------------------------------------------
diff --git a/data/conf/hive-log4j2.xml b/data/conf/hive-log4j2.xml
index ff8e4d3..51173a0 100644
--- a/data/conf/hive-log4j2.xml
+++ b/data/conf/hive-log4j2.xml
@@ -20,7 +20,7 @@
  packages="org.apache.hadoop.hive.ql.log">
 
   <Properties>
-    <Property name="hive.log.threshold">ALL</Property>
+    <Property name="hive.log.threshold">DEBUG</Property>
     <Property name="hive.log.level">DEBUG</Property>
     <Property name="hive.root.logger">DRFA</Property>
     <Property name="hive.log.dir">${sys:test.tmp.dir}/log</Property>

http://git-wip-us.apache.org/repos/asf/hive/blob/cf0481fc/hcatalog/webhcat/svr/src/main/config/webhcat-log4j2.xml
----------------------------------------------------------------------
diff --git a/hcatalog/webhcat/svr/src/main/config/webhcat-log4j2.xml b/hcatalog/webhcat/svr/src/main/config/webhcat-log4j2.xml
index 40da974..96f0974 100644
--- a/hcatalog/webhcat/svr/src/main/config/webhcat-log4j2.xml
+++ b/hcatalog/webhcat/svr/src/main/config/webhcat-log4j2.xml
@@ -20,7 +20,7 @@
  packages="org.apache.hadoop.hive.ql.log">
 
   <Properties>
-    <Property name="webhcat.log.threshold">ALL</Property>
+    <Property name="webhcat.log.threshold">DEBUG</Property>
     <Property name="webhcat.log.level">INFO</Property>
     <Property name="webhcat.root.logger">standard</Property>
     <Property name="webhcat.log.dir">.</Property>

http://git-wip-us.apache.org/repos/asf/hive/blob/cf0481fc/ql/src/main/resources/hive-exec-log4j2.xml
----------------------------------------------------------------------
diff --git a/ql/src/main/resources/hive-exec-log4j2.xml b/ql/src/main/resources/hive-exec-log4j2.xml
index c93437c..8b520a2 100644
--- a/ql/src/main/resources/hive-exec-log4j2.xml
+++ b/ql/src/main/resources/hive-exec-log4j2.xml
@@ -20,7 +20,7 @@
  packages="org.apache.hadoop.hive.ql.log">
 
   <Properties>
-    <Property name="hive.log.threshold">ALL</Property>
+    <Property name="hive.log.threshold">DEBUG</Property>
     <Property name="hive.log.level">INFO</Property>
     <Property name="hive.root.logger">FA</Property>
     <Property name="hive.log.dir">${sys:java.io.tmpdir}/${sys:user.name}</Property>

http://git-wip-us.apache.org/repos/asf/hive/blob/cf0481fc/testutils/ptest2/src/main/resources/log4j2.xml
----------------------------------------------------------------------
diff --git a/testutils/ptest2/src/main/resources/log4j2.xml b/testutils/ptest2/src/main/resources/log4j2.xml
index 992462e..6502ad1 100644
--- a/testutils/ptest2/src/main/resources/log4j2.xml
+++ b/testutils/ptest2/src/main/resources/log4j2.xml
@@ -20,7 +20,7 @@
  packages="org.apache.hadoop.hive.ql.log">
 
   <Properties>
-    <Property name="hive.ptest.log.threshold">ALL</Property>
+    <Property name="hive.ptest.log.threshold">DEBUG</Property>
     <Property name="hive.ptest.log.level">DEBUG</Property>
     <Property name="hive.ptest.root.logger">FILE</Property>
     <Property name="hive.ptest.log.dir">target</Property>


[09/50] [abbrv] hive git commit: HIVE-11398: Parse wide OR and wide AND trees to flat OR/AND trees (Jesus Camacho Rodriguez via Gopal V)

Posted by se...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/7f3e4811/ql/src/test/results/clientpositive/vectorization_short_regress.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vectorization_short_regress.q.out b/ql/src/test/results/clientpositive/vectorization_short_regress.q.out
index 5446ba5..f2cb3ec 100644
--- a/ql/src/test/results/clientpositive/vectorization_short_regress.q.out
+++ b/ql/src/test/results/clientpositive/vectorization_short_regress.q.out
@@ -147,7 +147,7 @@ STAGE PLANS:
             alias: alltypesorc
             Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: ((762 = cbigint) or (((UDFToFloat(csmallint) < cfloat) and ((UDFToDouble(ctimestamp2) > -5.0) and (cdouble <> UDFToDouble(cint)))) or ((cstring1 = 'a') or ((UDFToDouble(cbigint) <= -1.389) and ((cstring2 <> 'a') and ((79.553 <> UDFToDouble(cint)) and (cboolean2 <> cboolean1))))))) (type: boolean)
+              predicate: ((762 = cbigint) or ((UDFToFloat(csmallint) < cfloat) and (UDFToDouble(ctimestamp2) > -5.0) and (cdouble <> UDFToDouble(cint))) or (cstring1 = 'a') or ((UDFToDouble(cbigint) <= -1.389) and (cstring2 <> 'a') and (79.553 <> UDFToDouble(cint)) and (cboolean2 <> cboolean1))) (type: boolean)
               Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: cint (type: int), cdouble (type: double), csmallint (type: smallint), cfloat (type: float), ctinyint (type: tinyint)
@@ -353,7 +353,7 @@ STAGE PLANS:
             alias: alltypesorc
             Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: (((cbigint <= 197) and (UDFToLong(cint) < cbigint)) or (((cdouble >= -26.28) and (UDFToDouble(csmallint) > cdouble)) or (((UDFToFloat(ctinyint) > cfloat) and cstring1 regexp '.*ss.*') or ((cfloat > 79.553) and (cstring2 like '10%'))))) (type: boolean)
+              predicate: (((cbigint <= 197) and (UDFToLong(cint) < cbigint)) or ((cdouble >= -26.28) and (UDFToDouble(csmallint) > cdouble)) or ((UDFToFloat(ctinyint) > cfloat) and cstring1 regexp '.*ss.*') or ((cfloat > 79.553) and (cstring2 like '10%'))) (type: boolean)
               Statistics: Num rows: 6826 Data size: 1467614 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: cint (type: int), cbigint (type: bigint), csmallint (type: smallint), cdouble (type: double), ctinyint (type: tinyint)
@@ -550,7 +550,7 @@ STAGE PLANS:
             alias: alltypesorc
             Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: ((ctimestamp1 = ctimestamp2) or ((762.0 = cfloat) or ((cstring1 = 'ss') or (((UDFToLong(csmallint) <= cbigint) and (1 = cboolean2)) or (cboolean1 is not null and (ctimestamp2 is not null and (cstring2 > 'a'))))))) (type: boolean)
+              predicate: ((ctimestamp1 = ctimestamp2) or (762.0 = cfloat) or (cstring1 = 'ss') or ((UDFToLong(csmallint) <= cbigint) and (1 = cboolean2)) or (cboolean1 is not null and ctimestamp2 is not null and (cstring2 > 'a'))) (type: boolean)
               Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: cbigint (type: bigint), ctinyint (type: tinyint), csmallint (type: smallint), cint (type: int), cdouble (type: double)
@@ -726,7 +726,7 @@ STAGE PLANS:
             alias: alltypesorc
             Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: (((ctimestamp2 <= ctimestamp1) and ((UDFToDouble(cbigint) <> cdouble) and ('ss' <= cstring1))) or (((csmallint < UDFToShort(ctinyint)) and (UDFToDouble(ctimestamp1) >= 0.0)) or (cfloat = 17.0))) (type: boolean)
+              predicate: (((ctimestamp2 <= ctimestamp1) and (UDFToDouble(cbigint) <> cdouble) and ('ss' <= cstring1)) or ((csmallint < UDFToShort(ctinyint)) and (UDFToDouble(ctimestamp1) >= 0.0)) or (cfloat = 17.0)) (type: boolean)
               Statistics: Num rows: 8874 Data size: 1907941 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: ctinyint (type: tinyint), cbigint (type: bigint), cint (type: int), cfloat (type: float)
@@ -910,7 +910,7 @@ STAGE PLANS:
             alias: alltypesorc
             Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: ((cstring1 regexp 'a.*' and (cstring2 like '%ss%')) or (((1 <> cboolean2) and ((UDFToDouble(csmallint) < 79.553) and (-257 <> UDFToInteger(ctinyint)))) or (((cdouble > UDFToDouble(ctinyint)) and (cfloat >= UDFToFloat(cint))) or ((UDFToLong(cint) < cbigint) and (UDFToLong(ctinyint) > cbigint))))) (type: boolean)
+              predicate: ((cstring1 regexp 'a.*' and (cstring2 like '%ss%')) or ((1 <> cboolean2) and (UDFToDouble(csmallint) < 79.553) and (-257 <> UDFToInteger(ctinyint))) or ((cdouble > UDFToDouble(ctinyint)) and (cfloat >= UDFToFloat(cint))) or ((UDFToLong(cint) < cbigint) and (UDFToLong(ctinyint) > cbigint))) (type: boolean)
               Statistics: Num rows: 9898 Data size: 2128105 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: cint (type: int), cdouble (type: double), ctimestamp2 (type: timestamp), cstring1 (type: string), cboolean2 (type: boolean), ctinyint (type: tinyint), cfloat (type: float), ctimestamp1 (type: timestamp), csmallint (type: smallint), cbigint (type: bigint), (-3728 * cbigint) (type: bigint), (- cint) (type: int), (-863.257 - UDFToDouble(cint)) (type: double), (- csmallint) (type: smallint), (csmallint - (- csmallint)) (type: smallint), ((csmallint - (- csmallint)) + (- csmallint)) (type: smallint), (UDFToDouble(cint) / UDFToDouble(cint)) (type: double), ((-863.257 - UDFToDouble(cint)) - -26.28) (type: double), (- cfloat) (type: float), (cdouble * -89010.0) (type: double), (UDFToDouble(ctinyint) / 988888.0) (type: double), (- ctinyint) (type: tinyint), (79.553 / UDFToDouble(ctinyint)) (type: double)
@@ -1161,7 +1161,7 @@ STAGE PLANS:
             alias: alltypesorc
             Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: (((197.0 > UDFToDouble(ctinyint)) and (UDFToLong(cint) = cbigint)) or ((cbigint = 359) or ((cboolean1 < 0) or ((cstring1 like '%ss') and (cfloat <= UDFToFloat(ctinyint)))))) (type: boolean)
+              predicate: (((197.0 > UDFToDouble(ctinyint)) and (UDFToLong(cint) = cbigint)) or (cbigint = 359) or (cboolean1 < 0) or ((cstring1 like '%ss') and (cfloat <= UDFToFloat(ctinyint)))) (type: boolean)
               Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: cint (type: int), cbigint (type: bigint), cstring1 (type: string), cboolean1 (type: boolean), cfloat (type: float), cdouble (type: double), ctimestamp2 (type: timestamp), csmallint (type: smallint), cstring2 (type: string), cboolean2 (type: boolean), (UDFToDouble(cint) / UDFToDouble(cbigint)) (type: double), (UDFToDouble(cbigint) % 79.553) (type: double), (- (UDFToDouble(cint) / UDFToDouble(cbigint))) (type: double), (10.175 % UDFToDouble(cfloat)) (type: double), (- cfloat) (type: float), (cfloat - (- cfloat)) (type: float), ((cfloat - (- cfloat)) % -6432.0) (type: float), (cdouble * UDFToDouble(csmallint)) (type: double), (- cdouble) (type: double), (- cbigint) (type: bigint), (UDFToDouble(cfloat) - (UDFToDouble(cint) / UDFToDouble(cbigint))) (type: double), (- csmallint) (type: smallint), (3569 % cbigint) (type: bigint), (359.0 - cdouble) (type: double), (- csmallint) (type: smallint)
@@ -1361,7 +1361,7 @@ STAGE PLANS:
             alias: alltypesorc
             Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: (((UDFToDouble(csmallint) > -26.28) and (cstring2 like 'ss')) or (((cdouble <= UDFToDouble(cbigint)) and ((cstring1 >= 'ss') and (UDFToDouble(cint) <> cdouble))) or ((UDFToInteger(ctinyint) = -89010) or ((UDFToFloat(cbigint) <= cfloat) and (-26.28 <= UDFToDouble(csmallint)))))) (type: boolean)
+              predicate: (((UDFToDouble(csmallint) > -26.28) and (cstring2 like 'ss')) or ((cdouble <= UDFToDouble(cbigint)) and (cstring1 >= 'ss') and (UDFToDouble(cint) <> cdouble)) or (UDFToInteger(ctinyint) = -89010) or ((UDFToFloat(cbigint) <= cfloat) and (-26.28 <= UDFToDouble(csmallint)))) (type: boolean)
               Statistics: Num rows: 10922 Data size: 2348269 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: cint (type: int), cstring1 (type: string), cboolean2 (type: boolean), ctimestamp2 (type: timestamp), cdouble (type: double), cfloat (type: float), cbigint (type: bigint), csmallint (type: smallint), cboolean1 (type: boolean), (cint + UDFToInteger(csmallint)) (type: int), (cbigint - UDFToLong(ctinyint)) (type: bigint), (- cbigint) (type: bigint), (- cfloat) (type: float), ((cbigint - UDFToLong(ctinyint)) + cbigint) (type: bigint), (cdouble / cdouble) (type: double), (- cdouble) (type: double), (UDFToLong((cint + UDFToInteger(csmallint))) * (- cbigint)) (type: bigint), ((- cdouble) + UDFToDouble(cbigint)) (type: double), (-1.389 / UDFToDouble(ctinyint)) (type: double), (UDFToDouble(cbigint) % cdouble) (type: double), (- csmallint) (type: smallint), (UDFToInteger(csmallint) + (cint + UDFToInteger(csmallint))) (type: int)
@@ -1620,7 +1620,7 @@ STAGE PLANS:
             alias: alltypesorc
             Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: (((-1.389 >= UDFToDouble(cint)) and ((csmallint < UDFToShort(ctinyint)) and (-6432 > UDFToInteger(csmallint)))) or (((cdouble >= UDFToDouble(cfloat)) and (cstring2 <= 'a')) or ((cstring1 like 'ss%') and (10.175 > UDFToDouble(cbigint))))) (type: boolean)
+              predicate: (((-1.389 >= UDFToDouble(cint)) and (csmallint < UDFToShort(ctinyint)) and (-6432 > UDFToInteger(csmallint))) or ((cdouble >= UDFToDouble(cfloat)) and (cstring2 <= 'a')) or ((cstring1 like 'ss%') and (10.175 > UDFToDouble(cbigint)))) (type: boolean)
               Statistics: Num rows: 3868 Data size: 831633 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: ctimestamp1 (type: timestamp), cstring2 (type: string), cdouble (type: double), cfloat (type: float), cbigint (type: bigint), csmallint (type: smallint), (UDFToDouble(cbigint) / 3569.0) (type: double), (-257 - UDFToInteger(csmallint)) (type: int), (-6432.0 * cfloat) (type: float), (- cdouble) (type: double), (cdouble * 10.175) (type: double), (UDFToDouble((-6432.0 * cfloat)) / UDFToDouble(cfloat)) (type: double), (- cfloat) (type: float), (cint % UDFToInteger(csmallint)) (type: int), (- cdouble) (type: double), (cdouble * (- cdouble)) (type: double)
@@ -2031,7 +2031,7 @@ STAGE PLANS:
             alias: alltypesorc
             Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: ((cdouble > 2563.58) and (((cbigint >= UDFToLong(cint)) and ((UDFToInteger(csmallint) < cint) and (UDFToDouble(cfloat) < -5638.15))) or ((cdouble <= UDFToDouble(cbigint)) and (-5638.15 > UDFToDouble(cbigint))))) (type: boolean)
+              predicate: ((cdouble > 2563.58) and (((cbigint >= UDFToLong(cint)) and (UDFToInteger(csmallint) < cint) and (UDFToDouble(cfloat) < -5638.15)) or ((cdouble <= UDFToDouble(cbigint)) and (-5638.15 > UDFToDouble(cbigint))))) (type: boolean)
               Statistics: Num rows: 606 Data size: 130292 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: cdouble (type: double), cfloat (type: float)
@@ -2289,7 +2289,7 @@ STAGE PLANS:
             alias: alltypesorc
             Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: ((UDFToDouble(ctimestamp1) <> 0.0) and (((-257 <> UDFToInteger(ctinyint)) and (cboolean2 is not null and (cstring1 regexp '.*ss' and (-3.0 < UDFToDouble(ctimestamp1))))) or ((UDFToDouble(ctimestamp2) = -5.0) or (((UDFToDouble(ctimestamp1) < 0.0) and (cstring2 like '%b%')) or ((cdouble = UDFToDouble(cint)) or (cboolean1 is null and (cfloat < UDFToFloat(cint)))))))) (type: boolean)
+              predicate: ((UDFToDouble(ctimestamp1) <> 0.0) and (((-257 <> UDFToInteger(ctinyint)) and cboolean2 is not null and cstring1 regexp '.*ss' and (-3.0 < UDFToDouble(ctimestamp1))) or (UDFToDouble(ctimestamp2) = -5.0) or ((UDFToDouble(ctimestamp1) < 0.0) and (cstring2 like '%b%')) or (cdouble = UDFToDouble(cint)) or (cboolean1 is null and (cfloat < UDFToFloat(cint))))) (type: boolean)
               Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: ctimestamp1 (type: timestamp), cstring1 (type: string), cint (type: int), csmallint (type: smallint), ctinyint (type: tinyint), cfloat (type: float), cdouble (type: double)
@@ -2624,7 +2624,7 @@ STAGE PLANS:
             alias: alltypesorc
             Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: (cboolean1 is not null and (((cdouble < UDFToDouble(csmallint)) and ((cboolean2 = cboolean1) and (UDFToDouble(cbigint) <= -863.257))) or (((cint >= -257) and (cstring1 is not null and (cboolean1 >= 1))) or (cstring2 regexp 'b' or ((csmallint >= UDFToShort(ctinyint)) and ctimestamp2 is null))))) (type: boolean)
+              predicate: (cboolean1 is not null and (((cdouble < UDFToDouble(csmallint)) and (cboolean2 = cboolean1) and (UDFToDouble(cbigint) <= -863.257)) or ((cint >= -257) and cstring1 is not null and (cboolean1 >= 1)) or cstring2 regexp 'b' or ((csmallint >= UDFToShort(ctinyint)) and ctimestamp2 is null))) (type: boolean)
               Statistics: Num rows: 4778 Data size: 1027287 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: cboolean1 (type: boolean), cfloat (type: float), cbigint (type: bigint), cint (type: int), cdouble (type: double), ctinyint (type: tinyint), csmallint (type: smallint)

http://git-wip-us.apache.org/repos/asf/hive/blob/7f3e4811/ql/src/test/results/clientpositive/vectorized_case.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vectorized_case.q.out b/ql/src/test/results/clientpositive/vectorized_case.q.out
index 9f547d1..73bf12d 100644
--- a/ql/src/test/results/clientpositive/vectorized_case.q.out
+++ b/ql/src/test/results/clientpositive/vectorized_case.q.out
@@ -46,7 +46,7 @@ STAGE PLANS:
             alias: alltypesorc
             Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: ((csmallint = 418) or ((csmallint = 12205) or (csmallint = 10583))) (type: boolean)
+              predicate: ((csmallint = 418) or (csmallint = 12205) or (csmallint = 10583)) (type: boolean)
               Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: csmallint (type: smallint), CASE WHEN ((csmallint = 418)) THEN ('a') WHEN ((csmallint = 12205)) THEN ('b') ELSE ('c') END (type: string), CASE (csmallint) WHEN (418) THEN ('a') WHEN (12205) THEN ('b') ELSE ('c') END (type: string)
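
The plan diffs above show nested binary OR chains collapsing into a single n-ary OR once wide OR/AND trees are flattened. As a toy illustration of that kind of flattening (hypothetical expression classes, not Hive's actual operator tree or CBO code path):

  // FlattenOrDemo.java -- toy flattening of a nested OR chain into one n-ary OR operand list.
  import java.util.ArrayList;
  import java.util.Arrays;
  import java.util.List;

  public class FlattenOrDemo {
    static class Expr {
      final String op;          // "or" for an OR node, null for a leaf
      final List<Expr> children;
      final String name;        // leaf label
      Expr(String name) { this.op = null; this.children = null; this.name = name; }
      Expr(String op, Expr... children) { this.op = op; this.children = Arrays.asList(children); this.name = null; }
    }

    // Recursively collect the operands of a nested OR chain into a single flat list.
    static void flattenOr(Expr e, List<Expr> out) {
      if ("or".equals(e.op)) {
        for (Expr child : e.children) {
          flattenOr(child, out);
        }
      } else {
        out.add(e);
      }
    }

    public static void main(String[] args) {
      // (a or (b or (c or d))) -> operands [a, b, c, d], i.e. one flat (a or b or c or d)
      Expr nested = new Expr("or", new Expr("a"),
          new Expr("or", new Expr("b"), new Expr("or", new Expr("c"), new Expr("d"))));
      List<Expr> flat = new ArrayList<Expr>();
      flattenOr(nested, flat);
      System.out.println(flat.size());  // 4
    }
  }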


[50/50] [abbrv] hive git commit: HIVE-11568 : merge master into branch (Sergey Shelukhin) ADDENDUM MERGE

Posted by se...@apache.org.
HIVE-11568 : merge master into branch (Sergey Shelukhin) ADDENDUM MERGE


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/2fe60861
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/2fe60861
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/2fe60861

Branch: refs/heads/hbase-metastore
Commit: 2fe60861db72a4128448245f8031d1839e5e3f8e
Parents: c528294 3071ce9
Author: Sergey Shelukhin <se...@apache.org>
Authored: Fri Aug 14 16:13:32 2015 -0700
Committer: Sergey Shelukhin <se...@apache.org>
Committed: Fri Aug 14 16:13:32 2015 -0700

----------------------------------------------------------------------
 .../hadoop/hive/ql/parse/DDLSemanticAnalyzer.java   | 11 +++++++++++
 .../clientnegative/alter_table_wrong_location.q     |  4 ++++
 .../clientnegative/alter_table_wrong_location.q.out |  9 +++++++++
 .../apache/hive/spark/client/SparkClientImpl.java   |  4 ++++
 testutils/ptest2/pom.xml                            | 16 +---------------
 testutils/ptest2/src/main/resources/log4j2.xml      |  1 +
 6 files changed, 30 insertions(+), 15 deletions(-)
----------------------------------------------------------------------



[26/50] [abbrv] hive git commit: HIVE-11541: ORC: Split Strategy should depend on global file count, not per-partition (Gopal V reviewed by Prasanth Jayachandran)

Posted by se...@apache.org.
HIVE-11541: ORC: Split Strategy should depend on global file count, not per-partition (Gopal V reviewed by Prasanth Jayachandran)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/f26b2569
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/f26b2569
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/f26b2569

Branch: refs/heads/hbase-metastore
Commit: f26b2569198fbeceaf17a5a77c59eccf5175935c
Parents: db46e6e
Author: Prasanth Jayachandran <j....@gmail.com>
Authored: Thu Aug 13 12:35:29 2015 -0700
Committer: Prasanth Jayachandran <j....@gmail.com>
Committed: Thu Aug 13 12:35:29 2015 -0700

----------------------------------------------------------------------
 ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcInputFormat.java | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/f26b2569/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcInputFormat.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcInputFormat.java b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcInputFormat.java
index 4e6dd7a..fe2eccd 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcInputFormat.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcInputFormat.java
@@ -483,7 +483,6 @@ public class OrcInputFormat  implements InputFormat<NullWritable, OrcStruct>,
     }
 
     private FileInfo verifyCachedFileInfo(FileStatus file) {
-      context.numFilesCounter.incrementAndGet();
       FileInfo fileInfo = Context.footerCache.getIfPresent(file.getPath());
       if (fileInfo != null) {
         if (isDebugEnabled) {
@@ -671,6 +670,7 @@ public class OrcInputFormat  implements InputFormat<NullWritable, OrcStruct>,
 
         int numFiles = children.size();
         long avgFileSize = totalFileSize / numFiles;
+        int totalFiles = context.numFilesCounter.addAndGet(numFiles);
         switch(context.splitStrategyKind) {
           case BI:
             // BI strategy requested through config
@@ -684,7 +684,7 @@ public class OrcInputFormat  implements InputFormat<NullWritable, OrcStruct>,
             break;
           default:
             // HYBRID strategy
-            if (avgFileSize > context.maxSize || numFiles <= context.minSplits) {
+            if (avgFileSize > context.maxSize || totalFiles <= context.minSplits) {
               splitStrategy = new ETLSplitStrategy(context, fs, dir, children, isOriginal, deltas,
                   covered);
             } else {
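
The hunk above moves the ETL-vs-BI decision from the per-directory file count to a running global count accumulated in numFilesCounter. A toy sketch of the effect, with hypothetical names and sizes (not the actual OrcInputFormat code):

  // SplitStrategyChoiceDemo.java -- sketch of the HYBRID decision using a global running file count.
  import java.util.concurrent.atomic.AtomicInteger;

  public class SplitStrategyChoiceDemo {
    static final AtomicInteger numFilesCounter = new AtomicInteger();  // shared across all partitions

    static String chooseStrategy(long totalFileSize, int numFiles, long maxSize, int minSplits) {
      long avgFileSize = totalFileSize / numFiles;
      int totalFiles = numFilesCounter.addAndGet(numFiles);            // global count, not numFiles
      return (avgFileSize > maxSize || totalFiles <= minSplits) ? "ETL" : "BI";
    }

    public static void main(String[] args) {
      // Two partitions with 5 small files each and minSplits = 8:
      // per-partition counting would pick ETL for both; the global count flips the second one to BI.
      System.out.println(chooseStrategy(5000L, 5, 256L * 1024 * 1024, 8));  // ETL (5 <= 8)
      System.out.println(chooseStrategy(5000L, 5, 256L * 1024 * 1024, 8));  // BI  (10 > 8)
    }
  }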


[08/50] [abbrv] hive git commit: HIVE-11387: CBO: Calcite Operator To Hive Operator (Calcite Return Path) : fix reduce_deduplicate optimization (Pengcheng Xiong, reviewed by Jesus Camacho Rodriguez, Hari Subramaniyan)

Posted by se...@apache.org.
HIVE-11387: CBO: Calcite Operator To Hive Operator (Calcite Return Path) : fix reduce_deduplicate optimization (Pengcheng Xiong, reviewed by Jesus Camacho Rodriguez, Hari Subramaniyan)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/538ae703
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/538ae703
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/538ae703

Branch: refs/heads/hbase-metastore
Commit: 538ae7036f2fe21e47e384523d48392d383e95e8
Parents: bddbd1d
Author: Hari Subramaniyan <ha...@apache.org>
Authored: Mon Aug 10 18:00:28 2015 -0700
Committer: Hari Subramaniyan <ha...@apache.org>
Committed: Mon Aug 10 18:00:28 2015 -0700

----------------------------------------------------------------------
 .../correlation/AbstractCorrelationProcCtx.java |   7 +
 .../correlation/CorrelationUtilities.java       |  11 +-
 .../correlation/ReduceSinkDeDuplication.java    |   6 +-
 ...i_insert_move_tasks_share_dependencies.q.out | 336 +++---------
 ql/src/test/results/clientpositive/ptf.q.out    |  27 +-
 ...i_insert_move_tasks_share_dependencies.q.out | 512 +++++++------------
 .../test/results/clientpositive/spark/ptf.q.out |  17 +-
 .../spark/union_remove_6_subq.q.out             |  22 +-
 .../clientpositive/spark/vectorized_ptf.q.out   |  21 +-
 .../clientpositive/tez/explainuser_1.q.out      |  69 ++-
 .../test/results/clientpositive/tez/ptf.q.out   |  15 +-
 .../clientpositive/tez/vectorized_ptf.q.out     |  19 +-
 .../clientpositive/union_remove_6_subq.q.out    |  34 +-
 .../results/clientpositive/vectorized_ptf.q.out |  67 +--
 14 files changed, 327 insertions(+), 836 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/538ae703/ql/src/java/org/apache/hadoop/hive/ql/optimizer/correlation/AbstractCorrelationProcCtx.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/correlation/AbstractCorrelationProcCtx.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/correlation/AbstractCorrelationProcCtx.java
index 174685b..5b673df 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/correlation/AbstractCorrelationProcCtx.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/correlation/AbstractCorrelationProcCtx.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.hive.ql.optimizer.correlation;
 
 import static org.apache.hadoop.hive.conf.HiveConf.ConfVars.HIVEOPTREDUCEDEDUPLICATIONMINREDUCER;
 import static org.apache.hadoop.hive.conf.HiveConf.ConfVars.HIVESCRIPTOPERATORTRUST;
+import static org.apache.hadoop.hive.conf.HiveConf.ConfVars.HIVEMAPSIDEAGGREGATE;
 
 import java.util.HashSet;
 import java.util.Set;
@@ -39,11 +40,13 @@ abstract class AbstractCorrelationProcCtx implements NodeProcessorCtx {
   // only one reducer if this configuration does not prevents
   private final int minReducer;
   private final Set<Operator<?>> removedOps;
+  private final boolean isMapAggr;
 
   public AbstractCorrelationProcCtx(ParseContext pctx) {
     removedOps = new HashSet<Operator<?>>();
     trustScript = pctx.getConf().getBoolVar(HIVESCRIPTOPERATORTRUST);
     minReducer = pctx.getConf().getIntVar(HIVEOPTREDUCEDEDUPLICATIONMINREDUCER);
+    isMapAggr = pctx.getConf().getBoolVar(HIVEMAPSIDEAGGREGATE);
     this.pctx = pctx;
   }
 
@@ -70,4 +73,8 @@ abstract class AbstractCorrelationProcCtx implements NodeProcessorCtx {
   public boolean addRemovedOperator(Operator<?> rsOp) {
     return removedOps.add(rsOp);
   }
+
+  public boolean isMapAggr() {
+    return isMapAggr;
+  }
 }

http://git-wip-us.apache.org/repos/asf/hive/blob/538ae703/ql/src/java/org/apache/hadoop/hive/ql/optimizer/correlation/CorrelationUtilities.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/correlation/CorrelationUtilities.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/correlation/CorrelationUtilities.java
index 64bef21..7bb49be 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/correlation/CorrelationUtilities.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/correlation/CorrelationUtilities.java
@@ -29,6 +29,7 @@ import java.util.Map;
 import java.util.Map.Entry;
 import java.util.Set;
 
+import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.ql.exec.ColumnInfo;
 import org.apache.hadoop.hive.ql.exec.FilterOperator;
 import org.apache.hadoop.hive.ql.exec.ForwardOperator;
@@ -44,6 +45,7 @@ import org.apache.hadoop.hive.ql.exec.TableScanOperator;
 import org.apache.hadoop.hive.ql.exec.Utilities;
 import org.apache.hadoop.hive.ql.exec.Utilities.ReduceField;
 import org.apache.hadoop.hive.ql.metadata.HiveException;
+import org.apache.hadoop.hive.ql.optimizer.correlation.ReduceSinkDeDuplication.ReduceSinkDeduplicateProcCtx;
 import org.apache.hadoop.hive.ql.parse.ParseContext;
 import org.apache.hadoop.hive.ql.parse.SemanticException;
 import org.apache.hadoop.hive.ql.plan.AggregationDesc;
@@ -163,10 +165,10 @@ public final class CorrelationUtilities {
     return type.isInstance(parent) ? (T)parent : null;
   }
 
-  protected static Operator<?> getStartForGroupBy(ReduceSinkOperator cRS)
+  protected static Operator<?> getStartForGroupBy(ReduceSinkOperator cRS, ReduceSinkDeduplicateProcCtx dedupCtx)
       throws SemanticException {
     Operator<? extends Serializable> parent = getSingleParent(cRS);
-    return parent instanceof GroupByOperator ? parent : cRS;  // skip map-aggr GBY
+    return parent instanceof GroupByOperator && dedupCtx.isMapAggr() ? parent : cRS;  // skip map-aggr GBY
   }
 
 
@@ -240,6 +242,7 @@ public final class CorrelationUtilities {
           || cursor instanceof FilterOperator
           || cursor instanceof ForwardOperator
           || cursor instanceof ScriptOperator
+          || cursor instanceof GroupByOperator
           || cursor instanceof ReduceSinkOperator)) {
         return null;
       }
@@ -395,7 +398,7 @@ public final class CorrelationUtilities {
 
     Operator<?> parent = getSingleParent(cRS);
 
-    if (parent instanceof GroupByOperator) {
+    if ((parent instanceof GroupByOperator) && procCtx.isMapAggr()) {
       // pRS-cGBYm-cRS-cGBYr (map aggregation) --> pRS-cGBYr(COMPLETE)
       // copies desc of cGBYm to cGBYr and remove cGBYm and cRS
       GroupByOperator cGBYm = (GroupByOperator) parent;
@@ -440,7 +443,7 @@ public final class CorrelationUtilities {
     removeOperator(cRS, cGBYr, parent, context);
     procCtx.addRemovedOperator(cRS);
 
-    if (parent instanceof GroupByOperator) {
+    if ((parent instanceof GroupByOperator) && procCtx.isMapAggr()) {
       removeOperator(parent, cGBYr, getSingleParent(parent), context);
       procCtx.addRemovedOperator(cGBYr);
     }

http://git-wip-us.apache.org/repos/asf/hive/blob/538ae703/ql/src/java/org/apache/hadoop/hive/ql/optimizer/correlation/ReduceSinkDeDuplication.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/correlation/ReduceSinkDeDuplication.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/correlation/ReduceSinkDeDuplication.java
index 7b5f9b2..56334ed 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/correlation/ReduceSinkDeDuplication.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/correlation/ReduceSinkDeDuplication.java
@@ -500,7 +500,7 @@ public class ReduceSinkDeDuplication implements Transform {
     public Object process(ReduceSinkOperator cRS, GroupByOperator cGBY,
         ReduceSinkDeduplicateProcCtx dedupCtx)
         throws SemanticException {
-      Operator<?> start = CorrelationUtilities.getStartForGroupBy(cRS);
+      Operator<?> start = CorrelationUtilities.getStartForGroupBy(cRS, dedupCtx);
       GroupByOperator pGBY =
           CorrelationUtilities.findPossibleParent(
               start, GroupByOperator.class, dedupCtx.trustScript());
@@ -547,7 +547,7 @@ public class ReduceSinkDeDuplication implements Transform {
     public Object process(ReduceSinkOperator cRS, GroupByOperator cGBY,
         ReduceSinkDeduplicateProcCtx dedupCtx)
         throws SemanticException {
-      Operator<?> start = CorrelationUtilities.getStartForGroupBy(cRS);
+      Operator<?> start = CorrelationUtilities.getStartForGroupBy(cRS, dedupCtx);
       JoinOperator pJoin =
           CorrelationUtilities.findPossibleParent(
               start, JoinOperator.class, dedupCtx.trustScript());
@@ -590,7 +590,7 @@ public class ReduceSinkDeDuplication implements Transform {
     public Object process(ReduceSinkOperator cRS, GroupByOperator cGBY,
         ReduceSinkDeduplicateProcCtx dedupCtx)
         throws SemanticException {
-      Operator<?> start = CorrelationUtilities.getStartForGroupBy(cRS);
+      Operator<?> start = CorrelationUtilities.getStartForGroupBy(cRS, dedupCtx);
       ReduceSinkOperator pRS =
           CorrelationUtilities.findPossibleParent(
               start, ReduceSinkOperator.class, dedupCtx.trustScript());
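
All three process() overloads above now thread the ReduceSinkDeduplicateProcCtx into CorrelationUtilities.getStartForGroupBy rather than letting the helper decide from operator shape alone. A small, hypothetical sketch of that context-threading pattern follows; Proc, Ctx and startFor are illustrative names, not Hive classes, and the strings only stand in for the operators each callback searches from.

import java.util.Arrays;
import java.util.List;

class ProcCtxSketch {
  static class Ctx {
    private final boolean mapAggr;
    Ctx(boolean mapAggr) { this.mapAggr = mapAggr; }
    boolean isMapAggr() { return mapAggr; }  // stand-in for ReduceSinkDeduplicateProcCtx.isMapAggr()
  }

  interface Proc { String process(String cRS, Ctx dedupCtx); }

  // Stand-in for getStartForGroupBy(cRS, dedupCtx): the context, not the
  // plan shape alone, decides whether the search starts above the map GBY.
  static String startFor(String cRS, Ctx dedupCtx) {
    return dedupCtx.isMapAggr() ? "parent-GBY-of-" + cRS : cRS;
  }

  public static void main(String[] args) {
    List<Proc> procs = Arrays.asList(
        (cRS, ctx) -> "GBY-GBY case starts at " + startFor(cRS, ctx),
        (cRS, ctx) -> "JOIN-GBY case starts at " + startFor(cRS, ctx),
        (cRS, ctx) -> "RS-GBY case starts at " + startFor(cRS, ctx));
    for (Ctx ctx : new Ctx[] { new Ctx(true), new Ctx(false) }) {
      for (Proc p : procs) {
        System.out.println(p.process("cRS", ctx));
      }
    }
  }
}

The design point mirrored here is that each callback receives the same context object and asks it whether map aggregation applies before deciding where the parent search begins, instead of assuming any parent GroupByOperator is a map-side aggregation.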

http://git-wip-us.apache.org/repos/asf/hive/blob/538ae703/ql/src/test/results/clientpositive/multi_insert_move_tasks_share_dependencies.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/multi_insert_move_tasks_share_dependencies.q.out b/ql/src/test/results/clientpositive/multi_insert_move_tasks_share_dependencies.q.out
index 935ae75..81d9808 100644
--- a/ql/src/test/results/clientpositive/multi_insert_move_tasks_share_dependencies.q.out
+++ b/ql/src/test/results/clientpositive/multi_insert_move_tasks_share_dependencies.q.out
@@ -2821,11 +2821,10 @@ from src
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-2 is a root stage
-  Stage-3 depends on stages: Stage-2
-  Stage-4 depends on stages: Stage-3, Stage-5
-  Stage-0 depends on stages: Stage-3
-  Stage-5 depends on stages: Stage-2
-  Stage-1 depends on stages: Stage-5
+  Stage-3 depends on stages: Stage-2, Stage-4
+  Stage-0 depends on stages: Stage-2
+  Stage-4 depends on stages: Stage-2
+  Stage-1 depends on stages: Stage-4
 
 STAGE PLANS:
   Stage: Stage-2
@@ -2840,7 +2839,7 @@ STAGE PLANS:
               Reduce Output Operator
                 key expressions: key (type: string), value (type: string)
                 sort order: ++
-                Map-reduce partition columns: key (type: string), value (type: string)
+                Map-reduce partition columns: key (type: string)
                 Statistics: Num rows: 221 Data size: 2347 Basic stats: COMPLETE Column stats: NONE
       Reduce Operator Tree:
         Forward
@@ -2855,10 +2854,11 @@ STAGE PLANS:
               Statistics: Num rows: 36 Data size: 382 Basic stats: COMPLETE Column stats: NONE
               File Output Operator
                 compressed: false
+                Statistics: Num rows: 36 Data size: 382 Basic stats: COMPLETE Column stats: NONE
                 table:
-                    input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                    output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                    serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
+                    input format: org.apache.hadoop.mapred.TextInputFormat
+                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
           Filter Operator
             predicate: ((KEY._col0 > 10) and (KEY._col0 < 20)) (type: boolean)
             Statistics: Num rows: 24 Data size: 254 Basic stats: COMPLETE Column stats: NONE
@@ -2875,29 +2875,6 @@ STAGE PLANS:
                     serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
 
   Stage: Stage-3
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            Reduce Output Operator
-              key expressions: _col0 (type: string)
-              sort order: +
-              Map-reduce partition columns: _col0 (type: string)
-              Statistics: Num rows: 36 Data size: 382 Basic stats: COMPLETE Column stats: NONE
-              value expressions: _col1 (type: string)
-      Reduce Operator Tree:
-        Select Operator
-          expressions: KEY.reducesinkkey0 (type: string), VALUE._col0 (type: string)
-          outputColumnNames: _col0, _col1
-          Statistics: Num rows: 36 Data size: 382 Basic stats: COMPLETE Column stats: NONE
-          File Output Operator
-            compressed: false
-            Statistics: Num rows: 36 Data size: 382 Basic stats: COMPLETE Column stats: NONE
-            table:
-                input format: org.apache.hadoop.mapred.TextInputFormat
-                output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
-  Stage: Stage-4
     Dependency Collection
 
   Stage: Stage-0
@@ -2906,7 +2883,7 @@ STAGE PLANS:
           hdfs directory: false
 #### A masked pattern was here ####
 
-  Stage: Stage-5
+  Stage: Stage-4
     Map Reduce
       Map Operator Tree:
           TableScan
@@ -2957,11 +2934,10 @@ from src
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-2 is a root stage
-  Stage-3 depends on stages: Stage-2
-  Stage-4 depends on stages: Stage-3, Stage-5
-  Stage-0 depends on stages: Stage-3
-  Stage-5 depends on stages: Stage-2
-  Stage-1 depends on stages: Stage-5
+  Stage-3 depends on stages: Stage-2, Stage-4
+  Stage-0 depends on stages: Stage-2
+  Stage-4 depends on stages: Stage-2
+  Stage-1 depends on stages: Stage-4
 
 STAGE PLANS:
   Stage: Stage-2
@@ -2976,7 +2952,7 @@ STAGE PLANS:
               Reduce Output Operator
                 key expressions: key (type: string), value (type: string)
                 sort order: ++
-                Map-reduce partition columns: key (type: string), value (type: string)
+                Map-reduce partition columns: key (type: string)
                 Statistics: Num rows: 221 Data size: 2347 Basic stats: COMPLETE Column stats: NONE
       Reduce Operator Tree:
         Forward
@@ -2991,10 +2967,11 @@ STAGE PLANS:
               Statistics: Num rows: 36 Data size: 382 Basic stats: COMPLETE Column stats: NONE
               File Output Operator
                 compressed: false
+                Statistics: Num rows: 36 Data size: 382 Basic stats: COMPLETE Column stats: NONE
                 table:
-                    input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                    output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                    serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
+                    input format: org.apache.hadoop.mapred.TextInputFormat
+                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
           Filter Operator
             predicate: ((KEY._col0 > 10) and (KEY._col0 < 20)) (type: boolean)
             Statistics: Num rows: 24 Data size: 254 Basic stats: COMPLETE Column stats: NONE
@@ -3011,29 +2988,6 @@ STAGE PLANS:
                     serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
 
   Stage: Stage-3
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            Reduce Output Operator
-              key expressions: _col0 (type: string)
-              sort order: +
-              Map-reduce partition columns: _col0 (type: string)
-              Statistics: Num rows: 36 Data size: 382 Basic stats: COMPLETE Column stats: NONE
-              value expressions: _col1 (type: string)
-      Reduce Operator Tree:
-        Select Operator
-          expressions: KEY.reducesinkkey0 (type: string), VALUE._col0 (type: string)
-          outputColumnNames: _col0, _col1
-          Statistics: Num rows: 36 Data size: 382 Basic stats: COMPLETE Column stats: NONE
-          File Output Operator
-            compressed: false
-            Statistics: Num rows: 36 Data size: 382 Basic stats: COMPLETE Column stats: NONE
-            table:
-                input format: org.apache.hadoop.mapred.TextInputFormat
-                output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
-  Stage: Stage-4
     Dependency Collection
 
   Stage: Stage-0
@@ -3042,7 +2996,7 @@ STAGE PLANS:
           hdfs directory: false
 #### A masked pattern was here ####
 
-  Stage: Stage-5
+  Stage: Stage-4
     Map Reduce
       Map Operator Tree:
           TableScan
@@ -3093,11 +3047,10 @@ from src
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-2 is a root stage
-  Stage-3 depends on stages: Stage-2
-  Stage-4 depends on stages: Stage-3, Stage-5
-  Stage-0 depends on stages: Stage-3
-  Stage-5 depends on stages: Stage-2
-  Stage-1 depends on stages: Stage-5
+  Stage-3 depends on stages: Stage-2, Stage-4
+  Stage-0 depends on stages: Stage-2
+  Stage-4 depends on stages: Stage-2
+  Stage-1 depends on stages: Stage-4
 
 STAGE PLANS:
   Stage: Stage-2
@@ -3112,7 +3065,7 @@ STAGE PLANS:
               Reduce Output Operator
                 key expressions: key (type: string), value (type: string)
                 sort order: ++
-                Map-reduce partition columns: key (type: string), value (type: string)
+                Map-reduce partition columns: key (type: string)
                 Statistics: Num rows: 221 Data size: 2347 Basic stats: COMPLETE Column stats: NONE
       Reduce Operator Tree:
         Forward
@@ -3127,10 +3080,11 @@ STAGE PLANS:
               Statistics: Num rows: 36 Data size: 382 Basic stats: COMPLETE Column stats: NONE
               File Output Operator
                 compressed: false
+                Statistics: Num rows: 36 Data size: 382 Basic stats: COMPLETE Column stats: NONE
                 table:
-                    input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                    output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                    serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
+                    input format: org.apache.hadoop.mapred.TextInputFormat
+                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
           Filter Operator
             predicate: ((KEY._col0 > 10) and (KEY._col0 < 20)) (type: boolean)
             Statistics: Num rows: 24 Data size: 254 Basic stats: COMPLETE Column stats: NONE
@@ -3147,29 +3101,6 @@ STAGE PLANS:
                     serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
 
   Stage: Stage-3
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            Reduce Output Operator
-              key expressions: _col0 (type: string)
-              sort order: +
-              Map-reduce partition columns: _col0 (type: string)
-              Statistics: Num rows: 36 Data size: 382 Basic stats: COMPLETE Column stats: NONE
-              value expressions: _col1 (type: string)
-      Reduce Operator Tree:
-        Select Operator
-          expressions: KEY.reducesinkkey0 (type: string), VALUE._col0 (type: string)
-          outputColumnNames: _col0, _col1
-          Statistics: Num rows: 36 Data size: 382 Basic stats: COMPLETE Column stats: NONE
-          File Output Operator
-            compressed: false
-            Statistics: Num rows: 36 Data size: 382 Basic stats: COMPLETE Column stats: NONE
-            table:
-                input format: org.apache.hadoop.mapred.TextInputFormat
-                output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
-  Stage: Stage-4
     Dependency Collection
 
   Stage: Stage-0
@@ -3178,7 +3109,7 @@ STAGE PLANS:
           hdfs directory: false
 #### A masked pattern was here ####
 
-  Stage: Stage-5
+  Stage: Stage-4
     Map Reduce
       Map Operator Tree:
           TableScan
@@ -3229,11 +3160,10 @@ from src
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-2 is a root stage
-  Stage-3 depends on stages: Stage-2
-  Stage-4 depends on stages: Stage-3, Stage-5
-  Stage-0 depends on stages: Stage-3
-  Stage-5 depends on stages: Stage-2
-  Stage-1 depends on stages: Stage-5
+  Stage-3 depends on stages: Stage-2, Stage-4
+  Stage-0 depends on stages: Stage-2
+  Stage-4 depends on stages: Stage-2
+  Stage-1 depends on stages: Stage-4
 
 STAGE PLANS:
   Stage: Stage-2
@@ -3248,7 +3178,7 @@ STAGE PLANS:
               Reduce Output Operator
                 key expressions: key (type: string), value (type: string)
                 sort order: ++
-                Map-reduce partition columns: key (type: string), value (type: string)
+                Map-reduce partition columns: key (type: string)
                 Statistics: Num rows: 221 Data size: 2347 Basic stats: COMPLETE Column stats: NONE
       Reduce Operator Tree:
         Forward
@@ -3263,10 +3193,11 @@ STAGE PLANS:
               Statistics: Num rows: 36 Data size: 382 Basic stats: COMPLETE Column stats: NONE
               File Output Operator
                 compressed: false
+                Statistics: Num rows: 36 Data size: 382 Basic stats: COMPLETE Column stats: NONE
                 table:
-                    input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                    output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                    serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
+                    input format: org.apache.hadoop.mapred.TextInputFormat
+                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
           Filter Operator
             predicate: ((KEY._col0 > 10) and (KEY._col0 < 20)) (type: boolean)
             Statistics: Num rows: 24 Data size: 254 Basic stats: COMPLETE Column stats: NONE
@@ -3283,29 +3214,6 @@ STAGE PLANS:
                     serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
 
   Stage: Stage-3
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            Reduce Output Operator
-              key expressions: _col0 (type: string)
-              sort order: +
-              Map-reduce partition columns: _col0 (type: string)
-              Statistics: Num rows: 36 Data size: 382 Basic stats: COMPLETE Column stats: NONE
-              value expressions: _col1 (type: string)
-      Reduce Operator Tree:
-        Select Operator
-          expressions: KEY.reducesinkkey0 (type: string), VALUE._col0 (type: string)
-          outputColumnNames: _col0, _col1
-          Statistics: Num rows: 36 Data size: 382 Basic stats: COMPLETE Column stats: NONE
-          File Output Operator
-            compressed: false
-            Statistics: Num rows: 36 Data size: 382 Basic stats: COMPLETE Column stats: NONE
-            table:
-                input format: org.apache.hadoop.mapred.TextInputFormat
-                output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
-  Stage: Stage-4
     Dependency Collection
 
   Stage: Stage-0
@@ -3314,7 +3222,7 @@ STAGE PLANS:
           hdfs directory: false
 #### A masked pattern was here ####
 
-  Stage: Stage-5
+  Stage: Stage-4
     Map Reduce
       Map Operator Tree:
           TableScan
@@ -3369,15 +3277,14 @@ insert overwrite table src_multi2 select * where key > 10 and key < 20
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-4 is a root stage
-  Stage-6 depends on stages: Stage-4, Stage-8, Stage-9
+  Stage-6 depends on stages: Stage-4, Stage-8
   Stage-0 depends on stages: Stage-6
   Stage-5 depends on stages: Stage-0
   Stage-1 depends on stages: Stage-6
   Stage-7 depends on stages: Stage-1
+  Stage-2 depends on stages: Stage-4
   Stage-8 depends on stages: Stage-4
-  Stage-2 depends on stages: Stage-8
-  Stage-9 depends on stages: Stage-4
-  Stage-3 depends on stages: Stage-9
+  Stage-3 depends on stages: Stage-8
 
 STAGE PLANS:
   Stage: Stage-4
@@ -3422,7 +3329,7 @@ STAGE PLANS:
               Reduce Output Operator
                 key expressions: key (type: string), value (type: string)
                 sort order: ++
-                Map-reduce partition columns: key (type: string), value (type: string)
+                Map-reduce partition columns: key (type: string)
                 Statistics: Num rows: 221 Data size: 2347 Basic stats: COMPLETE Column stats: NONE
       Reduce Operator Tree:
         Forward
@@ -3437,10 +3344,11 @@ STAGE PLANS:
               Statistics: Num rows: 36 Data size: 382 Basic stats: COMPLETE Column stats: NONE
               File Output Operator
                 compressed: false
+                Statistics: Num rows: 36 Data size: 382 Basic stats: COMPLETE Column stats: NONE
                 table:
-                    input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                    output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                    serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
+                    input format: org.apache.hadoop.mapred.TextInputFormat
+                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
           Filter Operator
             predicate: ((KEY._col0 > 10) and (KEY._col0 < 20)) (type: boolean)
             Statistics: Num rows: 24 Data size: 254 Basic stats: COMPLETE Column stats: NONE
@@ -3485,36 +3393,13 @@ STAGE PLANS:
   Stage: Stage-7
     Stats-Aggr Operator
 
-  Stage: Stage-8
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            Reduce Output Operator
-              key expressions: _col0 (type: string)
-              sort order: +
-              Map-reduce partition columns: _col0 (type: string)
-              Statistics: Num rows: 36 Data size: 382 Basic stats: COMPLETE Column stats: NONE
-              value expressions: _col1 (type: string)
-      Reduce Operator Tree:
-        Select Operator
-          expressions: KEY.reducesinkkey0 (type: string), VALUE._col0 (type: string)
-          outputColumnNames: _col0, _col1
-          Statistics: Num rows: 36 Data size: 382 Basic stats: COMPLETE Column stats: NONE
-          File Output Operator
-            compressed: false
-            Statistics: Num rows: 36 Data size: 382 Basic stats: COMPLETE Column stats: NONE
-            table:
-                input format: org.apache.hadoop.mapred.TextInputFormat
-                output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
   Stage: Stage-2
     Move Operator
       files:
           hdfs directory: false
 #### A masked pattern was here ####
 
-  Stage: Stage-9
+  Stage: Stage-8
     Map Reduce
       Map Operator Tree:
           TableScan
@@ -3616,15 +3501,14 @@ insert overwrite table src_multi2 select * where key > 10 and key < 20
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-4 is a root stage
-  Stage-6 depends on stages: Stage-4, Stage-8, Stage-9
+  Stage-6 depends on stages: Stage-4, Stage-8
   Stage-0 depends on stages: Stage-6
   Stage-5 depends on stages: Stage-0
   Stage-1 depends on stages: Stage-6
   Stage-7 depends on stages: Stage-1
+  Stage-2 depends on stages: Stage-4
   Stage-8 depends on stages: Stage-4
-  Stage-2 depends on stages: Stage-8
-  Stage-9 depends on stages: Stage-4
-  Stage-3 depends on stages: Stage-9
+  Stage-3 depends on stages: Stage-8
 
 STAGE PLANS:
   Stage: Stage-4
@@ -3669,7 +3553,7 @@ STAGE PLANS:
               Reduce Output Operator
                 key expressions: key (type: string), value (type: string)
                 sort order: ++
-                Map-reduce partition columns: key (type: string), value (type: string)
+                Map-reduce partition columns: key (type: string)
                 Statistics: Num rows: 221 Data size: 2347 Basic stats: COMPLETE Column stats: NONE
       Reduce Operator Tree:
         Forward
@@ -3684,10 +3568,11 @@ STAGE PLANS:
               Statistics: Num rows: 36 Data size: 382 Basic stats: COMPLETE Column stats: NONE
               File Output Operator
                 compressed: false
+                Statistics: Num rows: 36 Data size: 382 Basic stats: COMPLETE Column stats: NONE
                 table:
-                    input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                    output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                    serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
+                    input format: org.apache.hadoop.mapred.TextInputFormat
+                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
           Filter Operator
             predicate: ((KEY._col0 > 10) and (KEY._col0 < 20)) (type: boolean)
             Statistics: Num rows: 24 Data size: 254 Basic stats: COMPLETE Column stats: NONE
@@ -3732,36 +3617,13 @@ STAGE PLANS:
   Stage: Stage-7
     Stats-Aggr Operator
 
-  Stage: Stage-8
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            Reduce Output Operator
-              key expressions: _col0 (type: string)
-              sort order: +
-              Map-reduce partition columns: _col0 (type: string)
-              Statistics: Num rows: 36 Data size: 382 Basic stats: COMPLETE Column stats: NONE
-              value expressions: _col1 (type: string)
-      Reduce Operator Tree:
-        Select Operator
-          expressions: KEY.reducesinkkey0 (type: string), VALUE._col0 (type: string)
-          outputColumnNames: _col0, _col1
-          Statistics: Num rows: 36 Data size: 382 Basic stats: COMPLETE Column stats: NONE
-          File Output Operator
-            compressed: false
-            Statistics: Num rows: 36 Data size: 382 Basic stats: COMPLETE Column stats: NONE
-            table:
-                input format: org.apache.hadoop.mapred.TextInputFormat
-                output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
   Stage: Stage-2
     Move Operator
       files:
           hdfs directory: false
 #### A masked pattern was here ####
 
-  Stage: Stage-9
+  Stage: Stage-8
     Map Reduce
       Map Operator Tree:
           TableScan
@@ -3865,7 +3727,7 @@ STAGE DEPENDENCIES:
   Stage-4 is a root stage
   Stage-11 depends on stages: Stage-4 , consists of Stage-8, Stage-7, Stage-9
   Stage-8
-  Stage-6 depends on stages: Stage-8, Stage-7, Stage-10, Stage-14, Stage-13, Stage-16, Stage-18, Stage-19
+  Stage-6 depends on stages: Stage-8, Stage-7, Stage-10, Stage-14, Stage-13, Stage-16, Stage-4, Stage-18
   Stage-0 depends on stages: Stage-6
   Stage-5 depends on stages: Stage-0
   Stage-1 depends on stages: Stage-6
@@ -3878,10 +3740,9 @@ STAGE DEPENDENCIES:
   Stage-13
   Stage-15
   Stage-16 depends on stages: Stage-15
+  Stage-2 depends on stages: Stage-4
   Stage-18 depends on stages: Stage-4
-  Stage-2 depends on stages: Stage-18
-  Stage-19 depends on stages: Stage-4
-  Stage-3 depends on stages: Stage-19
+  Stage-3 depends on stages: Stage-18
 
 STAGE PLANS:
   Stage: Stage-4
@@ -3926,7 +3787,7 @@ STAGE PLANS:
               Reduce Output Operator
                 key expressions: key (type: string), value (type: string)
                 sort order: ++
-                Map-reduce partition columns: key (type: string), value (type: string)
+                Map-reduce partition columns: key (type: string)
                 Statistics: Num rows: 221 Data size: 2347 Basic stats: COMPLETE Column stats: NONE
       Reduce Operator Tree:
         Forward
@@ -3941,10 +3802,11 @@ STAGE PLANS:
               Statistics: Num rows: 36 Data size: 382 Basic stats: COMPLETE Column stats: NONE
               File Output Operator
                 compressed: false
+                Statistics: Num rows: 36 Data size: 382 Basic stats: COMPLETE Column stats: NONE
                 table:
-                    input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                    output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                    serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
+                    input format: org.apache.hadoop.mapred.TextInputFormat
+                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
           Filter Operator
             predicate: ((KEY._col0 > 10) and (KEY._col0 < 20)) (type: boolean)
             Statistics: Num rows: 24 Data size: 254 Basic stats: COMPLETE Column stats: NONE
@@ -4067,36 +3929,13 @@ STAGE PLANS:
           hdfs directory: true
 #### A masked pattern was here ####
 
-  Stage: Stage-18
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            Reduce Output Operator
-              key expressions: _col0 (type: string)
-              sort order: +
-              Map-reduce partition columns: _col0 (type: string)
-              Statistics: Num rows: 36 Data size: 382 Basic stats: COMPLETE Column stats: NONE
-              value expressions: _col1 (type: string)
-      Reduce Operator Tree:
-        Select Operator
-          expressions: KEY.reducesinkkey0 (type: string), VALUE._col0 (type: string)
-          outputColumnNames: _col0, _col1
-          Statistics: Num rows: 36 Data size: 382 Basic stats: COMPLETE Column stats: NONE
-          File Output Operator
-            compressed: false
-            Statistics: Num rows: 36 Data size: 382 Basic stats: COMPLETE Column stats: NONE
-            table:
-                input format: org.apache.hadoop.mapred.TextInputFormat
-                output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
   Stage: Stage-2
     Move Operator
       files:
           hdfs directory: false
 #### A masked pattern was here ####
 
-  Stage: Stage-19
+  Stage: Stage-18
     Map Reduce
       Map Operator Tree:
           TableScan
@@ -4200,7 +4039,7 @@ STAGE DEPENDENCIES:
   Stage-4 is a root stage
   Stage-11 depends on stages: Stage-4 , consists of Stage-8, Stage-7, Stage-9
   Stage-8
-  Stage-6 depends on stages: Stage-8, Stage-7, Stage-10, Stage-14, Stage-13, Stage-16, Stage-18, Stage-19
+  Stage-6 depends on stages: Stage-8, Stage-7, Stage-10, Stage-14, Stage-13, Stage-16, Stage-4, Stage-18
   Stage-0 depends on stages: Stage-6
   Stage-5 depends on stages: Stage-0
   Stage-1 depends on stages: Stage-6
@@ -4213,10 +4052,9 @@ STAGE DEPENDENCIES:
   Stage-13
   Stage-15
   Stage-16 depends on stages: Stage-15
+  Stage-2 depends on stages: Stage-4
   Stage-18 depends on stages: Stage-4
-  Stage-2 depends on stages: Stage-18
-  Stage-19 depends on stages: Stage-4
-  Stage-3 depends on stages: Stage-19
+  Stage-3 depends on stages: Stage-18
 
 STAGE PLANS:
   Stage: Stage-4
@@ -4261,7 +4099,7 @@ STAGE PLANS:
               Reduce Output Operator
                 key expressions: key (type: string), value (type: string)
                 sort order: ++
-                Map-reduce partition columns: key (type: string), value (type: string)
+                Map-reduce partition columns: key (type: string)
                 Statistics: Num rows: 221 Data size: 2347 Basic stats: COMPLETE Column stats: NONE
       Reduce Operator Tree:
         Forward
@@ -4276,10 +4114,11 @@ STAGE PLANS:
               Statistics: Num rows: 36 Data size: 382 Basic stats: COMPLETE Column stats: NONE
               File Output Operator
                 compressed: false
+                Statistics: Num rows: 36 Data size: 382 Basic stats: COMPLETE Column stats: NONE
                 table:
-                    input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                    output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                    serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
+                    input format: org.apache.hadoop.mapred.TextInputFormat
+                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
           Filter Operator
             predicate: ((KEY._col0 > 10) and (KEY._col0 < 20)) (type: boolean)
             Statistics: Num rows: 24 Data size: 254 Basic stats: COMPLETE Column stats: NONE
@@ -4402,36 +4241,13 @@ STAGE PLANS:
           hdfs directory: true
 #### A masked pattern was here ####
 
-  Stage: Stage-18
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            Reduce Output Operator
-              key expressions: _col0 (type: string)
-              sort order: +
-              Map-reduce partition columns: _col0 (type: string)
-              Statistics: Num rows: 36 Data size: 382 Basic stats: COMPLETE Column stats: NONE
-              value expressions: _col1 (type: string)
-      Reduce Operator Tree:
-        Select Operator
-          expressions: KEY.reducesinkkey0 (type: string), VALUE._col0 (type: string)
-          outputColumnNames: _col0, _col1
-          Statistics: Num rows: 36 Data size: 382 Basic stats: COMPLETE Column stats: NONE
-          File Output Operator
-            compressed: false
-            Statistics: Num rows: 36 Data size: 382 Basic stats: COMPLETE Column stats: NONE
-            table:
-                input format: org.apache.hadoop.mapred.TextInputFormat
-                output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
   Stage: Stage-2
     Move Operator
       files:
           hdfs directory: false
 #### A masked pattern was here ####
 
-  Stage: Stage-19
+  Stage: Stage-18
     Map Reduce
       Map Operator Tree:
           TableScan

http://git-wip-us.apache.org/repos/asf/hive/blob/538ae703/ql/src/test/results/clientpositive/ptf.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/ptf.q.out b/ql/src/test/results/clientpositive/ptf.q.out
index e61703c..9d34e4e 100644
--- a/ql/src/test/results/clientpositive/ptf.q.out
+++ b/ql/src/test/results/clientpositive/ptf.q.out
@@ -880,8 +880,7 @@ POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-2 depends on stages: Stage-1
-  Stage-3 depends on stages: Stage-2
-  Stage-0 depends on stages: Stage-3
+  Stage-0 depends on stages: Stage-2
 
 STAGE PLANS:
   Stage: Stage-1
@@ -938,7 +937,7 @@ STAGE PLANS:
             Reduce Output Operator
               key expressions: _col0 (type: string), _col1 (type: string), _col2 (type: int)
               sort order: +++
-              Map-reduce partition columns: _col0 (type: string), _col1 (type: string), _col2 (type: int)
+              Map-reduce partition columns: _col0 (type: string)
               Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
       Reduce Operator Tree:
         Group By Operator
@@ -946,28 +945,6 @@ STAGE PLANS:
           mode: mergepartial
           outputColumnNames: _col0, _col1, _col2
           Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE
-          File Output Operator
-            compressed: false
-            table:
-                input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
-
-  Stage: Stage-3
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            Reduce Output Operator
-              key expressions: _col0 (type: string), _col1 (type: string)
-              sort order: ++
-              Map-reduce partition columns: _col0 (type: string)
-              Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE
-              value expressions: _col2 (type: int)
-      Reduce Operator Tree:
-        Select Operator
-          expressions: KEY.reducesinkkey0 (type: string), KEY.reducesinkkey1 (type: string), VALUE._col0 (type: int)
-          outputColumnNames: _col0, _col1, _col2
-          Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE
           PTF Operator
             Function definitions:
                 Input definition

http://git-wip-us.apache.org/repos/asf/hive/blob/538ae703/ql/src/test/results/clientpositive/spark/multi_insert_move_tasks_share_dependencies.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/multi_insert_move_tasks_share_dependencies.q.out b/ql/src/test/results/clientpositive/spark/multi_insert_move_tasks_share_dependencies.q.out
index 2bcf1bf..9bc6345 100644
--- a/ql/src/test/results/clientpositive/spark/multi_insert_move_tasks_share_dependencies.q.out
+++ b/ql/src/test/results/clientpositive/spark/multi_insert_move_tasks_share_dependencies.q.out
@@ -2353,10 +2353,8 @@ STAGE PLANS:
   Stage: Stage-2
     Spark
       Edges:
-        Reducer 5 <- Map 1 (PARTITION-LEVEL SORT, 2)
-        Reducer 6 <- Map 1 (PARTITION-LEVEL SORT, 2)
-        Reducer 3 <- Reducer 5 (PARTITION-LEVEL SORT, 2)
-        Reducer 4 <- Reducer 6 (PARTITION-LEVEL SORT, 2)
+        Reducer 2 <- Map 1 (PARTITION-LEVEL SORT, 2)
+        Reducer 3 <- Reducer 2 (PARTITION-LEVEL SORT, 2)
 #### A masked pattern was here ####
       Vertices:
         Map 1 
@@ -2370,35 +2368,9 @@ STAGE PLANS:
                     Reduce Output Operator
                       key expressions: key (type: string), value (type: string)
                       sort order: ++
-                      Map-reduce partition columns: key (type: string), value (type: string)
+                      Map-reduce partition columns: key (type: string)
                       Statistics: Num rows: 221 Data size: 2347 Basic stats: COMPLETE Column stats: NONE
-        Reducer 3 
-            Reduce Operator Tree:
-              Select Operator
-                expressions: KEY.reducesinkkey0 (type: string), VALUE._col0 (type: string)
-                outputColumnNames: _col0, _col1
-                Statistics: Num rows: 36 Data size: 382 Basic stats: COMPLETE Column stats: NONE
-                File Output Operator
-                  compressed: false
-                  Statistics: Num rows: 36 Data size: 382 Basic stats: COMPLETE Column stats: NONE
-                  table:
-                      input format: org.apache.hadoop.mapred.TextInputFormat
-                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-        Reducer 4 
-            Reduce Operator Tree:
-              Select Operator
-                expressions: VALUE._col0 (type: string), KEY.reducesinkkey0 (type: string)
-                outputColumnNames: _col0, _col1
-                Statistics: Num rows: 12 Data size: 127 Basic stats: COMPLETE Column stats: NONE
-                File Output Operator
-                  compressed: false
-                  Statistics: Num rows: 12 Data size: 127 Basic stats: COMPLETE Column stats: NONE
-                  table:
-                      input format: org.apache.hadoop.mapred.TextInputFormat
-                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-        Reducer 5 
+        Reducer 2 
             Reduce Operator Tree:
               Forward
                 Statistics: Num rows: 221 Data size: 2347 Basic stats: COMPLETE Column stats: NONE
@@ -2410,16 +2382,13 @@ STAGE PLANS:
                     mode: complete
                     outputColumnNames: _col0, _col1
                     Statistics: Num rows: 36 Data size: 382 Basic stats: COMPLETE Column stats: NONE
-                    Reduce Output Operator
-                      key expressions: _col0 (type: string)
-                      sort order: +
-                      Map-reduce partition columns: _col0 (type: string)
+                    File Output Operator
+                      compressed: false
                       Statistics: Num rows: 36 Data size: 382 Basic stats: COMPLETE Column stats: NONE
-                      value expressions: _col1 (type: string)
-        Reducer 6 
-            Reduce Operator Tree:
-              Forward
-                Statistics: Num rows: 221 Data size: 2347 Basic stats: COMPLETE Column stats: NONE
+                      table:
+                          input format: org.apache.hadoop.mapred.TextInputFormat
+                          output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                          serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                 Filter Operator
                   predicate: ((KEY._col0 > 10) and (KEY._col0 < 20)) (type: boolean)
                   Statistics: Num rows: 24 Data size: 254 Basic stats: COMPLETE Column stats: NONE
@@ -2434,6 +2403,19 @@ STAGE PLANS:
                       Map-reduce partition columns: _col1 (type: string)
                       Statistics: Num rows: 12 Data size: 127 Basic stats: COMPLETE Column stats: NONE
                       value expressions: _col0 (type: string)
+        Reducer 3 
+            Reduce Operator Tree:
+              Select Operator
+                expressions: VALUE._col0 (type: string), KEY.reducesinkkey0 (type: string)
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 12 Data size: 127 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 12 Data size: 127 Basic stats: COMPLETE Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.mapred.TextInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
 
   Stage: Stage-3
     Dependency Collection
@@ -2480,10 +2462,8 @@ STAGE PLANS:
   Stage: Stage-2
     Spark
       Edges:
-        Reducer 5 <- Map 1 (PARTITION-LEVEL SORT, 2)
-        Reducer 6 <- Map 1 (PARTITION-LEVEL SORT, 2)
-        Reducer 3 <- Reducer 5 (PARTITION-LEVEL SORT, 2)
-        Reducer 4 <- Reducer 6 (PARTITION-LEVEL SORT, 2)
+        Reducer 2 <- Map 1 (PARTITION-LEVEL SORT, 2)
+        Reducer 3 <- Reducer 2 (PARTITION-LEVEL SORT, 2)
 #### A masked pattern was here ####
       Vertices:
         Map 1 
@@ -2497,35 +2477,9 @@ STAGE PLANS:
                     Reduce Output Operator
                       key expressions: key (type: string), value (type: string)
                       sort order: ++
-                      Map-reduce partition columns: key (type: string), value (type: string)
+                      Map-reduce partition columns: key (type: string)
                       Statistics: Num rows: 221 Data size: 2347 Basic stats: COMPLETE Column stats: NONE
-        Reducer 3 
-            Reduce Operator Tree:
-              Select Operator
-                expressions: KEY.reducesinkkey0 (type: string), VALUE._col0 (type: string)
-                outputColumnNames: _col0, _col1
-                Statistics: Num rows: 36 Data size: 382 Basic stats: COMPLETE Column stats: NONE
-                File Output Operator
-                  compressed: false
-                  Statistics: Num rows: 36 Data size: 382 Basic stats: COMPLETE Column stats: NONE
-                  table:
-                      input format: org.apache.hadoop.mapred.TextInputFormat
-                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-        Reducer 4 
-            Reduce Operator Tree:
-              Select Operator
-                expressions: VALUE._col0 (type: string), KEY.reducesinkkey0 (type: string)
-                outputColumnNames: _col0, _col1
-                Statistics: Num rows: 12 Data size: 127 Basic stats: COMPLETE Column stats: NONE
-                File Output Operator
-                  compressed: false
-                  Statistics: Num rows: 12 Data size: 127 Basic stats: COMPLETE Column stats: NONE
-                  table:
-                      input format: org.apache.hadoop.mapred.TextInputFormat
-                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-        Reducer 5 
+        Reducer 2 
             Reduce Operator Tree:
               Forward
                 Statistics: Num rows: 221 Data size: 2347 Basic stats: COMPLETE Column stats: NONE
@@ -2537,16 +2491,13 @@ STAGE PLANS:
                     mode: complete
                     outputColumnNames: _col0, _col1
                     Statistics: Num rows: 36 Data size: 382 Basic stats: COMPLETE Column stats: NONE
-                    Reduce Output Operator
-                      key expressions: _col0 (type: string)
-                      sort order: +
-                      Map-reduce partition columns: _col0 (type: string)
+                    File Output Operator
+                      compressed: false
                       Statistics: Num rows: 36 Data size: 382 Basic stats: COMPLETE Column stats: NONE
-                      value expressions: _col1 (type: string)
-        Reducer 6 
-            Reduce Operator Tree:
-              Forward
-                Statistics: Num rows: 221 Data size: 2347 Basic stats: COMPLETE Column stats: NONE
+                      table:
+                          input format: org.apache.hadoop.mapred.TextInputFormat
+                          output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                          serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                 Filter Operator
                   predicate: ((KEY._col0 > 10) and (KEY._col0 < 20)) (type: boolean)
                   Statistics: Num rows: 24 Data size: 254 Basic stats: COMPLETE Column stats: NONE
@@ -2561,6 +2512,19 @@ STAGE PLANS:
                       Map-reduce partition columns: _col1 (type: string)
                       Statistics: Num rows: 12 Data size: 127 Basic stats: COMPLETE Column stats: NONE
                       value expressions: _col0 (type: string)
+        Reducer 3 
+            Reduce Operator Tree:
+              Select Operator
+                expressions: VALUE._col0 (type: string), KEY.reducesinkkey0 (type: string)
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 12 Data size: 127 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 12 Data size: 127 Basic stats: COMPLETE Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.mapred.TextInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
 
   Stage: Stage-3
     Dependency Collection
@@ -2607,10 +2571,8 @@ STAGE PLANS:
   Stage: Stage-2
     Spark
       Edges:
-        Reducer 5 <- Map 1 (PARTITION-LEVEL SORT, 2)
-        Reducer 6 <- Map 1 (PARTITION-LEVEL SORT, 2)
-        Reducer 3 <- Reducer 5 (PARTITION-LEVEL SORT, 2)
-        Reducer 4 <- Reducer 6 (PARTITION-LEVEL SORT, 2)
+        Reducer 2 <- Map 1 (PARTITION-LEVEL SORT, 2)
+        Reducer 3 <- Reducer 2 (PARTITION-LEVEL SORT, 2)
 #### A masked pattern was here ####
       Vertices:
         Map 1 
@@ -2624,35 +2586,9 @@ STAGE PLANS:
                     Reduce Output Operator
                       key expressions: key (type: string), value (type: string)
                       sort order: ++
-                      Map-reduce partition columns: key (type: string), value (type: string)
+                      Map-reduce partition columns: key (type: string)
                       Statistics: Num rows: 221 Data size: 2347 Basic stats: COMPLETE Column stats: NONE
-        Reducer 3 
-            Reduce Operator Tree:
-              Select Operator
-                expressions: KEY.reducesinkkey0 (type: string), VALUE._col0 (type: string)
-                outputColumnNames: _col0, _col1
-                Statistics: Num rows: 36 Data size: 382 Basic stats: COMPLETE Column stats: NONE
-                File Output Operator
-                  compressed: false
-                  Statistics: Num rows: 36 Data size: 382 Basic stats: COMPLETE Column stats: NONE
-                  table:
-                      input format: org.apache.hadoop.mapred.TextInputFormat
-                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-        Reducer 4 
-            Reduce Operator Tree:
-              Select Operator
-                expressions: VALUE._col0 (type: string), KEY.reducesinkkey0 (type: string)
-                outputColumnNames: _col0, _col1
-                Statistics: Num rows: 12 Data size: 127 Basic stats: COMPLETE Column stats: NONE
-                File Output Operator
-                  compressed: false
-                  Statistics: Num rows: 12 Data size: 127 Basic stats: COMPLETE Column stats: NONE
-                  table:
-                      input format: org.apache.hadoop.mapred.TextInputFormat
-                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-        Reducer 5 
+        Reducer 2 
             Reduce Operator Tree:
               Forward
                 Statistics: Num rows: 221 Data size: 2347 Basic stats: COMPLETE Column stats: NONE
@@ -2664,16 +2600,13 @@ STAGE PLANS:
                     mode: complete
                     outputColumnNames: _col0, _col1
                     Statistics: Num rows: 36 Data size: 382 Basic stats: COMPLETE Column stats: NONE
-                    Reduce Output Operator
-                      key expressions: _col0 (type: string)
-                      sort order: +
-                      Map-reduce partition columns: _col0 (type: string)
+                    File Output Operator
+                      compressed: false
                       Statistics: Num rows: 36 Data size: 382 Basic stats: COMPLETE Column stats: NONE
-                      value expressions: _col1 (type: string)
-        Reducer 6 
-            Reduce Operator Tree:
-              Forward
-                Statistics: Num rows: 221 Data size: 2347 Basic stats: COMPLETE Column stats: NONE
+                      table:
+                          input format: org.apache.hadoop.mapred.TextInputFormat
+                          output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                          serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                 Filter Operator
                   predicate: ((KEY._col0 > 10) and (KEY._col0 < 20)) (type: boolean)
                   Statistics: Num rows: 24 Data size: 254 Basic stats: COMPLETE Column stats: NONE
@@ -2688,6 +2621,19 @@ STAGE PLANS:
                       Map-reduce partition columns: _col1 (type: string)
                       Statistics: Num rows: 12 Data size: 127 Basic stats: COMPLETE Column stats: NONE
                       value expressions: _col0 (type: string)
+        Reducer 3 
+            Reduce Operator Tree:
+              Select Operator
+                expressions: VALUE._col0 (type: string), KEY.reducesinkkey0 (type: string)
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 12 Data size: 127 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 12 Data size: 127 Basic stats: COMPLETE Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.mapred.TextInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
 
   Stage: Stage-3
     Dependency Collection
@@ -2734,10 +2680,8 @@ STAGE PLANS:
   Stage: Stage-2
     Spark
       Edges:
-        Reducer 5 <- Map 1 (PARTITION-LEVEL SORT, 2)
-        Reducer 6 <- Map 1 (PARTITION-LEVEL SORT, 2)
-        Reducer 3 <- Reducer 5 (PARTITION-LEVEL SORT, 2)
-        Reducer 4 <- Reducer 6 (PARTITION-LEVEL SORT, 2)
+        Reducer 2 <- Map 1 (PARTITION-LEVEL SORT, 2)
+        Reducer 3 <- Reducer 2 (PARTITION-LEVEL SORT, 2)
 #### A masked pattern was here ####
       Vertices:
         Map 1 
@@ -2751,35 +2695,9 @@ STAGE PLANS:
                     Reduce Output Operator
                       key expressions: key (type: string), value (type: string)
                       sort order: ++
-                      Map-reduce partition columns: key (type: string), value (type: string)
+                      Map-reduce partition columns: key (type: string)
                       Statistics: Num rows: 221 Data size: 2347 Basic stats: COMPLETE Column stats: NONE
-        Reducer 3 
-            Reduce Operator Tree:
-              Select Operator
-                expressions: KEY.reducesinkkey0 (type: string), VALUE._col0 (type: string)
-                outputColumnNames: _col0, _col1
-                Statistics: Num rows: 36 Data size: 382 Basic stats: COMPLETE Column stats: NONE
-                File Output Operator
-                  compressed: false
-                  Statistics: Num rows: 36 Data size: 382 Basic stats: COMPLETE Column stats: NONE
-                  table:
-                      input format: org.apache.hadoop.mapred.TextInputFormat
-                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-        Reducer 4 
-            Reduce Operator Tree:
-              Select Operator
-                expressions: VALUE._col0 (type: string), KEY.reducesinkkey0 (type: string)
-                outputColumnNames: _col0, _col1
-                Statistics: Num rows: 12 Data size: 127 Basic stats: COMPLETE Column stats: NONE
-                File Output Operator
-                  compressed: false
-                  Statistics: Num rows: 12 Data size: 127 Basic stats: COMPLETE Column stats: NONE
-                  table:
-                      input format: org.apache.hadoop.mapred.TextInputFormat
-                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-        Reducer 5 
+        Reducer 2 
             Reduce Operator Tree:
               Forward
                 Statistics: Num rows: 221 Data size: 2347 Basic stats: COMPLETE Column stats: NONE
@@ -2791,16 +2709,13 @@ STAGE PLANS:
                     mode: complete
                     outputColumnNames: _col0, _col1
                     Statistics: Num rows: 36 Data size: 382 Basic stats: COMPLETE Column stats: NONE
-                    Reduce Output Operator
-                      key expressions: _col0 (type: string)
-                      sort order: +
-                      Map-reduce partition columns: _col0 (type: string)
+                    File Output Operator
+                      compressed: false
                       Statistics: Num rows: 36 Data size: 382 Basic stats: COMPLETE Column stats: NONE
-                      value expressions: _col1 (type: string)
-        Reducer 6 
-            Reduce Operator Tree:
-              Forward
-                Statistics: Num rows: 221 Data size: 2347 Basic stats: COMPLETE Column stats: NONE
+                      table:
+                          input format: org.apache.hadoop.mapred.TextInputFormat
+                          output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                          serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                 Filter Operator
                   predicate: ((KEY._col0 > 10) and (KEY._col0 < 20)) (type: boolean)
                   Statistics: Num rows: 24 Data size: 254 Basic stats: COMPLETE Column stats: NONE
@@ -2815,6 +2730,19 @@ STAGE PLANS:
                       Map-reduce partition columns: _col1 (type: string)
                       Statistics: Num rows: 12 Data size: 127 Basic stats: COMPLETE Column stats: NONE
                       value expressions: _col0 (type: string)
+        Reducer 3 
+            Reduce Operator Tree:
+              Select Operator
+                expressions: VALUE._col0 (type: string), KEY.reducesinkkey0 (type: string)
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 12 Data size: 127 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 12 Data size: 127 Basic stats: COMPLETE Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.mapred.TextInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
 
   Stage: Stage-3
     Dependency Collection
@@ -2869,10 +2797,8 @@ STAGE PLANS:
   Stage: Stage-4
     Spark
       Edges:
-        Reducer 5 <- Map 1 (PARTITION-LEVEL SORT, 2)
-        Reducer 6 <- Map 1 (PARTITION-LEVEL SORT, 2)
-        Reducer 3 <- Reducer 5 (PARTITION-LEVEL SORT, 2)
-        Reducer 4 <- Reducer 6 (PARTITION-LEVEL SORT, 2)
+        Reducer 2 <- Map 1 (PARTITION-LEVEL SORT, 2)
+        Reducer 3 <- Reducer 2 (PARTITION-LEVEL SORT, 2)
 #### A masked pattern was here ####
       Vertices:
         Map 1 
@@ -2916,35 +2842,9 @@ STAGE PLANS:
                     Reduce Output Operator
                       key expressions: key (type: string), value (type: string)
                       sort order: ++
-                      Map-reduce partition columns: key (type: string), value (type: string)
+                      Map-reduce partition columns: key (type: string)
                       Statistics: Num rows: 221 Data size: 2347 Basic stats: COMPLETE Column stats: NONE
-        Reducer 3 
-            Reduce Operator Tree:
-              Select Operator
-                expressions: KEY.reducesinkkey0 (type: string), VALUE._col0 (type: string)
-                outputColumnNames: _col0, _col1
-                Statistics: Num rows: 36 Data size: 382 Basic stats: COMPLETE Column stats: NONE
-                File Output Operator
-                  compressed: false
-                  Statistics: Num rows: 36 Data size: 382 Basic stats: COMPLETE Column stats: NONE
-                  table:
-                      input format: org.apache.hadoop.mapred.TextInputFormat
-                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-        Reducer 4 
-            Reduce Operator Tree:
-              Select Operator
-                expressions: VALUE._col0 (type: string), KEY.reducesinkkey0 (type: string)
-                outputColumnNames: _col0, _col1
-                Statistics: Num rows: 12 Data size: 127 Basic stats: COMPLETE Column stats: NONE
-                File Output Operator
-                  compressed: false
-                  Statistics: Num rows: 12 Data size: 127 Basic stats: COMPLETE Column stats: NONE
-                  table:
-                      input format: org.apache.hadoop.mapred.TextInputFormat
-                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-        Reducer 5 
+        Reducer 2 
             Reduce Operator Tree:
               Forward
                 Statistics: Num rows: 221 Data size: 2347 Basic stats: COMPLETE Column stats: NONE
@@ -2956,16 +2856,13 @@ STAGE PLANS:
                     mode: complete
                     outputColumnNames: _col0, _col1
                     Statistics: Num rows: 36 Data size: 382 Basic stats: COMPLETE Column stats: NONE
-                    Reduce Output Operator
-                      key expressions: _col0 (type: string)
-                      sort order: +
-                      Map-reduce partition columns: _col0 (type: string)
+                    File Output Operator
+                      compressed: false
                       Statistics: Num rows: 36 Data size: 382 Basic stats: COMPLETE Column stats: NONE
-                      value expressions: _col1 (type: string)
-        Reducer 6 
-            Reduce Operator Tree:
-              Forward
-                Statistics: Num rows: 221 Data size: 2347 Basic stats: COMPLETE Column stats: NONE
+                      table:
+                          input format: org.apache.hadoop.mapred.TextInputFormat
+                          output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                          serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                 Filter Operator
                   predicate: ((KEY._col0 > 10) and (KEY._col0 < 20)) (type: boolean)
                   Statistics: Num rows: 24 Data size: 254 Basic stats: COMPLETE Column stats: NONE
@@ -2980,6 +2877,19 @@ STAGE PLANS:
                       Map-reduce partition columns: _col1 (type: string)
                       Statistics: Num rows: 12 Data size: 127 Basic stats: COMPLETE Column stats: NONE
                       value expressions: _col0 (type: string)
+        Reducer 3 
+            Reduce Operator Tree:
+              Select Operator
+                expressions: VALUE._col0 (type: string), KEY.reducesinkkey0 (type: string)
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 12 Data size: 127 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 12 Data size: 127 Basic stats: COMPLETE Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.mapred.TextInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
 
   Stage: Stage-5
     Dependency Collection
@@ -3107,10 +3017,8 @@ STAGE PLANS:
   Stage: Stage-4
     Spark
       Edges:
-        Reducer 5 <- Map 1 (PARTITION-LEVEL SORT, 2)
-        Reducer 6 <- Map 1 (PARTITION-LEVEL SORT, 2)
-        Reducer 3 <- Reducer 5 (PARTITION-LEVEL SORT, 2)
-        Reducer 4 <- Reducer 6 (PARTITION-LEVEL SORT, 2)
+        Reducer 2 <- Map 1 (PARTITION-LEVEL SORT, 2)
+        Reducer 3 <- Reducer 2 (PARTITION-LEVEL SORT, 2)
 #### A masked pattern was here ####
       Vertices:
         Map 1 
@@ -3154,35 +3062,9 @@ STAGE PLANS:
                     Reduce Output Operator
                       key expressions: key (type: string), value (type: string)
                       sort order: ++
-                      Map-reduce partition columns: key (type: string), value (type: string)
+                      Map-reduce partition columns: key (type: string)
                       Statistics: Num rows: 221 Data size: 2347 Basic stats: COMPLETE Column stats: NONE
-        Reducer 3 
-            Reduce Operator Tree:
-              Select Operator
-                expressions: KEY.reducesinkkey0 (type: string), VALUE._col0 (type: string)
-                outputColumnNames: _col0, _col1
-                Statistics: Num rows: 36 Data size: 382 Basic stats: COMPLETE Column stats: NONE
-                File Output Operator
-                  compressed: false
-                  Statistics: Num rows: 36 Data size: 382 Basic stats: COMPLETE Column stats: NONE
-                  table:
-                      input format: org.apache.hadoop.mapred.TextInputFormat
-                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-        Reducer 4 
-            Reduce Operator Tree:
-              Select Operator
-                expressions: VALUE._col0 (type: string), KEY.reducesinkkey0 (type: string)
-                outputColumnNames: _col0, _col1
-                Statistics: Num rows: 12 Data size: 127 Basic stats: COMPLETE Column stats: NONE
-                File Output Operator
-                  compressed: false
-                  Statistics: Num rows: 12 Data size: 127 Basic stats: COMPLETE Column stats: NONE
-                  table:
-                      input format: org.apache.hadoop.mapred.TextInputFormat
-                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-        Reducer 5 
+        Reducer 2 
             Reduce Operator Tree:
               Forward
                 Statistics: Num rows: 221 Data size: 2347 Basic stats: COMPLETE Column stats: NONE
@@ -3194,16 +3076,13 @@ STAGE PLANS:
                     mode: complete
                     outputColumnNames: _col0, _col1
                     Statistics: Num rows: 36 Data size: 382 Basic stats: COMPLETE Column stats: NONE
-                    Reduce Output Operator
-                      key expressions: _col0 (type: string)
-                      sort order: +
-                      Map-reduce partition columns: _col0 (type: string)
+                    File Output Operator
+                      compressed: false
                       Statistics: Num rows: 36 Data size: 382 Basic stats: COMPLETE Column stats: NONE
-                      value expressions: _col1 (type: string)
-        Reducer 6 
-            Reduce Operator Tree:
-              Forward
-                Statistics: Num rows: 221 Data size: 2347 Basic stats: COMPLETE Column stats: NONE
+                      table:
+                          input format: org.apache.hadoop.mapred.TextInputFormat
+                          output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                          serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                 Filter Operator
                   predicate: ((KEY._col0 > 10) and (KEY._col0 < 20)) (type: boolean)
                   Statistics: Num rows: 24 Data size: 254 Basic stats: COMPLETE Column stats: NONE
@@ -3218,6 +3097,19 @@ STAGE PLANS:
                       Map-reduce partition columns: _col1 (type: string)
                       Statistics: Num rows: 12 Data size: 127 Basic stats: COMPLETE Column stats: NONE
                       value expressions: _col0 (type: string)
+        Reducer 3 
+            Reduce Operator Tree:
+              Select Operator
+                expressions: VALUE._col0 (type: string), KEY.reducesinkkey0 (type: string)
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 12 Data size: 127 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 12 Data size: 127 Basic stats: COMPLETE Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.mapred.TextInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
 
   Stage: Stage-5
     Dependency Collection
@@ -3345,10 +3237,8 @@ STAGE PLANS:
   Stage: Stage-4
     Spark
       Edges:
-        Reducer 5 <- Map 1 (PARTITION-LEVEL SORT, 2)
-        Reducer 6 <- Map 1 (PARTITION-LEVEL SORT, 2)
-        Reducer 3 <- Reducer 5 (PARTITION-LEVEL SORT, 2)
-        Reducer 4 <- Reducer 6 (PARTITION-LEVEL SORT, 2)
+        Reducer 2 <- Map 1 (PARTITION-LEVEL SORT, 2)
+        Reducer 3 <- Reducer 2 (PARTITION-LEVEL SORT, 2)
 #### A masked pattern was here ####
       Vertices:
         Map 1 
@@ -3392,35 +3282,9 @@ STAGE PLANS:
                     Reduce Output Operator
                       key expressions: key (type: string), value (type: string)
                       sort order: ++
-                      Map-reduce partition columns: key (type: string), value (type: string)
+                      Map-reduce partition columns: key (type: string)
                       Statistics: Num rows: 221 Data size: 2347 Basic stats: COMPLETE Column stats: NONE
-        Reducer 3 
-            Reduce Operator Tree:
-              Select Operator
-                expressions: KEY.reducesinkkey0 (type: string), VALUE._col0 (type: string)
-                outputColumnNames: _col0, _col1
-                Statistics: Num rows: 36 Data size: 382 Basic stats: COMPLETE Column stats: NONE
-                File Output Operator
-                  compressed: false
-                  Statistics: Num rows: 36 Data size: 382 Basic stats: COMPLETE Column stats: NONE
-                  table:
-                      input format: org.apache.hadoop.mapred.TextInputFormat
-                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-        Reducer 4 
-            Reduce Operator Tree:
-              Select Operator
-                expressions: VALUE._col0 (type: string), KEY.reducesinkkey0 (type: string)
-                outputColumnNames: _col0, _col1
-                Statistics: Num rows: 12 Data size: 127 Basic stats: COMPLETE Column stats: NONE
-                File Output Operator
-                  compressed: false
-                  Statistics: Num rows: 12 Data size: 127 Basic stats: COMPLETE Column stats: NONE
-                  table:
-                      input format: org.apache.hadoop.mapred.TextInputFormat
-                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-        Reducer 5 
+        Reducer 2 
             Reduce Operator Tree:
               Forward
                 Statistics: Num rows: 221 Data size: 2347 Basic stats: COMPLETE Column stats: NONE
@@ -3432,16 +3296,13 @@ STAGE PLANS:
                     mode: complete
                     outputColumnNames: _col0, _col1
                     Statistics: Num rows: 36 Data size: 382 Basic stats: COMPLETE Column stats: NONE
-                    Reduce Output Operator
-                      key expressions: _col0 (type: string)
-                      sort order: +
-                      Map-reduce partition columns: _col0 (type: string)
+                    File Output Operator
+                      compressed: false
                       Statistics: Num rows: 36 Data size: 382 Basic stats: COMPLETE Column stats: NONE
-                      value expressions: _col1 (type: string)
-        Reducer 6 
-            Reduce Operator Tree:
-              Forward
-                Statistics: Num rows: 221 Data size: 2347 Basic stats: COMPLETE Column stats: NONE
+                      table:
+                          input format: org.apache.hadoop.mapred.TextInputFormat
+                          output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                          serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                 Filter Operator
                   predicate: ((KEY._col0 > 10) and (KEY._col0 < 20)) (type: boolean)
                   Statistics: Num rows: 24 Data size: 254 Basic stats: COMPLETE Column stats: NONE
@@ -3456,6 +3317,19 @@ STAGE PLANS:
                       Map-reduce partition columns: _col1 (type: string)
                       Statistics: Num rows: 12 Data size: 127 Basic stats: COMPLETE Column stats: NONE
                       value expressions: _col0 (type: string)
+        Reducer 3 
+            Reduce Operator Tree:
+              Select Operator
+                expressions: VALUE._col0 (type: string), KEY.reducesinkkey0 (type: string)
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 12 Data size: 127 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 12 Data size: 127 Basic stats: COMPLETE Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.mapred.TextInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
 
   Stage: Stage-5
     Dependency Collection
@@ -3583,10 +3457,8 @@ STAGE PLANS:
   Stage: Stage-4
     Spark
       Edges:
-        Reducer 5 <- Map 1 (PARTITION-LEVEL SORT, 2)
-        Reducer 6 <- Map 1 (PARTITION-LEVEL SORT, 2)
-        Reducer 3 <- Reducer 5 (PARTITION-LEVEL SORT, 2)
-        Reducer 4 <- Reducer 6 (PARTITION-LEVEL SORT, 2)
+        Reducer 2 <- Map 1 (PARTITION-LEVEL SORT, 2)
+        Reducer 3 <- Reducer 2 (PARTITION-LEVEL SORT, 2)
 #### A masked pattern was here ####
       Vertices:
         Map 1 
@@ -3630,35 +3502,9 @@ STAGE PLANS:
                     Reduce Output Operator
                       key expressions: key (type: string), value (type: string)
                       sort order: ++
-                      Map-reduce partition columns: key (type: string), value (type: string)
+                      Map-reduce partition columns: key (type: string)
                       Statistics: Num rows: 221 Data size: 2347 Basic stats: COMPLETE Column stats: NONE
-        Reducer 3 
-            Reduce Operator Tree:
-              Select Operator
-                expressions: KEY.reducesinkkey0 (type: string), VALUE._col0 (type: string)
-                outputColumnNames: _col0, _col1
-                Statistics: Num rows: 36 Data size: 382 Basic stats: COMPLETE Column stats: NONE
-                File Output Operator
-                  compressed: false
-                  Statistics: Num rows: 36 Data size: 382 Basic stats: COMPLETE Column stats: NONE
-                  table:
-                      input format: org.apache.hadoop.mapred.TextInputFormat
-                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-        Reducer 4 
-            Reduce Operator Tree:
-              Select Operator
-                expressions: VALUE._col0 (type: string), KEY.reducesinkkey0 (type: string)
-                outputColumnNames: _col0, _col1
-                Statistics: Num rows: 12 Data size: 127 Basic stats: COMPLETE Column stats: NONE
-                File Output Operator
-                  compressed: false
-                  Statistics: Num rows: 12 Data size: 127 Basic stats: COMPLETE Column stats: NONE
-                  table:
-                      input format: org.apache.hadoop.mapred.TextInputFormat
-                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-        Reducer 5 
+        Reducer 2 
             Reduce Operator Tree:
               Forward
                 Statistics: Num rows: 221 Data size: 2347 Basic stats: COMPLETE Column stats: NONE
@@ -3670,16 +3516,13 @@ STAGE PLANS:
                     mode: complete
                     outputColumnNames: _col0, _col1
                     Statistics: Num rows: 36 Data size: 382 Basic stats: COMPLETE Column stats: NONE
-                    Reduce Output Operator
-                      key expressions: _col0 (type: string)
-                      sort order: +
-                      Map-reduce partition columns: _col0 (type: string)
+                    File Output Operator
+                      compressed: false
                       Statistics: Num rows: 36 Data size: 382 Basic stats: COMPLETE Column stats: NONE
-                      value expressions: _col1 (type: string)
-        Reducer 6 
-            Reduce Operator Tree:
-              Forward
-                Statistics: Num rows: 221 Data size: 2347 Basic stats: COMPLETE Column stats: NONE
+                      table:
+                          input format: org.apache.hadoop.mapred.TextInputFormat
+                          output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                          serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                 Filter Operator
                   predicate: ((KEY._col0 > 10) and (KEY._col0 < 20)) (type: boolean)
                   Statistics: Num rows: 24 Data size: 254 Basic stats: COMPLETE Column stats: NONE
@@ -3694,6 +3537,19 @@ STAGE PLANS:
                       Map-reduce partition columns: _col1 (type: string)
                       Statistics: Num rows: 12 Data size: 127 Basic stats: COMPLETE Column stats: NONE
                       value expressions: _col0 (type: string)
+        Reducer 3 
+            Reduce Operator Tree:
+              Select Operator
+                expressions: VALUE._col0 (type: string), KEY.reducesinkkey0 (type: string)
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 12 Data size: 127 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 12 Data size: 127 Basic stats: COMPLETE Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.mapred.TextInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
 
   Stage: Stage-5
     Dependency Collection

http://git-wip-us.apache.org/repos/asf/hive/blob/538ae703/ql/src/test/results/clientpositive/spark/ptf.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/ptf.q.out b/ql/src/test/results/clientpositive/spark/ptf.q.out
index 647b83e..6beeaf4 100644
--- a/ql/src/test/results/clientpositive/spark/ptf.q.out
+++ b/ql/src/test/results/clientpositive/spark/ptf.q.out
@@ -868,8 +868,7 @@ STAGE PLANS:
     Spark
       Edges:
         Reducer 2 <- Map 1 (PARTITION-LEVEL SORT, 2)
-        Reducer 3 <- Reducer 2 (GROUP, 2)
-        Reducer 4 <- Reducer 3 (PARTITION-LEVEL SORT, 2)
+        Reducer 3 <- Reducer 2 (GROUP PARTITION-LEVEL SORT, 2)
 #### A masked pattern was here ####
       Vertices:
         Map 1 
@@ -915,7 +914,7 @@ STAGE PLANS:
                       Reduce Output Operator
                         key expressions: _col0 (type: string), _col1 (type: string), _col2 (type: int)
                         sort order: +++
-                        Map-reduce partition columns: _col0 (type: string), _col1 (type: string), _col2 (type: int)
+                        Map-reduce partition columns: _col0 (type: string)
                         Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
         Reducer 3 
             Reduce Operator Tree:
@@ -924,18 +923,6 @@ STAGE PLANS:
                 mode: mergepartial
                 outputColumnNames: _col0, _col1, _col2
                 Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE
-                Reduce Output Operator
-                  key expressions: _col0 (type: string), _col1 (type: string)
-                  sort order: ++
-                  Map-reduce partition columns: _col0 (type: string)
-                  Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE
-                  value expressions: _col2 (type: int)
-        Reducer 4 
-            Reduce Operator Tree:
-              Select Operator
-                expressions: KEY.reducesinkkey0 (type: string), KEY.reducesinkkey1 (type: string), VALUE._col0 (type: int)
-                outputColumnNames: _col0, _col1, _col2
-                Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE
                 PTF Operator
                   Function definitions:
                       Input definition

http://git-wip-us.apache.org/repos/asf/hive/blob/538ae703/ql/src/test/results/clientpositive/spark/union_remove_6_subq.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/union_remove_6_subq.q.out b/ql/src/test/results/clientpositive/spark/union_remove_6_subq.q.out
index fe95bf2..372971c 100644
--- a/ql/src/test/results/clientpositive/spark/union_remove_6_subq.q.out
+++ b/ql/src/test/results/clientpositive/spark/union_remove_6_subq.q.out
@@ -383,9 +383,8 @@ STAGE PLANS:
     Spark
       Edges:
         Reducer 2 <- Map 1 (GROUP, 2)
-        Reducer 6 <- Map 1 (GROUP, 2)
-        Reducer 3 <- Reducer 2 (GROUP, 2), Reducer 6 (GROUP, 2)
-        Reducer 4 <- Reducer 3 (PARTITION-LEVEL SORT, 2)
+        Reducer 5 <- Map 1 (GROUP, 2)
+        Reducer 3 <- Reducer 2 (GROUP PARTITION-LEVEL SORT, 2), Reducer 5 (GROUP PARTITION-LEVEL SORT, 2)
 #### A masked pattern was here ####
       Vertices:
         Map 1 
@@ -429,7 +428,7 @@ STAGE PLANS:
                     Reduce Output Operator
                       key expressions: _col0 (type: string), _col1 (type: bigint)
                       sort order: ++
-                      Map-reduce partition columns: _col0 (type: string), _col1 (type: bigint)
+                      Map-reduce partition columns: _col0 (type: string)
                       Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
         Reducer 3 
             Reduce Operator Tree:
@@ -438,17 +437,6 @@ STAGE PLANS:
                 mode: mergepartial
                 outputColumnNames: _col0, _col1
                 Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
-                Reduce Output Operator
-                  key expressions: _col0 (type: string), _col1 (type: bigint)
-                  sort order: ++
-                  Map-reduce partition columns: _col0 (type: string)
-                  Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
-        Reducer 4 
-            Reduce Operator Tree:
-              Select Operator
-                expressions: KEY.reducesinkkey0 (type: string), KEY.reducesinkkey1 (type: bigint)
-                outputColumnNames: _col0, _col1
-                Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
                 PTF Operator
                   Function definitions:
                       Input definition
@@ -480,7 +468,7 @@ STAGE PLANS:
                           input format: org.apache.hadoop.mapred.TextInputFormat
                           output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                           serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-        Reducer 6 
+        Reducer 5 
             Reduce Operator Tree:
               Group By Operator
                 aggregations: count(VALUE._col0)
@@ -496,7 +484,7 @@ STAGE PLANS:
                   Reduce Output Operator
                     key expressions: _col0 (type: string), _col1 (type: bigint)
                     sort order: ++
-                    Map-reduce partition columns: _col0 (type: string), _col1 (type: bigint)
+                    Map-reduce partition columns: _col0 (type: string)
                     Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
 
   Stage: Stage-0


[05/50] [abbrv] hive git commit: HIVE-11376 : Removes legacy code from CombineHiveInputFormat that fell back to HiveInputFormat for files with a compression codec (Rajat Khandelwal, reviewed by Amareshwari)

Posted by se...@apache.org.
HIVE-11376 : Removes legacy code from CombineHiveInputFormat that fell back to HiveInputFormat for files with a compression codec (Rajat Khandelwal, reviewed by Amareshwari)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/cfda5700
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/cfda5700
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/cfda5700

Branch: refs/heads/hbase-metastore
Commit: cfda5700a715b71f5c4c6b325d4adaa213ff7618
Parents: 6df52ed
Author: Rajat Khandelwal <pr...@apache.org>
Authored: Mon Aug 10 17:51:09 2015 +0530
Committer: Amareshwari Sriramadasu <am...@apache.org>
Committed: Mon Aug 10 17:51:09 2015 +0530

----------------------------------------------------------------------
 .../org/apache/hadoop/hive/conf/HiveConf.java   |  2 -
 .../hive/ql/io/CombineHiveInputFormat.java      | 39 --------------------
 .../hive/ql/optimizer/GenMapRedUtils.java       |  2 -
 .../org/apache/hadoop/hive/ql/plan/MapWork.java | 10 -----
 .../apache/hadoop/hive/ql/plan/PlanUtils.java   |  9 +----
 5 files changed, 1 insertion(+), 61 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/cfda5700/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
----------------------------------------------------------------------
diff --git a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
index 36bb394..9cc7987 100644
--- a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
+++ b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
@@ -1175,8 +1175,6 @@ public class HiveConf extends Configuration {
     HIVEROWOFFSET("hive.exec.rowoffset", false,
         "Whether to provide the row offset virtual column"),
 
-    HIVE_COMBINE_INPUT_FORMAT_SUPPORTS_SPLITTABLE("hive.hadoop.supports.splittable.combineinputformat", false, ""),
-
     // Optimizer
     HIVEOPTINDEXFILTER("hive.optimize.index.filter", false,
         "Whether to enable automatic use of indexes"),

http://git-wip-us.apache.org/repos/asf/hive/blob/cfda5700/ql/src/java/org/apache/hadoop/hive/ql/io/CombineHiveInputFormat.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/CombineHiveInputFormat.java b/ql/src/java/org/apache/hadoop/hive/ql/io/CombineHiveInputFormat.java
index e13c4dd..11740d1 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/CombineHiveInputFormat.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/CombineHiveInputFormat.java
@@ -374,45 +374,6 @@ public class CombineHiveInputFormat<K extends WritableComparable, V extends Writ
       }
       FileSystem inpFs = path.getFileSystem(job);
 
-      // Since there is no easy way of knowing whether MAPREDUCE-1597 is present in the tree or not,
-      // we use a configuration variable for the same
-      if (this.mrwork != null && !this.mrwork.getHadoopSupportsSplittable()) {
-        // The following code should be removed, once
-        // https://issues.apache.org/jira/browse/MAPREDUCE-1597 is fixed.
-        // Hadoop does not handle non-splittable files correctly for CombineFileInputFormat,
-        // so don't use CombineFileInputFormat for non-splittable files
-
-        //i.e., don't combine if inputformat is a TextInputFormat and has compression turned on
-
-        if (inputFormat instanceof TextInputFormat) {
-          Queue<Path> dirs = new LinkedList<Path>();
-          FileStatus fStats = inpFs.getFileStatus(path);
-
-          // If path is a directory
-          if (fStats.isDir()) {
-            dirs.offer(path);
-          } else if ((new CompressionCodecFactory(job)).getCodec(path) != null) {
-            //if compression codec is set, use HiveInputFormat.getSplits (don't combine)
-            splits = super.getSplits(job, numSplits);
-            return splits;
-          }
-
-          while (dirs.peek() != null) {
-            Path tstPath = dirs.remove();
-            FileStatus[] fStatus = inpFs.listStatus(tstPath, FileUtils.HIDDEN_FILES_PATH_FILTER);
-            for (int idx = 0; idx < fStatus.length; idx++) {
-              if (fStatus[idx].isDir()) {
-                dirs.offer(fStatus[idx].getPath());
-              } else if ((new CompressionCodecFactory(job)).getCodec(
-                  fStatus[idx].getPath()) != null) {
-                //if compression codec is set, use HiveInputFormat.getSplits (don't combine)
-                splits = super.getSplits(job, numSplits);
-                return splits;
-              }
-            }
-          }
-        }
-      }
       //don't combine if inputformat is a SymlinkTextInputFormat
       if (inputFormat instanceof SymlinkTextInputFormat) {
         splits = super.getSplits(job, numSplits);
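The block removed above is the check that decided whether split combining was safe: it walked the input path and, if any file resolved to a compression codec, fell back to plain HiveInputFormat.getSplits(). As a rough standalone sketch of that probe (not Hive code; the class and method names below are invented for illustration, and the original's hidden-file filter is omitted), using only Hadoop's CompressionCodecFactory:

    import java.io.IOException;
    import java.util.LinkedList;
    import java.util.Queue;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.io.compress.CompressionCodecFactory;

    public class CompressedInputProbe {
      // True if any file under 'root' maps to a compression codec (e.g. .gz, .bz2),
      // i.e. the case in which the removed block refused to combine splits.
      static boolean containsCompressedFile(Configuration conf, Path root) throws IOException {
        FileSystem fs = root.getFileSystem(conf);
        CompressionCodecFactory codecs = new CompressionCodecFactory(conf);
        if (!fs.getFileStatus(root).isDirectory()) {
          return codecs.getCodec(root) != null;     // single file: probe its suffix directly
        }
        Queue<Path> dirs = new LinkedList<Path>();  // breadth-first walk, like the removed loop
        dirs.offer(root);
        while (!dirs.isEmpty()) {
          for (FileStatus st : fs.listStatus(dirs.remove())) {
            if (st.isDirectory()) {
              dirs.offer(st.getPath());
            } else if (codecs.getCodec(st.getPath()) != null) {
              return true;
            }
          }
        }
        return false;
      }
    }

Per the comment in the removed block, this workaround only existed until MAPREDUCE-1597 was fixed in Hadoop, which is why the hive.hadoop.supports.splittable.combineinputformat flag is dropped from HiveConf above and from its remaining callers in GenMapRedUtils and PlanUtils below.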

http://git-wip-us.apache.org/repos/asf/hive/blob/cfda5700/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMapRedUtils.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMapRedUtils.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMapRedUtils.java
index 693d8c7..4a325fb 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMapRedUtils.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMapRedUtils.java
@@ -933,8 +933,6 @@ public final class GenMapRedUtils {
     work.setPathToAliases(new LinkedHashMap<String, ArrayList<String>>());
     work.setPathToPartitionInfo(new LinkedHashMap<String, PartitionDesc>());
     work.setAliasToWork(new LinkedHashMap<String, Operator<? extends OperatorDesc>>());
-    work.setHadoopSupportsSplittable(
-        conf.getBoolVar(HiveConf.ConfVars.HIVE_COMBINE_INPUT_FORMAT_SUPPORTS_SPLITTABLE));
     return mrWork;
   }
 

http://git-wip-us.apache.org/repos/asf/hive/blob/cfda5700/ql/src/java/org/apache/hadoop/hive/ql/plan/MapWork.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/MapWork.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/MapWork.java
index 2cb9257..bc9b645 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/plan/MapWork.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/MapWork.java
@@ -63,8 +63,6 @@ public class MapWork extends BaseWork {
 
   private static final Log LOG = LogFactory.getLog(MapWork.class);
 
-  private boolean hadoopSupportsSplittable;
-
   // use LinkedHashMap to make sure the iteration order is
   // deterministic, to ease testing
   private LinkedHashMap<String, ArrayList<String>> pathToAliases = new LinkedHashMap<String, ArrayList<String>>();
@@ -421,14 +419,6 @@ public class MapWork extends BaseWork {
     return this.mapperCannotSpanPartns;
   }
 
-  public boolean getHadoopSupportsSplittable() {
-    return hadoopSupportsSplittable;
-  }
-
-  public void setHadoopSupportsSplittable(boolean hadoopSupportsSplittable) {
-    this.hadoopSupportsSplittable = hadoopSupportsSplittable;
-  }
-
   public String getIndexIntermediateFile() {
     return indexIntermediateFile;
   }

http://git-wip-us.apache.org/repos/asf/hive/blob/cfda5700/ql/src/java/org/apache/hadoop/hive/ql/plan/PlanUtils.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/PlanUtils.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/PlanUtils.java
index 76926e7..b50eaab 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/plan/PlanUtils.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/PlanUtils.java
@@ -99,14 +99,7 @@ public final class PlanUtils {
 
   @SuppressWarnings("nls")
   public static MapredWork getMapRedWork() {
-    try {
-      MapredWork work = new MapredWork();
-      work.getMapWork().setHadoopSupportsSplittable(Hive.get().getConf().getBoolVar(
-          HiveConf.ConfVars.HIVE_COMBINE_INPUT_FORMAT_SUPPORTS_SPLITTABLE));
-      return work;
-    } catch (HiveException ex) {
-      throw new RuntimeException(ex);
-    }
+    return new MapredWork();
   }
 
   public static TableDesc getDefaultTableDesc(CreateTableDesc directoryDesc,


[49/50] [abbrv] hive git commit: HIVE-11568 : merge master into branch (Sergey Shelukhin)

Posted by se...@apache.org.
HIVE-11568 : merge master into branch (Sergey Shelukhin)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/c528294b
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/c528294b
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/c528294b

Branch: refs/heads/hbase-metastore
Commit: c528294bc99c8ba05dd0a3c3f39cc06fb27b9473
Parents: 0fa45e4 e8b2c60
Author: Sergey Shelukhin <se...@apache.org>
Authored: Fri Aug 14 15:58:43 2015 -0700
Committer: Sergey Shelukhin <se...@apache.org>
Committed: Fri Aug 14 15:58:43 2015 -0700

----------------------------------------------------------------------
 accumulo-handler/pom.xml                        |     4 -
 .../apache/hadoop/hive/ant/GenVectorCode.java   |   105 +
 .../src/main/resources/beeline-log4j.properties |    24 -
 beeline/src/main/resources/beeline-log4j2.xml   |    40 +
 bin/ext/beeline.sh                              |     2 +-
 bin/hive                                        |     3 +
 .../hadoop/hive/cli/TestOptionsProcessor.java   |     1 -
 common/pom.xml                                  |    27 +-
 .../apache/hadoop/hive/common/JavaUtils.java    |    11 +-
 .../org/apache/hadoop/hive/common/LogUtils.java |    18 +-
 .../hadoop/hive/common/ValidReadTxnList.java    |     2 +-
 .../hadoop/hive/common/type/HiveDecimal.java    |   306 -
 .../org/apache/hadoop/hive/conf/HiveConf.java   |    27 +-
 common/src/main/resources/hive-log4j.properties |    88 -
 common/src/main/resources/hive-log4j2.xml       |   111 +
 .../hadoop/hive/conf/TestHiveLogging.java       |     8 +-
 .../resources/hive-exec-log4j-test.properties   |    59 -
 .../test/resources/hive-exec-log4j2-test.xml    |    86 +
 .../test/resources/hive-log4j-test.properties   |    71 -
 common/src/test/resources/hive-log4j2-test.xml  |    95 +
 data/conf/hive-log4j-old.properties             |    82 -
 data/conf/hive-log4j.properties                 |    97 -
 data/conf/hive-log4j2.xml                       |   148 +
 data/conf/spark/log4j.properties                |    24 -
 data/conf/spark/log4j2.xml                      |    74 +
 docs/xdocs/language_manual/cli.xml              |     2 +-
 .../test/results/positive/hbase_timestamp.q.out |     8 +-
 hcatalog/bin/hcat_server.sh                     |     2 +-
 hcatalog/bin/templeton.cmd                      |     4 +-
 .../mapreduce/DefaultOutputFormatContainer.java |     7 +-
 ...namicPartitionFileRecordWriterContainer.java |     3 +-
 .../mapreduce/FileOutputFormatContainer.java    |     3 +-
 .../hive/hcatalog/mapreduce/PartInfo.java       |    32 +-
 .../hive/hcatalog/mapreduce/SpecialCases.java   |     8 +-
 .../mapreduce/TestHCatMultiOutputFormat.java    |     6 +-
 hcatalog/scripts/hcat_server_start.sh           |     2 +-
 .../content/xdocs/configuration.xml             |     2 +-
 .../src/documentation/content/xdocs/install.xml |     2 +-
 .../deployers/config/hive/hive-log4j.properties |    88 -
 .../deployers/config/hive/hive-log4j2.xml       |   111 +
 .../templeton/deployers/start_hive_services.sh  |     2 +-
 .../webhcat/svr/src/main/bin/webhcat_server.sh  |     4 +-
 .../src/main/config/webhcat-log4j.properties    |    45 -
 .../svr/src/main/config/webhcat-log4j2.xml      |    75 +
 .../antlr4/org/apache/hive/hplsql/Hplsql.g4     |   164 +-
 .../main/java/org/apache/hive/hplsql/Conn.java  |     6 +
 .../java/org/apache/hive/hplsql/Converter.java  |    41 +-
 .../main/java/org/apache/hive/hplsql/Exec.java  |    65 +-
 .../java/org/apache/hive/hplsql/Expression.java |    73 +-
 .../main/java/org/apache/hive/hplsql/Meta.java  |    98 +
 .../main/java/org/apache/hive/hplsql/Query.java |    55 +
 .../java/org/apache/hive/hplsql/Select.java     |    47 +-
 .../java/org/apache/hive/hplsql/Signal.java     |     2 +-
 .../main/java/org/apache/hive/hplsql/Stmt.java  |    97 +-
 .../main/java/org/apache/hive/hplsql/Var.java   |    43 +-
 .../apache/hive/hplsql/functions/Function.java  |    49 +-
 .../hive/hplsql/functions/FunctionDatetime.java |    14 +-
 .../hive/hplsql/functions/FunctionMisc.java     |    22 +-
 .../hive/hplsql/functions/FunctionOra.java      |    31 +-
 .../hive/hplsql/functions/FunctionString.java   |    46 +-
 .../org/apache/hive/hplsql/TestHplsqlLocal.java |     5 +
 .../apache/hive/hplsql/TestHplsqlOffline.java   |    76 +
 .../test/queries/db/create_procedure_mssql.sql  |    52 +
 .../src/test/queries/db/cursor_attributes.sql   |    60 +
 hplsql/src/test/queries/db/map_object.sql       |     9 +
 hplsql/src/test/queries/db/select_into.sql      |    17 +
 .../src/test/queries/db/set_current_schema.sql  |     6 +
 hplsql/src/test/queries/db/sys_refcursor.sql    |    65 +
 hplsql/src/test/queries/db/use.sql              |     2 +
 .../queries/local/exception_divide_by_zero.sql  |    11 +
 .../test/queries/offline/create_table_mssql.sql |    43 +
 .../test/queries/offline/create_table_ora.sql   |     4 +
 .../results/db/create_procedure_mssql.out.txt   |    45 +
 .../test/results/db/cursor_attributes.out.txt   |    33 +
 hplsql/src/test/results/db/map_object.out.txt   |    17 +
 hplsql/src/test/results/db/select_into.out.txt  |    19 +
 .../test/results/db/set_current_schema.out.txt  |    12 +
 .../src/test/results/db/sys_refcursor.out.txt   |    36 +
 hplsql/src/test/results/db/use.out.txt          |     4 +
 .../test/results/local/create_function.out.txt  |     4 +-
 hplsql/src/test/results/local/declare.out.txt   |     4 +-
 .../local/exception_divide_by_zero.out.txt      |     8 +
 .../results/offline/create_table_mssql.out.txt  |    24 +
 .../results/offline/create_table_ora.out.txt    |     4 +
 .../hive/metastore/TestHiveMetaStore.java       |    96 +-
 .../operation/TestOperationLoggingLayout.java   |   136 +
 itests/pom.xml                                  |     2 +-
 itests/qtest-spark/pom.xml                      |    24 +
 itests/qtest/pom.xml                            |    28 +-
 .../test/resources/testconfiguration.properties |    50 +-
 .../org/apache/hadoop/hive/ql/QTestUtil.java    |    62 +-
 jdbc/pom.xml                                    |     1 +
 metastore/if/hive_metastore.thrift              |     5 +
 .../gen/thrift/gen-cpp/ThriftHiveMetastore.cpp  |  5526 ++-
 .../gen/thrift/gen-cpp/ThriftHiveMetastore.h    |  7970 ++--
 .../ThriftHiveMetastore_server.skeleton.cpp     |     5 +
 .../thrift/gen-cpp/hive_metastore_constants.cpp |     2 +-
 .../thrift/gen-cpp/hive_metastore_constants.h   |     2 +-
 .../gen/thrift/gen-cpp/hive_metastore_types.cpp |  6204 ++-
 .../gen/thrift/gen-cpp/hive_metastore_types.h   |  2666 +-
 .../hive/metastore/api/AbortTxnRequest.java     |    24 +-
 .../metastore/api/AddDynamicPartitions.java     |    69 +-
 .../metastore/api/AddPartitionsRequest.java     |    80 +-
 .../hive/metastore/api/AddPartitionsResult.java |    48 +-
 .../hadoop/hive/metastore/api/AggrStats.java    |    54 +-
 .../metastore/api/AlreadyExistsException.java   |    24 +-
 .../metastore/api/BinaryColumnStatsData.java    |    40 +-
 .../metastore/api/BooleanColumnStatsData.java   |    40 +-
 .../hive/metastore/api/CheckLockRequest.java    |    24 +-
 .../hive/metastore/api/ColumnStatistics.java    |    54 +-
 .../metastore/api/ColumnStatisticsData.java     |    20 +-
 .../metastore/api/ColumnStatisticsDesc.java     |    58 +-
 .../hive/metastore/api/ColumnStatisticsObj.java |    40 +-
 .../hive/metastore/api/CommitTxnRequest.java    |    24 +-
 .../hive/metastore/api/CompactionRequest.java   |    62 +-
 .../hive/metastore/api/CompactionType.java      |     2 +-
 .../api/ConfigValSecurityException.java         |    24 +-
 .../api/CurrentNotificationEventId.java         |    24 +-
 .../hadoop/hive/metastore/api/Database.java     |   115 +-
 .../apache/hadoop/hive/metastore/api/Date.java  |    24 +-
 .../hive/metastore/api/DateColumnStatsData.java |    50 +-
 .../hadoop/hive/metastore/api/Decimal.java      |    41 +-
 .../metastore/api/DecimalColumnStatsData.java   |    50 +-
 .../metastore/api/DoubleColumnStatsData.java    |    50 +-
 .../hive/metastore/api/DropPartitionsExpr.java  |    43 +-
 .../metastore/api/DropPartitionsRequest.java    |    82 +-
 .../metastore/api/DropPartitionsResult.java     |    48 +-
 .../hive/metastore/api/EnvironmentContext.java  |    61 +-
 .../hive/metastore/api/EventRequestType.java    |     2 +-
 .../hadoop/hive/metastore/api/FieldSchema.java  |    58 +-
 .../hive/metastore/api/FireEventRequest.java    |    79 +-
 .../metastore/api/FireEventRequestData.java     |    20 +-
 .../hive/metastore/api/FireEventResponse.java   |    16 +-
 .../hadoop/hive/metastore/api/Function.java     |   110 +-
 .../hadoop/hive/metastore/api/FunctionType.java |     2 +-
 .../metastore/api/GetAllFunctionsResponse.java  |   447 +
 .../metastore/api/GetOpenTxnsInfoResponse.java  |    54 +-
 .../hive/metastore/api/GetOpenTxnsResponse.java |    53 +-
 .../api/GetPrincipalsInRoleRequest.java         |    24 +-
 .../api/GetPrincipalsInRoleResponse.java        |    46 +-
 .../api/GetRoleGrantsForPrincipalRequest.java   |    36 +-
 .../api/GetRoleGrantsForPrincipalResponse.java  |    46 +-
 .../api/GrantRevokePrivilegeRequest.java        |    46 +-
 .../api/GrantRevokePrivilegeResponse.java       |    26 +-
 .../metastore/api/GrantRevokeRoleRequest.java   |    86 +-
 .../metastore/api/GrantRevokeRoleResponse.java  |    26 +-
 .../hive/metastore/api/GrantRevokeType.java     |     2 +-
 .../hive/metastore/api/HeartbeatRequest.java    |    34 +-
 .../metastore/api/HeartbeatTxnRangeRequest.java |    32 +-
 .../api/HeartbeatTxnRangeResponse.java          |    74 +-
 .../hive/metastore/api/HiveObjectPrivilege.java |    52 +-
 .../hive/metastore/api/HiveObjectRef.java       |    81 +-
 .../hive/metastore/api/HiveObjectType.java      |     2 +-
 .../apache/hadoop/hive/metastore/api/Index.java |   133 +-
 .../api/IndexAlreadyExistsException.java        |    24 +-
 .../metastore/api/InsertEventRequestData.java   |    45 +-
 .../metastore/api/InvalidInputException.java    |    24 +-
 .../metastore/api/InvalidObjectException.java   |    24 +-
 .../api/InvalidOperationException.java          |    24 +-
 .../api/InvalidPartitionException.java          |    24 +-
 .../hive/metastore/api/LockComponent.java       |    66 +-
 .../hadoop/hive/metastore/api/LockLevel.java    |     2 +-
 .../hadoop/hive/metastore/api/LockRequest.java  |    72 +-
 .../hadoop/hive/metastore/api/LockResponse.java |    36 +-
 .../hadoop/hive/metastore/api/LockState.java    |     2 +-
 .../hadoop/hive/metastore/api/LockType.java     |     2 +-
 .../hive/metastore/api/LongColumnStatsData.java |    50 +-
 .../hive/metastore/api/MetaException.java       |    24 +-
 .../hive/metastore/api/NoSuchLockException.java |    24 +-
 .../metastore/api/NoSuchObjectException.java    |    24 +-
 .../hive/metastore/api/NoSuchTxnException.java  |    24 +-
 .../hive/metastore/api/NotificationEvent.java   |    66 +-
 .../metastore/api/NotificationEventRequest.java |    34 +-
 .../api/NotificationEventResponse.java          |    46 +-
 .../hive/metastore/api/OpenTxnRequest.java      |    40 +-
 .../hive/metastore/api/OpenTxnsResponse.java    |    45 +-
 .../apache/hadoop/hive/metastore/api/Order.java |    32 +-
 .../hadoop/hive/metastore/api/Partition.java    |   156 +-
 .../hive/metastore/api/PartitionEventType.java  |     2 +-
 .../api/PartitionListComposingSpec.java         |    46 +-
 .../hive/metastore/api/PartitionSpec.java       |    58 +-
 .../api/PartitionSpecWithSharedSD.java          |    54 +-
 .../hive/metastore/api/PartitionWithoutSD.java  |   124 +-
 .../metastore/api/PartitionsByExprRequest.java  |    67 +-
 .../metastore/api/PartitionsByExprResult.java   |    54 +-
 .../metastore/api/PartitionsStatsRequest.java   |    90 +-
 .../metastore/api/PartitionsStatsResult.java    |    72 +-
 .../metastore/api/PrincipalPrivilegeSet.java    |   184 +-
 .../hive/metastore/api/PrincipalType.java       |     2 +-
 .../hadoop/hive/metastore/api/PrivilegeBag.java |    46 +-
 .../hive/metastore/api/PrivilegeGrantInfo.java  |    60 +-
 .../hive/metastore/api/RequestPartsSpec.java    |    56 +-
 .../hadoop/hive/metastore/api/ResourceType.java |     2 +-
 .../hadoop/hive/metastore/api/ResourceUri.java  |    36 +-
 .../apache/hadoop/hive/metastore/api/Role.java  |    40 +-
 .../hive/metastore/api/RolePrincipalGrant.java  |    80 +-
 .../hadoop/hive/metastore/api/Schema.java       |    91 +-
 .../hadoop/hive/metastore/api/SerDeInfo.java    |    93 +-
 .../api/SetPartitionsStatsRequest.java          |    46 +-
 .../hive/metastore/api/ShowCompactRequest.java  |    16 +-
 .../hive/metastore/api/ShowCompactResponse.java |    46 +-
 .../api/ShowCompactResponseElement.java         |    86 +-
 .../hive/metastore/api/ShowLocksRequest.java    |    16 +-
 .../hive/metastore/api/ShowLocksResponse.java   |    46 +-
 .../metastore/api/ShowLocksResponseElement.java |   114 +-
 .../hadoop/hive/metastore/api/SkewedInfo.java   |   147 +-
 .../hive/metastore/api/StorageDescriptor.java   |   242 +-
 .../metastore/api/StringColumnStatsData.java    |    48 +-
 .../apache/hadoop/hive/metastore/api/Table.java |   189 +-
 .../hive/metastore/api/TableStatsRequest.java   |    61 +-
 .../hive/metastore/api/TableStatsResult.java    |    46 +-
 .../hive/metastore/api/ThriftHiveMetastore.java | 33504 +++++++++++------
 .../hive/metastore/api/TxnAbortedException.java |    24 +-
 .../hadoop/hive/metastore/api/TxnInfo.java      |    52 +-
 .../hive/metastore/api/TxnOpenException.java    |    24 +-
 .../hadoop/hive/metastore/api/TxnState.java     |     2 +-
 .../apache/hadoop/hive/metastore/api/Type.java  |    72 +-
 .../hive/metastore/api/UnknownDBException.java  |    24 +-
 .../api/UnknownPartitionException.java          |    24 +-
 .../metastore/api/UnknownTableException.java    |    24 +-
 .../hive/metastore/api/UnlockRequest.java       |    24 +-
 .../hadoop/hive/metastore/api/Version.java      |    32 +-
 .../metastore/api/hive_metastoreConstants.java  |     7 +-
 .../gen-php/metastore/ThriftHiveMetastore.php   |  4599 ++-
 .../src/gen/thrift/gen-php/metastore/Types.php  |  1184 +-
 .../hive_metastore/ThriftHiveMetastore-remote   |   620 +-
 .../hive_metastore/ThriftHiveMetastore.py       |  4624 ++-
 .../thrift/gen-py/hive_metastore/constants.py   |     2 +-
 .../gen/thrift/gen-py/hive_metastore/ttypes.py  |   856 +-
 .../thrift/gen-rb/hive_metastore_constants.rb   |     2 +-
 .../gen/thrift/gen-rb/hive_metastore_types.rb   |    18 +-
 .../gen/thrift/gen-rb/thrift_hive_metastore.rb  |    62 +-
 .../hadoop/hive/metastore/HiveMetaStore.java    |    34 +-
 .../hive/metastore/HiveMetaStoreClient.java     |     7 +
 .../hadoop/hive/metastore/IMetaStoreClient.java |     4 +
 .../hadoop/hive/metastore/ObjectStore.java      |    35 +-
 .../apache/hadoop/hive/metastore/RawStore.java  |     7 +
 .../hadoop/hive/metastore/hbase/HBaseStore.java |    16 +
 .../hadoop/hive/metastore/txn/TxnHandler.java   |    32 +-
 .../metastore/txn/ValidCompactorTxnList.java    |     2 +-
 .../DummyRawStoreControlledCommit.java          |     7 +
 .../DummyRawStoreForJdoConnection.java          |     6 +
 .../metastore/txn/TestCompactionTxnHandler.java |    40 +-
 .../hive/metastore/txn/TestTxnHandler.java      |    66 +-
 packaging/src/main/assembly/bin.xml             |    17 +-
 pom.xml                                         |    52 +-
 ql/if/queryplan.thrift                          |     1 +
 ql/pom.xml                                      |    24 +-
 .../gen/thrift/gen-cpp/queryplan_constants.cpp  |     2 +-
 ql/src/gen/thrift/gen-cpp/queryplan_constants.h |     2 +-
 ql/src/gen/thrift/gen-cpp/queryplan_types.cpp   |   796 +-
 ql/src/gen/thrift/gen-cpp/queryplan_types.h     |   294 +-
 .../hadoop/hive/ql/plan/api/Adjacency.java      |    65 +-
 .../hadoop/hive/ql/plan/api/AdjacencyType.java  |     2 +-
 .../apache/hadoop/hive/ql/plan/api/Graph.java   |    87 +-
 .../hadoop/hive/ql/plan/api/NodeType.java       |     2 +-
 .../hadoop/hive/ql/plan/api/Operator.java       |   142 +-
 .../hadoop/hive/ql/plan/api/OperatorType.java   |     7 +-
 .../apache/hadoop/hive/ql/plan/api/Query.java   |   176 +-
 .../hadoop/hive/ql/plan/api/QueryPlan.java      |    62 +-
 .../apache/hadoop/hive/ql/plan/api/Stage.java   |   172 +-
 .../hadoop/hive/ql/plan/api/StageType.java      |     2 +-
 .../apache/hadoop/hive/ql/plan/api/Task.java    |   182 +-
 .../hadoop/hive/ql/plan/api/TaskType.java       |     2 +-
 ql/src/gen/thrift/gen-php/Types.php             |   119 +-
 ql/src/gen/thrift/gen-py/queryplan/constants.py |     2 +-
 ql/src/gen/thrift/gen-py/queryplan/ttypes.py    |    87 +-
 ql/src/gen/thrift/gen-rb/queryplan_constants.rb |     2 +-
 ql/src/gen/thrift/gen-rb/queryplan_types.rb     |     7 +-
 ...tringGroupColumnCompareStringGroupColumn.txt |   112 +-
 ...gGroupColumnCompareStringGroupScalarBase.txt |    12 +-
 ...gGroupScalarCompareStringGroupColumnBase.txt |    12 +-
 ...tringGroupColumnCompareStringGroupColumn.txt |   112 +-
 ...gGroupColumnCompareStringGroupScalarBase.txt |    12 +-
 ...gGroupScalarCompareStringGroupColumnBase.txt |    12 +-
 .../java/org/apache/hadoop/hive/ql/Context.java |     1 -
 .../java/org/apache/hadoop/hive/ql/Driver.java  |   209 +-
 .../org/apache/hadoop/hive/ql/ErrorMsg.java     |    13 +-
 .../org/apache/hadoop/hive/ql/QueryPlan.java    |    18 +-
 .../hadoop/hive/ql/exec/FileSinkOperator.java   |     2 +-
 .../hadoop/hive/ql/exec/FunctionRegistry.java   |     3 +-
 .../hive/ql/exec/HashTableSinkOperator.java     |     6 +-
 .../apache/hadoop/hive/ql/exec/MoveTask.java    |     4 +-
 .../apache/hadoop/hive/ql/exec/Operator.java    |    25 -
 .../hadoop/hive/ql/exec/OperatorFactory.java    |    11 +
 .../ql/exec/SparkHashTableSinkOperator.java     |    17 +-
 .../apache/hadoop/hive/ql/exec/Utilities.java   |    36 +-
 .../hadoop/hive/ql/exec/mr/ExecDriver.java      |    29 +-
 .../hive/ql/exec/mr/HadoopJobExecHelper.java    |    20 +-
 .../persistence/HybridHashTableContainer.java   |     6 +
 .../persistence/MapJoinTableContainerSerDe.java |    63 +-
 .../hive/ql/exec/spark/HashTableLoader.java     |    26 +-
 .../ql/exec/spark/HiveSparkClientFactory.java   |    10 +-
 .../hive/ql/exec/spark/KryoSerializer.java      |     4 +
 .../ql/exec/spark/RemoteHiveSparkClient.java    |    57 +-
 .../exec/spark/SparkDynamicPartitionPruner.java |   268 +
 .../hadoop/hive/ql/exec/spark/SparkPlan.java    |     3 -
 .../hive/ql/exec/spark/SparkPlanGenerator.java  |    15 +-
 .../hadoop/hive/ql/exec/spark/SparkTask.java    |     1 +
 .../hive/ql/exec/spark/SparkUtilities.java      |    56 +
 .../spark/status/impl/LocalSparkJobStatus.java  |     2 +-
 .../spark/status/impl/RemoteSparkJobStatus.java |     2 +-
 .../hive/ql/exec/vector/BytesColumnVector.java  |   347 -
 .../hive/ql/exec/vector/ColumnVector.java       |   178 -
 .../ql/exec/vector/DecimalColumnVector.java     |   125 -
 .../hive/ql/exec/vector/DoubleColumnVector.java |   161 -
 .../hive/ql/exec/vector/LongColumnVector.java   |   205 -
 .../VectorSparkHashTableSinkOperator.java       |   104 +
 ...VectorSparkPartitionPruningSinkOperator.java |    99 +
 .../ql/exec/vector/VectorizationContext.java    |    51 +-
 .../hive/ql/exec/vector/VectorizedRowBatch.java |   186 -
 .../BRoundWithNumDigitsDoubleToDouble.java      |    42 +
 .../ql/exec/vector/expressions/DecimalUtil.java |    18 +
 .../vector/expressions/FilterExprAndExpr.java   |     8 +-
 .../vector/expressions/FilterExprOrExpr.java    |   140 +-
 ...FuncBRoundWithNumDigitsDecimalToDecimal.java |    40 +
 .../FuncRoundWithNumDigitsDecimalToDecimal.java |    14 +-
 .../ql/exec/vector/expressions/MathExpr.java    |    22 +
 .../ql/exec/vector/expressions/StringExpr.java  |    51 +
 .../hadoop/hive/ql/hooks/LineageInfo.java       |     9 +-
 .../hadoop/hive/ql/hooks/LineageLogger.java     |    44 +-
 .../hive/ql/hooks/PostExecOrcFileDump.java      |   120 +
 .../ql/hooks/PostExecTezSummaryPrinter.java     |    72 +
 .../hive/ql/io/CombineHiveInputFormat.java      |    98 +-
 .../hadoop/hive/ql/io/HiveInputFormat.java      |    46 +-
 .../apache/hadoop/hive/ql/io/orc/FileDump.java  |     4 +
 .../apache/hadoop/hive/ql/io/orc/OrcConf.java   |   129 +-
 .../apache/hadoop/hive/ql/io/orc/OrcFile.java   |   141 +-
 .../hadoop/hive/ql/io/orc/OrcInputFormat.java   |    38 +-
 .../hadoop/hive/ql/io/orc/OrcOutputFormat.java  |    67 +-
 .../apache/hadoop/hive/ql/io/orc/OrcSerde.java  |     6 +-
 .../apache/hadoop/hive/ql/io/orc/Reader.java    |     7 +
 .../hadoop/hive/ql/io/orc/ReaderImpl.java       |     5 +-
 .../hadoop/hive/ql/io/orc/RecordReaderImpl.java |    48 +-
 .../hive/ql/io/orc/TreeReaderFactory.java       |    12 +-
 .../parquet/read/DataWritableReadSupport.java   |    10 +-
 .../read/ParquetFilterPredicateConverter.java   |   148 +
 .../read/ParquetRecordReaderWrapper.java        |   125 +-
 .../ql/io/rcfile/stats/PartialScanTask.java     |    20 +-
 .../hive/ql/io/sarg/ConvertAstToSearchArg.java  |   439 +
 .../hive/ql/io/sarg/SearchArgumentFactory.java  |    56 -
 .../hive/ql/io/sarg/SearchArgumentImpl.java     |  1027 -
 .../hadoop/hive/ql/lib/PreOrderOnceWalker.java  |    44 +
 .../hadoop/hive/ql/lockmgr/DbLockManager.java   |    12 +-
 .../hadoop/hive/ql/lockmgr/DbTxnManager.java    |    71 +-
 .../hadoop/hive/ql/lockmgr/DummyTxnManager.java |     8 +
 .../hadoop/hive/ql/lockmgr/HiveTxnManager.java  |    21 +
 .../hive/ql/lockmgr/HiveTxnManagerImpl.java     |    10 +
 .../hadoop/hive/ql/lockmgr/LockException.java   |     8 +-
 .../hadoop/hive/ql/log/HiveEventCounter.java    |   135 +
 .../apache/hadoop/hive/ql/log/NullAppender.java |    63 +
 .../ql/log/PidDailyRollingFileAppender.java     |    33 -
 .../hive/ql/log/PidFilePatternConverter.java    |    62 +
 .../apache/hadoop/hive/ql/metadata/Hive.java    |   107 +-
 .../hadoop/hive/ql/metadata/HiveException.java  |     3 +
 .../hadoop/hive/ql/metadata/Partition.java      |     2 +-
 .../hadoop/hive/ql/metadata/TableIterable.java  |   104 +
 .../hadoop/hive/ql/optimizer/ColumnPruner.java  |     4 +
 .../hive/ql/optimizer/ColumnPrunerProcCtx.java  |    95 +-
 .../ql/optimizer/ColumnPrunerProcFactory.java   |    42 +-
 .../optimizer/ConstantPropagateProcFactory.java |   123 +-
 .../DynamicPartitionPruningOptimization.java    |    44 +-
 .../hive/ql/optimizer/GenMapRedUtils.java       |    20 +-
 .../hive/ql/optimizer/GroupByOptimizer.java     |    58 +-
 .../ql/optimizer/OperatorComparatorFactory.java |   552 +
 .../hadoop/hive/ql/optimizer/Optimizer.java     |     8 +-
 .../hive/ql/optimizer/PointLookupOptimizer.java |   280 +
 .../hive/ql/optimizer/SimpleFetchOptimizer.java |     5 +-
 .../SparkRemoveDynamicPruningBySize.java        |    73 +
 .../calcite/rules/HiveJoinToMultiJoinRule.java  |    82 +-
 .../calcite/translator/ExprNodeConverter.java   |    21 +-
 .../calcite/translator/HiveOpConverter.java     |    46 +-
 .../translator/PlanModifierForASTConv.java      |     2 +-
 .../translator/PlanModifierForReturnPath.java   |     6 +-
 .../correlation/AbstractCorrelationProcCtx.java |     7 +
 .../correlation/CorrelationUtilities.java       |    11 +-
 .../correlation/ReduceSinkDeDuplication.java    |     6 +-
 .../ql/optimizer/lineage/ExprProcFactory.java   |     9 +-
 .../hive/ql/optimizer/lineage/LineageCtx.java   |    34 +-
 .../ql/optimizer/lineage/OpProcFactory.java     |    10 +-
 .../ql/optimizer/pcr/PcrExprProcFactory.java    |   154 +-
 .../physical/GenSparkSkewJoinProcessor.java     |    14 +-
 .../hive/ql/optimizer/physical/Vectorizer.java  |    24 +
 .../hive/ql/optimizer/ppr/OpProcFactory.java    |     3 +-
 .../hive/ql/optimizer/ppr/PartitionPruner.java  |    69 +-
 .../spark/CombineEquivalentWorkResolver.java    |   292 +
 .../spark/SparkPartitionPruningSinkDesc.java    |   100 +
 .../spark/SparkReduceSinkMapJoinProc.java       |     2 +-
 .../stats/annotation/StatsRulesProcFactory.java |    49 +-
 .../apache/hadoop/hive/ql/parse/ASTNode.java    |   139 +-
 .../hive/ql/parse/BaseSemanticAnalyzer.java     |    13 +
 .../hadoop/hive/ql/parse/CalcitePlanner.java    |    45 +-
 .../org/apache/hadoop/hive/ql/parse/HiveLexer.g |    11 +
 .../apache/hadoop/hive/ql/parse/HiveParser.g    |    70 +
 .../hadoop/hive/ql/parse/IdentifiersParser.g    |    29 +-
 .../apache/hadoop/hive/ql/parse/ParseUtils.java |     2 +-
 .../hadoop/hive/ql/parse/SemanticAnalyzer.java  |    74 +-
 .../hive/ql/parse/SemanticAnalyzerFactory.java  |    12 +
 .../hive/ql/parse/TypeCheckProcFactory.java     |    42 +-
 .../ql/parse/spark/GenSparkProcContext.java     |    14 +-
 .../hive/ql/parse/spark/GenSparkUtils.java      |   111 +-
 .../parse/spark/OptimizeSparkProcContext.java   |    16 +-
 .../hive/ql/parse/spark/SparkCompiler.java      |   180 +-
 .../SparkPartitionPruningSinkOperator.java      |   142 +
 .../hive/ql/parse/spark/SplitOpTreeForDPP.java  |   151 +
 .../hive/ql/plan/ExprNodeConstantDesc.java      |    29 +-
 .../apache/hadoop/hive/ql/plan/FilterDesc.java  |    14 +-
 .../hadoop/hive/ql/plan/HiveOperation.java      |    32 +-
 .../hadoop/hive/ql/plan/JoinCondDesc.java       |    14 +
 .../apache/hadoop/hive/ql/plan/JoinDesc.java    |     4 +
 .../org/apache/hadoop/hive/ql/plan/MapWork.java |    20 +-
 .../hadoop/hive/ql/plan/PartitionDesc.java      |    39 +-
 .../apache/hadoop/hive/ql/plan/PlanUtils.java   |     9 +-
 .../hadoop/hive/ql/plan/ReduceSinkDesc.java     |     1 +
 .../hive/ql/plan/SparkHashTableSinkDesc.java    |    11 +
 .../hadoop/hive/ql/plan/TableScanDesc.java      |     6 +-
 .../hive/ql/ppd/SyntheticJoinPredicate.java     |    14 +-
 .../ql/processors/CommandProcessorResponse.java |    21 +-
 .../hadoop/hive/ql/processors/HiveCommand.java  |     3 +
 .../hadoop/hive/ql/processors/SetProcessor.java |     4 +
 .../authorization/plugin/HiveOperationType.java |     5 +
 .../plugin/sqlstd/Operation2Privilege.java      |    11 +
 .../hadoop/hive/ql/session/SessionState.java    |    44 +-
 .../apache/hadoop/hive/ql/stats/StatsUtils.java |    54 +-
 .../hive/ql/udf/generic/GenericUDAFStd.java     |     2 +
 .../ql/udf/generic/GenericUDAFVariance.java     |     2 +
 .../hive/ql/udf/generic/GenericUDFBRound.java   |    68 +
 .../hive/ql/udf/generic/GenericUDFBridge.java   |     8 +-
 .../hive/ql/udf/generic/GenericUDFIn.java       |    14 +-
 .../hive/ql/udf/generic/GenericUDFOPAnd.java    |    59 +-
 .../hive/ql/udf/generic/GenericUDFOPOr.java     |    59 +-
 .../hive/ql/udf/generic/GenericUDFRound.java    |    41 +-
 .../hive/ql/udf/generic/GenericUDFStruct.java   |    25 +-
 .../hadoop/hive/ql/udf/generic/RoundUtils.java  |    14 +
 .../main/resources/hive-exec-log4j.properties   |    77 -
 ql/src/main/resources/hive-exec-log4j2.xml      |   110 +
 .../apache/hadoop/hive/ql/TestTxnCommands.java  |   473 +
 .../exec/vector/TestVectorizationContext.java   |    93 +
 .../exec/vector/TestVectorizedRowBatchCtx.java  |     6 +-
 .../TestVectorLogicalExpressions.java           |   282 +
 .../hive/ql/io/orc/TestInputOutputFormat.java   |   118 +-
 .../hadoop/hive/ql/io/orc/TestOrcFile.java      |    11 +-
 .../hive/ql/io/orc/TestRecordReaderImpl.java    |    63 +-
 .../hive/ql/io/orc/TestVectorizedORCReader.java |    75 +-
 .../parquet/TestParquetRecordReaderWrapper.java |   155 +
 .../read/TestParquetFilterPredicate.java        |    51 +
 .../ql/io/sarg/TestConvertAstToSearchArg.java   |  2856 ++
 .../hive/ql/io/sarg/TestSearchArgumentImpl.java |  2891 +-
 .../hive/ql/lockmgr/TestDbTxnManager.java       |    55 +-
 .../hive/ql/lockmgr/TestDbTxnManager2.java      |     2 +-
 .../hadoop/hive/ql/log/TestLog4j2Appenders.java |    95 +
 .../hadoop/hive/ql/metadata/StringAppender.java |   128 +
 .../hadoop/hive/ql/metadata/TestHive.java       |    50 +-
 .../positive/TestTransactionStatement.java      |   102 +
 .../hive/ql/session/TestSessionState.java       |     2 +-
 .../ql/udf/generic/TestGenericUDFBRound.java    |   202 +
 .../queries/clientnegative/ctas_noemptyfolder.q |    10 +
 .../clientnegative/mismatch_columns_insertion.q |     4 +
 .../annotate_stats_deep_filters.q               |    67 +
 .../clientpositive/authorization_1_sql_std.q    |     4 +
 .../clientpositive/cast_tinyint_to_double.q     |     7 +
 ql/src/test/queries/clientpositive/cbo_rp_gby.q |    24 +
 .../queries/clientpositive/cbo_rp_gby_empty.q   |    30 +
 .../test/queries/clientpositive/cbo_rp_insert.q |    17 +
 .../test/queries/clientpositive/cbo_rp_join.q   |    65 +
 .../test/queries/clientpositive/cbo_rp_limit.q  |    16 +
 .../queries/clientpositive/cbo_rp_semijoin.q    |    17 +
 .../clientpositive/cbo_rp_simple_select.q       |    56 +
 .../test/queries/clientpositive/cbo_rp_stats.q  |    10 +
 .../queries/clientpositive/cbo_rp_subq_exists.q |    67 +
 .../queries/clientpositive/cbo_rp_subq_in.q     |    56 +
 .../queries/clientpositive/cbo_rp_subq_not_in.q |    81 +
 .../queries/clientpositive/cbo_rp_udf_udaf.q    |    20 +
 .../test/queries/clientpositive/cbo_rp_union.q  |    14 +
 .../test/queries/clientpositive/cbo_rp_views.q  |    46 +
 .../queries/clientpositive/cbo_rp_windowing.q   |    21 +
 .../queries/clientpositive/compustat_avro.q     |     8 +-
 .../test/queries/clientpositive/create_like.q   |    12 +
 .../queries/clientpositive/dynamic_rdd_cache.q  |   111 +
 .../queries/clientpositive/flatten_and_or.q     |    17 +
 ql/src/test/queries/clientpositive/groupby5.q   |     2 +
 .../clientpositive/insertoverwrite_bucket.q     |    28 +
 ql/src/test/queries/clientpositive/lineage3.q   |    22 +-
 .../clientpositive/load_dyn_part14_win.q        |    18 +-
 .../queries/clientpositive/macro_duplicate.q    |    10 +
 .../test/queries/clientpositive/orc_file_dump.q |    57 +
 .../test/queries/clientpositive/orc_ppd_basic.q |   177 +
 .../clientpositive/parquet_predicate_pushdown.q |     9 +
 .../spark_dynamic_partition_pruning.q           |   180 +
 .../spark_dynamic_partition_pruning_2.q         |   118 +
 ...spark_vectorized_dynamic_partition_pruning.q |   192 +
 .../test/queries/clientpositive/stats_ppr_all.q |    24 +
 ql/src/test/queries/clientpositive/structin.q   |    17 +
 ql/src/test/queries/clientpositive/udf_bround.q |    44 +
 .../clientpositive/udf_from_utc_timestamp.q     |    30 +-
 .../queries/clientpositive/udf_percentile.q     |     2 +
 .../clientpositive/udf_to_utc_timestamp.q       |    30 +-
 .../clientpositive/unionall_unbalancedppd.q     |   192 +
 .../test/queries/clientpositive/vector_acid3.q  |    17 +
 .../test/queries/clientpositive/vector_bround.q |    14 +
 .../clientnegative/ctas_noemptyfolder.q.out     |    19 +
 .../clientnegative/exchange_partition.q.out     |     2 +-
 .../mismatch_columns_insertion.q.out            |     9 +
 .../groupby2_map_skew_multi_distinct.q.out      |     9 +
 .../spark/groupby2_multi_distinct.q.out         |     9 +
 .../groupby3_map_skew_multi_distinct.q.out      |     9 +
 .../spark/groupby3_multi_distinct.q.out         |     9 +
 .../spark/groupby_grouping_sets7.q.out          |     9 +
 .../alter_partition_coltype.q.out               |    12 +-
 .../annotate_stats_deep_filters.q.out           |   244 +
 .../clientpositive/annotate_stats_filter.q.out  |     8 +-
 .../authorization_1_sql_std.q.out               |    11 +
 .../clientpositive/cast_tinyint_to_double.q.out |    38 +
 .../results/clientpositive/cbo_rp_gby.q.out     |   124 +
 .../clientpositive/cbo_rp_gby_empty.q.out       |    77 +
 .../results/clientpositive/cbo_rp_insert.q.out  |    89 +
 .../results/clientpositive/cbo_rp_join.q.out    | 15028 ++++++++
 .../results/clientpositive/cbo_rp_limit.q.out   |    90 +
 .../clientpositive/cbo_rp_semijoin.q.out        |   440 +
 .../clientpositive/cbo_rp_simple_select.q.out   |   755 +
 .../results/clientpositive/cbo_rp_stats.q.out   |    14 +
 .../clientpositive/cbo_rp_subq_exists.q.out     |   297 +
 .../results/clientpositive/cbo_rp_subq_in.q.out |   151 +
 .../clientpositive/cbo_rp_subq_not_in.q.out     |   365 +
 .../clientpositive/cbo_rp_udf_udaf.q.out        |   125 +
 .../results/clientpositive/cbo_rp_union.q.out   |   920 +
 .../results/clientpositive/cbo_rp_views.q.out   |   237 +
 .../clientpositive/cbo_rp_windowing.q.out       |   293 +
 .../results/clientpositive/compustat_avro.q.out |     8 +-
 .../clientpositive/convert_enum_to_string.q.out |     9 +-
 .../results/clientpositive/create_like.q.out    |    66 +
 .../clientpositive/dynamic_rdd_cache.q.out      |  1428 +
 .../clientpositive/exchange_partition.q.out     |     4 +-
 .../clientpositive/exchange_partition2.q.out    |     4 +-
 .../clientpositive/exchange_partition3.q.out    |     4 +-
 .../results/clientpositive/flatten_and_or.q.out |    66 +
 .../test/results/clientpositive/groupby5.q.out  |     8 +-
 .../groupby_multi_single_reducer2.q.out         |     2 +-
 .../groupby_multi_single_reducer3.q.out         |    12 +-
 .../clientpositive/groupby_sort_1_23.q.out      |    56 +-
 .../clientpositive/groupby_sort_skew_1_23.q.out |    56 +-
 .../clientpositive/infer_const_type.q.out       |     7 +-
 .../clientpositive/input_testxpath4.q.out       |     2 +-
 .../clientpositive/insertoverwrite_bucket.q.out |   104 +
 .../join_cond_pushdown_unqual4.q.out            |     2 +-
 .../test/results/clientpositive/lineage3.q.out  |    65 +-
 .../clientpositive/load_dyn_part14_win.q.out    |   167 +-
 .../clientpositive/macro_duplicate.q.out        |    56 +
 .../results/clientpositive/multi_insert.q.out   |     8 +-
 .../clientpositive/multi_insert_gby.q.out       |     2 +-
 .../multi_insert_lateral_view.q.out             |     4 +-
 ...i_insert_move_tasks_share_dependencies.q.out |   360 +-
 .../test/results/clientpositive/null_cast.q.out |     6 +-
 .../results/clientpositive/orc_file_dump.q.out  |   447 +
 .../clientpositive/orc_predicate_pushdown.q.out |    36 +-
 .../parquet_predicate_pushdown.q.out            |    47 +
 ql/src/test/results/clientpositive/pcr.q.out    |    12 +-
 .../results/clientpositive/ppd_gby_join.q.out   |     4 +-
 .../test/results/clientpositive/ppd_join.q.out  |     4 +-
 .../test/results/clientpositive/ppd_join2.q.out |    22 +-
 .../test/results/clientpositive/ppd_join3.q.out |    52 +-
 .../clientpositive/ppd_outer_join4.q.out        |     2 +-
 .../results/clientpositive/ppd_transform.q.out  |    12 +-
 ql/src/test/results/clientpositive/ptf.q.out    |    27 +-
 .../results/clientpositive/show_functions.q.out |     1 +
 .../clientpositive/spark/auto_join18.q.out      |    24 +-
 .../clientpositive/spark/auto_join30.q.out      |    51 +-
 .../clientpositive/spark/auto_join32.q.out      |    24 +-
 .../spark/auto_smb_mapjoin_14.q.out             |    30 +-
 .../spark/auto_sortmerge_join_10.q.out          |    23 +-
 .../results/clientpositive/spark/bucket2.q.out  |     3 -
 .../results/clientpositive/spark/bucket3.q.out  |     3 -
 .../results/clientpositive/spark/bucket4.q.out  |     3 -
 .../spark/column_access_stats.q.out             |     4 -
 .../spark/dynamic_rdd_cache.q.out               |  1073 +
 .../clientpositive/spark/groupby10.q.out        |    32 +-
 .../clientpositive/spark/groupby1_map.q.out     |   412 +
 .../spark/groupby1_map_nomap.q.out              |   408 +
 .../spark/groupby1_map_skew.q.out               |   427 +
 .../clientpositive/spark/groupby1_noskew.q.out  |   406 +
 .../clientpositive/spark/groupby2_map.q.out     |   118 +
 .../spark/groupby2_map_multi_distinct.q.out     |   232 +
 .../spark/groupby2_map_skew.q.out               |   129 +
 .../clientpositive/spark/groupby2_noskew.q.out  |   111 +
 .../spark/groupby2_noskew_multi_distinct.q.out  |   114 +
 .../clientpositive/spark/groupby4_map.q.out     |    93 +
 .../spark/groupby4_map_skew.q.out               |    93 +
 .../clientpositive/spark/groupby4_noskew.q.out  |   104 +
 .../results/clientpositive/spark/groupby5.q.out |   433 +
 .../clientpositive/spark/groupby5_map.q.out     |    95 +
 .../spark/groupby5_map_skew.q.out               |    95 +
 .../clientpositive/spark/groupby5_noskew.q.out  |   418 +
 .../results/clientpositive/spark/groupby6.q.out |   113 +
 .../clientpositive/spark/groupby6_map.q.out     |   109 +
 .../spark/groupby6_map_skew.q.out               |   122 +
 .../clientpositive/spark/groupby6_noskew.q.out  |   104 +
 .../clientpositive/spark/groupby7_map.q.out     |    23 +-
 .../spark/groupby7_map_skew.q.out               |    38 +-
 .../clientpositive/spark/groupby7_noskew.q.out  |    17 +-
 .../groupby7_noskew_multi_single_reducer.q.out  |    18 +-
 .../results/clientpositive/spark/groupby8.q.out |    62 +-
 .../spark/groupby8_map_skew.q.out               |    37 +-
 .../spark/groupby_grouping_id2.q.out            |   230 +
 .../spark/groupby_multi_single_reducer2.q.out   |     2 +-
 .../spark/groupby_multi_single_reducer3.q.out   |    12 +-
 .../clientpositive/spark/groupby_position.q.out |    37 +-
 .../spark/groupby_ppr_multi_distinct.q.out      |   346 +
 .../spark/groupby_resolution.q.out              |   796 +
 .../spark/groupby_sort_1_23.q.out               |    90 +-
 .../spark/groupby_sort_skew_1_23.q.out          |    90 +-
 .../clientpositive/spark/insert_into3.q.out     |    33 +-
 .../results/clientpositive/spark/join18.q.out   |    24 +-
 .../results/clientpositive/spark/join22.q.out   |    19 +-
 .../spark/join_cond_pushdown_unqual4.q.out      |     2 +-
 .../spark/limit_partition_metadataonly.q.out    |     2 -
 .../clientpositive/spark/limit_pushdown.q.out   |    31 +-
 .../spark/list_bucket_dml_2.q.java1.7.out       |     3 -
 .../clientpositive/spark/load_dyn_part14.q.out  |    30 +-
 .../clientpositive/spark/multi_insert.q.out     |     8 +-
 .../clientpositive/spark/multi_insert_gby.q.out |     2 +-
 .../spark/multi_insert_lateral_view.q.out       |     4 +-
 ...i_insert_move_tasks_share_dependencies.q.out |   536 +-
 .../clientpositive/spark/nullgroup.q.out        |   265 +
 .../clientpositive/spark/nullgroup2.q.out       |   300 +
 .../clientpositive/spark/nullgroup4.q.out       |   292 +
 .../spark/nullgroup4_multi_distinct.q.out       |   133 +
 .../spark/optimize_nullscan.q.out               |     3 -
 .../test/results/clientpositive/spark/pcr.q.out |    18 +-
 .../clientpositive/spark/ppd_gby_join.q.out     |     4 +-
 .../results/clientpositive/spark/ppd_join.q.out |     4 +-
 .../clientpositive/spark/ppd_join2.q.out        |    22 +-
 .../clientpositive/spark/ppd_join3.q.out        |    52 +-
 .../clientpositive/spark/ppd_outer_join4.q.out  |     2 +-
 .../clientpositive/spark/ppd_transform.q.out    |    12 +-
 .../test/results/clientpositive/spark/ptf.q.out |    17 +-
 .../results/clientpositive/spark/sample3.q.out  |     3 -
 .../results/clientpositive/spark/sample9.q.out  |     3 -
 .../clientpositive/spark/skewjoinopt11.q.out    |    60 +-
 .../clientpositive/spark/skewjoinopt9.q.out     |    20 +-
 .../clientpositive/spark/smb_mapjoin_11.q.out   |     6 -
 .../spark/spark_dynamic_partition_pruning.q.out |  5573 +++
 .../spark_dynamic_partition_pruning_2.q.out     |  1015 +
 ...k_vectorized_dynamic_partition_pruning.q.out |  5822 +++
 .../clientpositive/spark/temp_table_gb1.q.out   |    67 +
 .../clientpositive/spark/udaf_collect_set.q.out |   212 +
 .../clientpositive/spark/udf_example_add.q.out  |     3 -
 .../clientpositive/spark/udf_in_file.q.out      |     3 -
 .../results/clientpositive/spark/udf_max.q.out  |    62 +
 .../results/clientpositive/spark/udf_min.q.out  |    62 +
 .../clientpositive/spark/udf_percentile.q.out   |   450 +
 .../results/clientpositive/spark/union10.q.out  |    36 +-
 .../results/clientpositive/spark/union11.q.out  |    38 +-
 .../results/clientpositive/spark/union15.q.out  |    23 +-
 .../results/clientpositive/spark/union16.q.out  |   450 +-
 .../results/clientpositive/spark/union2.q.out   |    36 +-
 .../results/clientpositive/spark/union20.q.out  |    18 +-
 .../results/clientpositive/spark/union25.q.out  |    21 +-
 .../results/clientpositive/spark/union28.q.out  |    21 +-
 .../results/clientpositive/spark/union3.q.out   |    45 +-
 .../results/clientpositive/spark/union30.q.out  |    21 +-
 .../results/clientpositive/spark/union4.q.out   |    18 +-
 .../results/clientpositive/spark/union5.q.out   |    20 +-
 .../results/clientpositive/spark/union9.q.out   |    54 +-
 .../clientpositive/spark/union_remove_1.q.out   |    23 +-
 .../clientpositive/spark/union_remove_15.q.out  |    23 +-
 .../clientpositive/spark/union_remove_16.q.out  |    23 +-
 .../clientpositive/spark/union_remove_18.q.out  |    23 +-
 .../clientpositive/spark/union_remove_19.q.out  |    75 +-
 .../clientpositive/spark/union_remove_20.q.out  |    23 +-
 .../clientpositive/spark/union_remove_21.q.out  |    21 +-
 .../clientpositive/spark/union_remove_22.q.out  |    46 +-
 .../clientpositive/spark/union_remove_24.q.out  |    23 +-
 .../clientpositive/spark/union_remove_25.q.out  |    59 +-
 .../clientpositive/spark/union_remove_4.q.out   |    23 +-
 .../clientpositive/spark/union_remove_6.q.out   |    23 +-
 .../spark/union_remove_6_subq.q.out             |    84 +-
 .../clientpositive/spark/union_remove_7.q.out   |    23 +-
 .../clientpositive/spark/union_top_level.q.out  |    59 +-
 .../clientpositive/spark/union_view.q.out       |    33 +-
 .../spark/vector_count_distinct.q.out           |    31 +-
 .../spark/vector_decimal_mapjoin.q.out          |     1 +
 .../clientpositive/spark/vector_elt.q.out       |     7 -
 .../spark/vector_left_outer_join.q.out          |     2 +
 .../spark/vector_mapjoin_reduce.q.out           |     1 +
 .../spark/vector_string_concat.q.out            |     3 -
 .../clientpositive/spark/vectorization_0.q.out  |     2 +-
 .../clientpositive/spark/vectorization_13.q.out |     4 +-
 .../clientpositive/spark/vectorization_15.q.out |     2 +-
 .../clientpositive/spark/vectorization_17.q.out |    12 +-
 .../spark/vectorization_decimal_date.q.out      |     4 -
 .../spark/vectorization_div0.q.out              |     3 -
 .../spark/vectorization_short_regress.q.out     |    40 +-
 .../clientpositive/spark/vectorized_case.q.out  |     5 +-
 .../spark/vectorized_mapjoin.q.out              |     1 +
 .../spark/vectorized_math_funcs.q.out           |     3 -
 .../spark/vectorized_nested_mapjoin.q.out       |     2 +
 .../clientpositive/spark/vectorized_ptf.q.out   |    21 +-
 .../spark/vectorized_string_funcs.q.out         |     3 -
 .../results/clientpositive/stats_ppr_all.q.out  |   284 +
 .../test/results/clientpositive/structin.q.out  |    66 +
 .../clientpositive/tez/explainuser_1.q.out      |    71 +-
 .../clientpositive/tez/orc_ppd_basic.q.out      |   701 +
 .../test/results/clientpositive/tez/ptf.q.out   |    15 +-
 .../results/clientpositive/tez/union2.q.out     |    28 +-
 .../results/clientpositive/tez/union9.q.out     |    40 +-
 .../clientpositive/tez/vector_acid3.q.out       |    31 +
 .../tez/vector_mr_diff_schema_alias.q.out       |     2 +-
 .../tez/vector_null_projection.q.out            |     4 +
 .../clientpositive/tez/vectorization_0.q.out    |     2 +-
 .../clientpositive/tez/vectorization_13.q.out   |     4 +-
 .../clientpositive/tez/vectorization_15.q.out   |     2 +-
 .../clientpositive/tez/vectorization_17.q.out   |    12 +-
 .../clientpositive/tez/vectorization_7.q.out    |     4 +-
 .../clientpositive/tez/vectorization_8.q.out    |     4 +-
 .../tez/vectorization_short_regress.q.out       |    40 +-
 .../clientpositive/tez/vectorized_case.q.out    |     2 +-
 .../clientpositive/tez/vectorized_ptf.q.out     |    19 +-
 .../results/clientpositive/udf_bround.q.out     |   119 +
 .../clientpositive/udf_from_utc_timestamp.q.out |    66 +-
 .../results/clientpositive/udf_inline.q.out     |     8 +-
 ql/src/test/results/clientpositive/udf_or.q.out |     4 +-
 .../results/clientpositive/udf_percentile.q.out |   104 +-
 .../results/clientpositive/udf_struct.q.out     |     2 +-
 .../clientpositive/udf_to_utc_timestamp.q.out   |    66 +-
 .../test/results/clientpositive/udf_union.q.out |     2 +-
 .../test/results/clientpositive/union16.q.out   |   354 +-
 ql/src/test/results/clientpositive/union2.q.out |    32 +-
 ql/src/test/results/clientpositive/union9.q.out |    46 +-
 .../clientpositive/union_remove_6_subq.q.out    |    34 +-
 .../results/clientpositive/union_view.q.out     |    24 +
 .../clientpositive/unionall_unbalancedppd.q.out |   653 +
 .../results/clientpositive/vector_acid3.q.out   |    31 +
 .../results/clientpositive/vector_bround.q.out  |    86 +
 .../vector_mr_diff_schema_alias.q.out           |     2 +-
 .../clientpositive/vector_null_projection.q.out |     4 +
 .../clientpositive/vectorization_0.q.out        |     2 +-
 .../clientpositive/vectorization_13.q.out       |     4 +-
 .../clientpositive/vectorization_15.q.out       |     2 +-
 .../clientpositive/vectorization_17.q.out       |    12 +-
 .../clientpositive/vectorization_7.q.out        |     4 +-
 .../clientpositive/vectorization_8.q.out        |     4 +-
 .../vectorization_short_regress.q.out           |    40 +-
 .../clientpositive/vectorized_case.q.out        |     9 +-
 .../results/clientpositive/vectorized_ptf.q.out |    67 +-
 .../gen/thrift/gen-cpp/complex_constants.cpp    |     2 +-
 .../src/gen/thrift/gen-cpp/complex_constants.h  |     2 +-
 serde/src/gen/thrift/gen-cpp/complex_types.cpp  |   442 +-
 serde/src/gen/thrift/gen-cpp/complex_types.h    |   174 +-
 .../gen/thrift/gen-cpp/megastruct_constants.cpp |     2 +-
 .../gen/thrift/gen-cpp/megastruct_constants.h   |     2 +-
 .../src/gen/thrift/gen-cpp/megastruct_types.cpp |   585 +-
 serde/src/gen/thrift/gen-cpp/megastruct_types.h |   175 +-
 .../src/gen/thrift/gen-cpp/serde_constants.cpp  |     2 +-
 serde/src/gen/thrift/gen-cpp/serde_constants.h  |     2 +-
 serde/src/gen/thrift/gen-cpp/serde_types.cpp    |     5 +-
 serde/src/gen/thrift/gen-cpp/serde_types.h      |     5 +-
 .../gen/thrift/gen-cpp/testthrift_constants.cpp |     2 +-
 .../gen/thrift/gen-cpp/testthrift_constants.h   |     2 +-
 .../src/gen/thrift/gen-cpp/testthrift_types.cpp |    95 +-
 serde/src/gen/thrift/gen-cpp/testthrift_types.h |    45 +-
 .../hadoop/hive/serde/serdeConstants.java       |     7 +-
 .../hadoop/hive/serde/test/InnerStruct.java     |    24 +-
 .../hadoop/hive/serde/test/ThriftTestObj.java   |    62 +-
 .../hadoop/hive/serde2/thrift/test/Complex.java |   279 +-
 .../hive/serde2/thrift/test/IntString.java      |    40 +-
 .../hive/serde2/thrift/test/MegaStruct.java     |   521 +-
 .../hive/serde2/thrift/test/MiniStruct.java     |    38 +-
 .../hadoop/hive/serde2/thrift/test/MyEnum.java  |     2 +-
 .../hive/serde2/thrift/test/PropValueUnion.java |    60 +-
 .../hive/serde2/thrift/test/SetIntString.java   |    54 +-
 serde/src/gen/thrift/gen-php/Types.php          |    15 +-
 .../org/apache/hadoop/hive/serde/Types.php      |   373 +-
 .../src/gen/thrift/gen-py/complex/constants.py  |     2 +-
 serde/src/gen/thrift/gen-py/complex/ttypes.py   |    50 +-
 .../gen/thrift/gen-py/megastruct/constants.py   |     2 +-
 .../src/gen/thrift/gen-py/megastruct/ttypes.py  |    44 +-
 .../org_apache_hadoop_hive_serde/constants.py   |     2 +-
 .../org_apache_hadoop_hive_serde/ttypes.py      |     2 +-
 .../gen/thrift/gen-py/testthrift/constants.py   |     2 +-
 .../src/gen/thrift/gen-py/testthrift/ttypes.py  |    14 +-
 .../src/gen/thrift/gen-rb/complex_constants.rb  |     2 +-
 serde/src/gen/thrift/gen-rb/complex_types.rb    |     2 +-
 .../gen/thrift/gen-rb/megastruct_constants.rb   |     2 +-
 serde/src/gen/thrift/gen-rb/megastruct_types.rb |     2 +-
 serde/src/gen/thrift/gen-rb/serde_constants.rb  |     2 +-
 serde/src/gen/thrift/gen-rb/serde_types.rb      |     2 +-
 .../gen/thrift/gen-rb/testthrift_constants.rb   |     2 +-
 serde/src/gen/thrift/gen-rb/testthrift_types.rb |     2 +-
 .../hadoop/hive/ql/io/sarg/ExpressionTree.java  |   157 -
 .../hadoop/hive/ql/io/sarg/PredicateLeaf.java   |    87 -
 .../hadoop/hive/ql/io/sarg/SearchArgument.java  |   278 -
 .../apache/hadoop/hive/serde2/SerDeUtils.java   |    14 +-
 .../apache/hadoop/hive/serde2/WriteBuffers.java |     2 +-
 .../hadoop/hive/serde2/avro/InstanceCache.java  |     9 +-
 .../hive/serde2/io/HiveDecimalWritable.java     |   185 -
 .../hive/serde2/lazy/LazyHiveDecimal.java       |     3 +-
 .../lazy/fast/LazySimpleDeserializeRead.java    |    16 +-
 .../lazybinary/LazyBinaryHiveDecimal.java       |     2 +-
 .../hive/serde2/lazybinary/LazyBinarySerDe.java |    26 +-
 .../fast/LazyBinaryDeserializeRead.java         |     6 +-
 .../fast/LazyBinarySerializeWrite.java          |     5 +-
 .../objectinspector/ObjectInspectorFactory.java |     7 +-
 .../objectinspector/ObjectInspectorUtils.java   |    19 +
 .../StandardConstantStructObjectInspector.java  |    51 +
 .../hive/serde2/typeinfo/HiveDecimalUtils.java  |    35 +-
 .../hive/serde2/typeinfo/TypeInfoUtils.java     |     2 +-
 .../hive/serde2/avro/TestInstanceCache.java     |    40 +-
 service/src/gen/thrift/gen-cpp/TCLIService.cpp  |   458 +-
 service/src/gen/thrift/gen-cpp/TCLIService.h    |   821 +-
 .../thrift/gen-cpp/TCLIService_constants.cpp    |     2 +-
 .../gen/thrift/gen-cpp/TCLIService_constants.h  |     2 +-
 .../gen/thrift/gen-cpp/TCLIService_types.cpp    |  3250 +-
 .../src/gen/thrift/gen-cpp/TCLIService_types.h  |  1482 +-
 service/src/gen/thrift/gen-cpp/ThriftHive.cpp   |   286 +-
 service/src/gen/thrift/gen-cpp/ThriftHive.h     |   389 +-
 .../thrift/gen-cpp/hive_service_constants.cpp   |     2 +-
 .../gen/thrift/gen-cpp/hive_service_constants.h |     2 +-
 .../gen/thrift/gen-cpp/hive_service_types.cpp   |   110 +-
 .../src/gen/thrift/gen-cpp/hive_service_types.h |    75 +-
 .../hadoop/hive/service/HiveClusterStatus.java  |    68 +-
 .../hive/service/HiveServerException.java       |    40 +-
 .../hadoop/hive/service/JobTrackerState.java    |     2 +-
 .../apache/hadoop/hive/service/ThriftHive.java  |   914 +-
 .../service/cli/thrift/TArrayTypeEntry.java     |    24 +-
 .../hive/service/cli/thrift/TBinaryColumn.java  |    64 +-
 .../hive/service/cli/thrift/TBoolColumn.java    |    62 +-
 .../hive/service/cli/thrift/TBoolValue.java     |    26 +-
 .../hive/service/cli/thrift/TByteColumn.java    |    62 +-
 .../hive/service/cli/thrift/TByteValue.java     |    26 +-
 .../hive/service/cli/thrift/TCLIService.java    |  1734 +-
 .../cli/thrift/TCLIServiceConstants.java        |     7 +-
 .../cli/thrift/TCancelDelegationTokenReq.java   |    32 +-
 .../cli/thrift/TCancelDelegationTokenResp.java  |    24 +-
 .../service/cli/thrift/TCancelOperationReq.java |    24 +-
 .../cli/thrift/TCancelOperationResp.java        |    24 +-
 .../service/cli/thrift/TCloseOperationReq.java  |    24 +-
 .../service/cli/thrift/TCloseOperationResp.java |    24 +-
 .../service/cli/thrift/TCloseSessionReq.java    |    24 +-
 .../service/cli/thrift/TCloseSessionResp.java   |    24 +-
 .../apache/hive/service/cli/thrift/TColumn.java |    20 +-
 .../hive/service/cli/thrift/TColumnDesc.java    |    50 +-
 .../hive/service/cli/thrift/TColumnValue.java   |    20 +-
 .../hive/service/cli/thrift/TDoubleColumn.java  |    62 +-
 .../hive/service/cli/thrift/TDoubleValue.java   |    26 +-
 .../cli/thrift/TExecuteStatementReq.java        |    87 +-
 .../cli/thrift/TExecuteStatementResp.java       |    34 +-
 .../service/cli/thrift/TFetchOrientation.java   |     2 +-
 .../service/cli/thrift/TFetchResultsReq.java    |    54 +-
 .../service/cli/thrift/TFetchResultsResp.java   |    42 +-
 .../service/cli/thrift/TGetCatalogsReq.java     |    24 +-
 .../service/cli/thrift/TGetCatalogsResp.java    |    34 +-
 .../hive/service/cli/thrift/TGetColumnsReq.java |    58 +-
 .../service/cli/thrift/TGetColumnsResp.java     |    34 +-
 .../cli/thrift/TGetDelegationTokenReq.java      |    40 +-
 .../cli/thrift/TGetDelegationTokenResp.java     |    34 +-
 .../service/cli/thrift/TGetFunctionsReq.java    |    50 +-
 .../service/cli/thrift/TGetFunctionsResp.java   |    34 +-
 .../hive/service/cli/thrift/TGetInfoReq.java    |    36 +-
 .../hive/service/cli/thrift/TGetInfoResp.java   |    32 +-
 .../hive/service/cli/thrift/TGetInfoType.java   |     2 +-
 .../hive/service/cli/thrift/TGetInfoValue.java  |    20 +-
 .../cli/thrift/TGetOperationStatusReq.java      |    24 +-
 .../cli/thrift/TGetOperationStatusResp.java     |    62 +-
 .../cli/thrift/TGetResultSetMetadataReq.java    |    24 +-
 .../cli/thrift/TGetResultSetMetadataResp.java   |    34 +-
 .../hive/service/cli/thrift/TGetSchemasReq.java |    42 +-
 .../service/cli/thrift/TGetSchemasResp.java     |    34 +-
 .../service/cli/thrift/TGetTableTypesReq.java   |    24 +-
 .../service/cli/thrift/TGetTableTypesResp.java  |    34 +-
 .../hive/service/cli/thrift/TGetTablesReq.java  |    79 +-
 .../hive/service/cli/thrift/TGetTablesResp.java |    34 +-
 .../service/cli/thrift/TGetTypeInfoReq.java     |    24 +-
 .../service/cli/thrift/TGetTypeInfoResp.java    |    34 +-
 .../service/cli/thrift/THandleIdentifier.java   |    50 +-
 .../hive/service/cli/thrift/TI16Column.java     |    62 +-
 .../hive/service/cli/thrift/TI16Value.java      |    26 +-
 .../hive/service/cli/thrift/TI32Column.java     |    62 +-
 .../hive/service/cli/thrift/TI32Value.java      |    26 +-
 .../hive/service/cli/thrift/TI64Column.java     |    62 +-
 .../hive/service/cli/thrift/TI64Value.java      |    26 +-
 .../hive/service/cli/thrift/TMapTypeEntry.java  |    32 +-
 .../service/cli/thrift/TOpenSessionReq.java     |    91 +-
 .../service/cli/thrift/TOpenSessionResp.java    |    91 +-
 .../service/cli/thrift/TOperationHandle.java    |    54 +-
 .../service/cli/thrift/TOperationState.java     |     2 +-
 .../hive/service/cli/thrift/TOperationType.java |     2 +-
 .../service/cli/thrift/TPrimitiveTypeEntry.java |    38 +-
 .../service/cli/thrift/TProtocolVersion.java    |     2 +-
 .../cli/thrift/TRenewDelegationTokenReq.java    |    32 +-
 .../cli/thrift/TRenewDelegationTokenResp.java   |    24 +-
 .../apache/hive/service/cli/thrift/TRow.java    |    46 +-
 .../apache/hive/service/cli/thrift/TRowSet.java |    86 +-
 .../hive/service/cli/thrift/TSessionHandle.java |    24 +-
 .../apache/hive/service/cli/thrift/TStatus.java |    83 +-
 .../hive/service/cli/thrift/TStatusCode.java    |     2 +-
 .../hive/service/cli/thrift/TStringColumn.java  |    62 +-
 .../hive/service/cli/thrift/TStringValue.java   |    26 +-
 .../service/cli/thrift/TStructTypeEntry.java    |    50 +-
 .../hive/service/cli/thrift/TTableSchema.java   |    46 +-
 .../hive/service/cli/thrift/TTypeDesc.java      |    46 +-
 .../hive/service/cli/thrift/TTypeEntry.java     |    20 +-
 .../apache/hive/service/cli/thrift/TTypeId.java |     2 +-
 .../service/cli/thrift/TTypeQualifierValue.java |    20 +-
 .../service/cli/thrift/TTypeQualifiers.java     |    54 +-
 .../service/cli/thrift/TUnionTypeEntry.java     |    50 +-
 .../cli/thrift/TUserDefinedTypeEntry.java       |    24 +-
 service/src/gen/thrift/gen-php/TCLIService.php  |   269 +-
 service/src/gen/thrift/gen-php/ThriftHive.php   |   125 +-
 service/src/gen/thrift/gen-php/Types.php        |    30 +-
 .../gen-py/TCLIService/TCLIService-remote       |   102 +-
 .../thrift/gen-py/TCLIService/TCLIService.py    |   439 +-
 .../gen/thrift/gen-py/TCLIService/constants.py  |     2 +-
 .../src/gen/thrift/gen-py/TCLIService/ttypes.py |   478 +-
 .../gen-py/hive_service/ThriftHive-remote       |  1021 +-
 .../thrift/gen-py/hive_service/ThriftHive.py    |   266 +-
 .../gen/thrift/gen-py/hive_service/constants.py |     2 +-
 .../gen/thrift/gen-py/hive_service/ttypes.py    |    19 +-
 .../gen/thrift/gen-rb/hive_service_constants.rb |     2 +-
 .../src/gen/thrift/gen-rb/hive_service_types.rb |     2 +-
 .../src/gen/thrift/gen-rb/t_c_l_i_service.rb    |     2 +-
 .../thrift/gen-rb/t_c_l_i_service_constants.rb  |     2 +-
 .../gen/thrift/gen-rb/t_c_l_i_service_types.rb  |     2 +-
 service/src/gen/thrift/gen-rb/thrift_hive.rb    |     2 +-
 .../hive/service/cli/CLIServiceUtils.java       |     7 -
 .../cli/operation/GetColumnsOperation.java      |    10 +-
 .../cli/operation/GetTablesOperation.java       |     7 +-
 .../cli/operation/LogDivertAppender.java        |   223 +-
 .../service/cli/operation/OperationManager.java |    17 +-
 .../service/cli/session/SessionManager.java     |    42 +-
 .../session/TestPluggableHiveSessionImpl.java   |    55 +
 shims/common/pom.xml                            |    17 +-
 .../hadoop/hive/shims/HiveEventCounter.java     |   102 -
 spark-client/pom.xml                            |     5 +
 .../hive/spark/client/SparkClientImpl.java      |    20 +-
 .../hive/spark/client/SparkClientUtilities.java |    13 +-
 .../hive/spark/client/TestSparkClient.java      |     4 +-
 .../src/test/resources/log4j.properties         |    23 -
 spark-client/src/test/resources/log4j2.xml      |    39 +
 storage-api/pom.xml                             |    78 +
 .../hadoop/hive/common/type/HiveDecimal.java    |   313 +
 .../hive/ql/exec/vector/BytesColumnVector.java  |   322 +
 .../hive/ql/exec/vector/ColumnVector.java       |   173 +
 .../ql/exec/vector/DecimalColumnVector.java     |   106 +
 .../hive/ql/exec/vector/DoubleColumnVector.java |   143 +
 .../hive/ql/exec/vector/LongColumnVector.java   |   189 +
 .../hive/ql/exec/vector/VectorizedRowBatch.java |   186 +
 .../hadoop/hive/ql/io/sarg/ExpressionTree.java  |   156 +
 .../hadoop/hive/ql/io/sarg/PredicateLeaf.java   |   104 +
 .../hadoop/hive/ql/io/sarg/SearchArgument.java  |   287 +
 .../hive/ql/io/sarg/SearchArgumentFactory.java  |    28 +
 .../hive/ql/io/sarg/SearchArgumentImpl.java     |   687 +
 .../hive/serde2/io/HiveDecimalWritable.java     |   174 +
 testutils/ptest2/pom.xml                        |    20 +
 .../ptest2/src/main/resources/log4j.properties  |    37 -
 testutils/ptest2/src/main/resources/log4j2.xml  |    79 +
 955 files changed, 130154 insertions(+), 43331 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/c528294b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hive/blob/c528294b/itests/qtest/pom.xml
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hive/blob/c528294b/itests/src/test/resources/testconfiguration.properties
----------------------------------------------------------------------
diff --cc itests/src/test/resources/testconfiguration.properties
index eb986db,bed621d..c877f85
--- a/itests/src/test/resources/testconfiguration.properties
+++ b/itests/src/test/resources/testconfiguration.properties
@@@ -1142,48 -1184,8 +1184,54 @@@ miniSparkOnYarn.query.files=auto_sortme
    truncate_column_buckets.q,\
    uber_reduce.q
  
 +miniHbaseMetastore.query.files=join1.q,\
 +join2.q,\
 +mapjoin1.q,\
 +add_part_multiple.q,\
 +annotate_stats_join.q,\
 +authorization_parts.q,\
 +auto_join1.q,\
 +bucket1.q,\
 +compute_stats_string.q,\
 +create_1.q,\
 +groupby1.q,\
 +groupby12.q,\
 +having.q,\
 +innerjoin.q,\
 +input_part10.q,\
 +input20.q,\
 +join1.q,\
 +join20.q,\
 +leftsemijoin.q,\
 +mapjoin1.q,\
 +multi_insert_gby.q,\
 +orc_create.q,\
 +orc_merge1.q,\
 +show_roles.q,\
 +stats0.q,\
 +statsfs.q,\
 +temp_table.q,\
 +union.q,\
 +union10.q,\
 +alter_partition_change_col,\
 +alter1.q,\
 +analyze_tbl_part.q,\
 +authorization_1.q,\
 +columnstats_part_coltype.q,\
 +ctas.q,\
 +database.q,\
 +drop_partition_with_stats.q,\
 +drop_table_with_stats.q,\
 +inputddl8.q,\
 +order2.q,\
 +partition_date.q,\
 +partition_multilevels.q,\
 +show_partitions.q,\
 +sort.q,\
 +view.q
++
+ spark.query.negative.files=groupby2_map_skew_multi_distinct.q,\
+   groupby2_multi_distinct.q,\
+   groupby3_map_skew_multi_distinct.q,\
+   groupby3_multi_distinct.q,\
+   groupby_grouping_sets7.q
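
For context on how the lists above are consumed: testconfiguration.properties is a plain java.util.Properties file, so the trailing backslashes are ordinary line continuations and each *.query.files key resolves to one long comma-separated value. The following is only an illustrative sketch of parsing such a key (it is not the actual QTestUtil driver code; the key name and file path are taken from the diff above):

  import java.io.FileInputStream;
  import java.util.Arrays;
  import java.util.List;
  import java.util.Properties;
  import java.util.stream.Collectors;

  public class QueryFileListReader {
    public static void main(String[] args) throws Exception {
      Properties props = new Properties();
      // Properties.load() folds the backslash-continued lines into a single value.
      try (FileInputStream in =
          new FileInputStream("itests/src/test/resources/testconfiguration.properties")) {
        props.load(in);
      }
      String raw = props.getProperty("miniHbaseMetastore.query.files", "");
      List<String> queryFiles = Arrays.stream(raw.split(","))
          .map(String::trim)
          .filter(s -> !s.isEmpty())
          .collect(Collectors.toList());
      System.out.println(queryFiles.size() + " query files: " + queryFiles);
    }
  }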

http://git-wip-us.apache.org/repos/asf/hive/blob/c528294b/itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestUtil.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hive/blob/c528294b/metastore/if/hive_metastore.thrift
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hive/blob/c528294b/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.cpp
----------------------------------------------------------------------
diff --cc metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.cpp
index fbc8400,0354fe1..fcc4f0b
--- a/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.cpp
+++ b/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.cpp
@@@ -28313,116 -31450,6 +31450,140 @@@ uint32_t ThriftHiveMetastore_fire_liste
    return xfer;
  }
  
++
++ThriftHiveMetastore_flushCache_args::~ThriftHiveMetastore_flushCache_args() throw() {
++}
++
++
 +uint32_t ThriftHiveMetastore_flushCache_args::read(::apache::thrift::protocol::TProtocol* iprot) {
 +
 +  uint32_t xfer = 0;
 +  std::string fname;
 +  ::apache::thrift::protocol::TType ftype;
 +  int16_t fid;
 +
 +  xfer += iprot->readStructBegin(fname);
 +
 +  using ::apache::thrift::protocol::TProtocolException;
 +
 +
 +  while (true)
 +  {
 +    xfer += iprot->readFieldBegin(fname, ftype, fid);
 +    if (ftype == ::apache::thrift::protocol::T_STOP) {
 +      break;
 +    }
 +    xfer += iprot->skip(ftype);
 +    xfer += iprot->readFieldEnd();
 +  }
 +
 +  xfer += iprot->readStructEnd();
 +
 +  return xfer;
 +}
 +
 +uint32_t ThriftHiveMetastore_flushCache_args::write(::apache::thrift::protocol::TProtocol* oprot) const {
 +  uint32_t xfer = 0;
++  oprot->incrementRecursionDepth();
 +  xfer += oprot->writeStructBegin("ThriftHiveMetastore_flushCache_args");
 +
 +  xfer += oprot->writeFieldStop();
 +  xfer += oprot->writeStructEnd();
++  oprot->decrementRecursionDepth();
 +  return xfer;
 +}
 +
++
++ThriftHiveMetastore_flushCache_pargs::~ThriftHiveMetastore_flushCache_pargs() throw() {
++}
++
++
 +uint32_t ThriftHiveMetastore_flushCache_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const {
 +  uint32_t xfer = 0;
++  oprot->incrementRecursionDepth();
 +  xfer += oprot->writeStructBegin("ThriftHiveMetastore_flushCache_pargs");
 +
 +  xfer += oprot->writeFieldStop();
 +  xfer += oprot->writeStructEnd();
++  oprot->decrementRecursionDepth();
 +  return xfer;
 +}
 +
++
++ThriftHiveMetastore_flushCache_result::~ThriftHiveMetastore_flushCache_result() throw() {
++}
++
++
 +uint32_t ThriftHiveMetastore_flushCache_result::read(::apache::thrift::protocol::TProtocol* iprot) {
 +
 +  uint32_t xfer = 0;
 +  std::string fname;
 +  ::apache::thrift::protocol::TType ftype;
 +  int16_t fid;
 +
 +  xfer += iprot->readStructBegin(fname);
 +
 +  using ::apache::thrift::protocol::TProtocolException;
 +
 +
 +  while (true)
 +  {
 +    xfer += iprot->readFieldBegin(fname, ftype, fid);
 +    if (ftype == ::apache::thrift::protocol::T_STOP) {
 +      break;
 +    }
 +    xfer += iprot->skip(ftype);
 +    xfer += iprot->readFieldEnd();
 +  }
 +
 +  xfer += iprot->readStructEnd();
 +
 +  return xfer;
 +}
 +
 +uint32_t ThriftHiveMetastore_flushCache_result::write(::apache::thrift::protocol::TProtocol* oprot) const {
 +
 +  uint32_t xfer = 0;
 +
 +  xfer += oprot->writeStructBegin("ThriftHiveMetastore_flushCache_result");
 +
 +  xfer += oprot->writeFieldStop();
 +  xfer += oprot->writeStructEnd();
 +  return xfer;
 +}
 +
++
++ThriftHiveMetastore_flushCache_presult::~ThriftHiveMetastore_flushCache_presult() throw() {
++}
++
++
 +uint32_t ThriftHiveMetastore_flushCache_presult::read(::apache::thrift::protocol::TProtocol* iprot) {
 +
 +  uint32_t xfer = 0;
 +  std::string fname;
 +  ::apache::thrift::protocol::TType ftype;
 +  int16_t fid;
 +
 +  xfer += iprot->readStructBegin(fname);
 +
 +  using ::apache::thrift::protocol::TProtocolException;
 +
 +
 +  while (true)
 +  {
 +    xfer += iprot->readFieldBegin(fname, ftype, fid);
 +    if (ftype == ::apache::thrift::protocol::T_STOP) {
 +      break;
 +    }
 +    xfer += iprot->skip(ftype);
 +    xfer += iprot->readFieldEnd();
 +  }
 +
 +  xfer += iprot->readStructEnd();
 +
 +  return xfer;
 +}
 +
  void ThriftHiveMetastoreClient::getMetaConf(std::string& _return, const std::string& key)
  {
    send_getMetaConf(key);
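
The generated C++ above is only the plumbing for the new no-argument flushCache() metastore call: empty *_args/*_pargs/*_result/*_presult structs whose read() methods simply skip fields until T_STOP, plus recursion-depth bookkeeping on write(). As a rough illustration of what a caller looks like, here is a minimal Java-side sketch; it assumes the regenerated Java bindings expose the same no-argument method, and the host/port are placeholders for a running metastore:

  import org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore;
  import org.apache.thrift.protocol.TBinaryProtocol;
  import org.apache.thrift.transport.TSocket;
  import org.apache.thrift.transport.TTransport;

  public class FlushCacheExample {
    public static void main(String[] args) throws Exception {
      TTransport transport = new TSocket("localhost", 9083);  // placeholder endpoint
      transport.open();
      ThriftHiveMetastore.Client client =
          new ThriftHiveMetastore.Client(new TBinaryProtocol(transport));
      // flushCache() takes no arguments and returns nothing, matching the
      // empty args/result structs generated above.
      client.flushCache();
      transport.close();
    }
  }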

http://git-wip-us.apache.org/repos/asf/hive/blob/c528294b/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.h
----------------------------------------------------------------------
diff --cc metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.h
index a534c2e,520c6e3..baa28e3
--- a/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.h
+++ b/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.h
@@@ -17333,82 -18190,9 +18194,99 @@@ class ThriftHiveMetastore_fire_listener
  
    uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
  
+   friend std::ostream& operator<<(std::ostream& out, const ThriftHiveMetastore_fire_listener_event_presult& obj);
  };
  
 +
 +class ThriftHiveMetastore_flushCache_args {
 + public:
 +
++  static const char* ascii_fingerprint; // = "99914B932BD37A50B983C5E7C90AE93B";
++  static const uint8_t binary_fingerprint[16]; // = {0x99,0x91,0x4B,0x93,0x2B,0xD3,0x7A,0x50,0xB9,0x83,0xC5,0xE7,0xC9,0x0A,0xE9,0x3B};
++
++  ThriftHiveMetastore_flushCache_args(const ThriftHiveMetastore_flushCache_args&);
++  ThriftHiveMetastore_flushCache_args& operator=(const ThriftHiveMetastore_flushCache_args&);
 +  ThriftHiveMetastore_flushCache_args() {
 +  }
 +
-   virtual ~ThriftHiveMetastore_flushCache_args() throw() {}
- 
++  virtual ~ThriftHiveMetastore_flushCache_args() throw();
 +
 +  bool operator == (const ThriftHiveMetastore_flushCache_args & /* rhs */) const
 +  {
 +    return true;
 +  }
 +  bool operator != (const ThriftHiveMetastore_flushCache_args &rhs) const {
 +    return !(*this == rhs);
 +  }
 +
 +  bool operator < (const ThriftHiveMetastore_flushCache_args & ) const;
 +
 +  uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
 +  uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
 +
++  friend std::ostream& operator<<(std::ostream& out, const ThriftHiveMetastore_flushCache_args& obj);
 +};
 +
 +
 +class ThriftHiveMetastore_flushCache_pargs {
 + public:
 +
++  static const char* ascii_fingerprint; // = "99914B932BD37A50B983C5E7C90AE93B";
++  static const uint8_t binary_fingerprint[16]; // = {0x99,0x91,0x4B,0x93,0x2B,0xD3,0x7A,0x50,0xB9,0x83,0xC5,0xE7,0xC9,0x0A,0xE9,0x3B};
 +
-   virtual ~ThriftHiveMetastore_flushCache_pargs() throw() {}
 +
++  virtual ~ThriftHiveMetastore_flushCache_pargs() throw();
 +
 +  uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
 +
++  friend std::ostream& operator<<(std::ostream& out, const ThriftHiveMetastore_flushCache_pargs& obj);
 +};
 +
 +
 +class ThriftHiveMetastore_flushCache_result {
 + public:
 +
++  static const char* ascii_fingerprint; // = "99914B932BD37A50B983C5E7C90AE93B";
++  static const uint8_t binary_fingerprint[16]; // = {0x99,0x91,0x4B,0x93,0x2B,0xD3,0x7A,0x50,0xB9,0x83,0xC5,0xE7,0xC9,0x0A,0xE9,0x3B};
++
++  ThriftHiveMetastore_flushCache_result(const ThriftHiveMetastore_flushCache_result&);
++  ThriftHiveMetastore_flushCache_result& operator=(const ThriftHiveMetastore_flushCache_result&);
 +  ThriftHiveMetastore_flushCache_result() {
 +  }
 +
-   virtual ~ThriftHiveMetastore_flushCache_result() throw() {}
- 
++  virtual ~ThriftHiveMetastore_flushCache_result() throw();
 +
 +  bool operator == (const ThriftHiveMetastore_flushCache_result & /* rhs */) const
 +  {
 +    return true;
 +  }
 +  bool operator != (const ThriftHiveMetastore_flushCache_result &rhs) const {
 +    return !(*this == rhs);
 +  }
 +
 +  bool operator < (const ThriftHiveMetastore_flushCache_result & ) const;
 +
 +  uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
 +  uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
 +
++  friend std::ostream& operator<<(std::ostream& out, const ThriftHiveMetastore_flushCache_result& obj);
 +};
 +
 +
 +class ThriftHiveMetastore_flushCache_presult {
 + public:
 +
++  static const char* ascii_fingerprint; // = "99914B932BD37A50B983C5E7C90AE93B";
++  static const uint8_t binary_fingerprint[16]; // = {0x99,0x91,0x4B,0x93,0x2B,0xD3,0x7A,0x50,0xB9,0x83,0xC5,0xE7,0xC9,0x0A,0xE9,0x3B};
 +
-   virtual ~ThriftHiveMetastore_flushCache_presult() throw() {}
 +
++  virtual ~ThriftHiveMetastore_flushCache_presult() throw();
 +
 +  uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
 +
++  friend std::ostream& operator<<(std::ostream& out, const ThriftHiveMetastore_flushCache_presult& obj);
 +};
 +
  class ThriftHiveMetastoreClient : virtual public ThriftHiveMetastoreIf, public  ::facebook::fb303::FacebookServiceClient {
   public:
    ThriftHiveMetastoreClient(boost::shared_ptr< ::apache::thrift::protocol::TProtocol> prot) :

http://git-wip-us.apache.org/repos/asf/hive/blob/c528294b/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore_server.skeleton.cpp
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hive/blob/c528294b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AbortTxnRequest.java
----------------------------------------------------------------------
diff --cc metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AbortTxnRequest.java
index 5d44585,37e5bf1..ae12142
--- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AbortTxnRequest.java
+++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AbortTxnRequest.java
@@@ -31,7 -33,9 +33,9 @@@ import javax.annotation.Generated
  import org.slf4j.Logger;
  import org.slf4j.LoggerFactory;
  
- public class AbortTxnRequest implements org.apache.thrift.TBase<AbortTxnRequest, AbortTxnRequest._Fields>, java.io.Serializable, Cloneable {
+ @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
 -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3")
++@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-14")
+ public class AbortTxnRequest implements org.apache.thrift.TBase<AbortTxnRequest, AbortTxnRequest._Fields>, java.io.Serializable, Cloneable, Comparable<AbortTxnRequest> {
    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("AbortTxnRequest");
  
    private static final org.apache.thrift.protocol.TField TXNID_FIELD_DESC = new org.apache.thrift.protocol.TField("txnid", org.apache.thrift.protocol.TType.I64, (short)1);
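
The same three-line change repeats for every regenerated Thrift bean below: a @SuppressWarnings list on the class, a refreshed @Generated date (2015-8-14 instead of 2015-8-3), and the class now implementing Comparable<...> in addition to Serializable and Cloneable. The Comparable addition is what lets callers sort the generated beans directly; a trivial, hypothetical sketch (assuming the usual Thrift-generated convenience constructor taking the required txnid):

  import java.util.ArrayList;
  import java.util.Collections;
  import java.util.List;
  import org.apache.hadoop.hive.metastore.api.AbortTxnRequest;

  public class SortGeneratedBeans {
    public static void main(String[] args) {
      List<AbortTxnRequest> requests = new ArrayList<>();
      requests.add(new AbortTxnRequest(42L));
      requests.add(new AbortTxnRequest(7L));
      // Comparable<AbortTxnRequest>, added by the Thrift 0.9.2 regeneration,
      // makes Collections.sort() work without a custom Comparator.
      Collections.sort(requests);
      System.out.println(requests);
    }
  }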

http://git-wip-us.apache.org/repos/asf/hive/blob/c528294b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddDynamicPartitions.java
----------------------------------------------------------------------
diff --cc metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddDynamicPartitions.java
index afb2b79,9c78c49..f60521f
--- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddDynamicPartitions.java
+++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddDynamicPartitions.java
@@@ -31,7 -33,9 +33,9 @@@ import javax.annotation.Generated
  import org.slf4j.Logger;
  import org.slf4j.LoggerFactory;
  
- public class AddDynamicPartitions implements org.apache.thrift.TBase<AddDynamicPartitions, AddDynamicPartitions._Fields>, java.io.Serializable, Cloneable {
+ @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
 -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3")
++@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-14")
+ public class AddDynamicPartitions implements org.apache.thrift.TBase<AddDynamicPartitions, AddDynamicPartitions._Fields>, java.io.Serializable, Cloneable, Comparable<AddDynamicPartitions> {
    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("AddDynamicPartitions");
  
    private static final org.apache.thrift.protocol.TField TXNID_FIELD_DESC = new org.apache.thrift.protocol.TField("txnid", org.apache.thrift.protocol.TType.I64, (short)1);

http://git-wip-us.apache.org/repos/asf/hive/blob/c528294b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddPartitionsRequest.java
----------------------------------------------------------------------
diff --cc metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddPartitionsRequest.java
index 7bb72ba,fcfaaf3..00a7236
--- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddPartitionsRequest.java
+++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddPartitionsRequest.java
@@@ -31,7 -33,9 +33,9 @@@ import javax.annotation.Generated
  import org.slf4j.Logger;
  import org.slf4j.LoggerFactory;
  
- public class AddPartitionsRequest implements org.apache.thrift.TBase<AddPartitionsRequest, AddPartitionsRequest._Fields>, java.io.Serializable, Cloneable {
+ @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
 -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3")
++@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-14")
+ public class AddPartitionsRequest implements org.apache.thrift.TBase<AddPartitionsRequest, AddPartitionsRequest._Fields>, java.io.Serializable, Cloneable, Comparable<AddPartitionsRequest> {
    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("AddPartitionsRequest");
  
    private static final org.apache.thrift.protocol.TField DB_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("dbName", org.apache.thrift.protocol.TType.STRING, (short)1);

http://git-wip-us.apache.org/repos/asf/hive/blob/c528294b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddPartitionsResult.java
----------------------------------------------------------------------
diff --cc metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddPartitionsResult.java
index 875e04a,9022019..7150e68
--- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddPartitionsResult.java
+++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddPartitionsResult.java
@@@ -31,7 -33,9 +33,9 @@@ import javax.annotation.Generated
  import org.slf4j.Logger;
  import org.slf4j.LoggerFactory;
  
- public class AddPartitionsResult implements org.apache.thrift.TBase<AddPartitionsResult, AddPartitionsResult._Fields>, java.io.Serializable, Cloneable {
+ @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
 -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3")
++@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-14")
+ public class AddPartitionsResult implements org.apache.thrift.TBase<AddPartitionsResult, AddPartitionsResult._Fields>, java.io.Serializable, Cloneable, Comparable<AddPartitionsResult> {
    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("AddPartitionsResult");
  
    private static final org.apache.thrift.protocol.TField PARTITIONS_FIELD_DESC = new org.apache.thrift.protocol.TField("partitions", org.apache.thrift.protocol.TType.LIST, (short)1);

http://git-wip-us.apache.org/repos/asf/hive/blob/c528294b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AggrStats.java
----------------------------------------------------------------------
diff --cc metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AggrStats.java
index 0e1dbcb,917cec0..bf14ac0
--- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AggrStats.java
+++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AggrStats.java
@@@ -31,7 -33,9 +33,9 @@@ import javax.annotation.Generated
  import org.slf4j.Logger;
  import org.slf4j.LoggerFactory;
  
- public class AggrStats implements org.apache.thrift.TBase<AggrStats, AggrStats._Fields>, java.io.Serializable, Cloneable {
+ @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
 -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3")
++@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-14")
+ public class AggrStats implements org.apache.thrift.TBase<AggrStats, AggrStats._Fields>, java.io.Serializable, Cloneable, Comparable<AggrStats> {
    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("AggrStats");
  
    private static final org.apache.thrift.protocol.TField COL_STATS_FIELD_DESC = new org.apache.thrift.protocol.TField("colStats", org.apache.thrift.protocol.TType.LIST, (short)1);

http://git-wip-us.apache.org/repos/asf/hive/blob/c528294b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AlreadyExistsException.java
----------------------------------------------------------------------
diff --cc metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AlreadyExistsException.java
index 2a5e58f,d7a317b..16f2cb7
--- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AlreadyExistsException.java
+++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AlreadyExistsException.java
@@@ -31,7 -33,9 +33,9 @@@ import javax.annotation.Generated
  import org.slf4j.Logger;
  import org.slf4j.LoggerFactory;
  
- public class AlreadyExistsException extends TException implements org.apache.thrift.TBase<AlreadyExistsException, AlreadyExistsException._Fields>, java.io.Serializable, Cloneable {
+ @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
 -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3")
++@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-14")
+ public class AlreadyExistsException extends TException implements org.apache.thrift.TBase<AlreadyExistsException, AlreadyExistsException._Fields>, java.io.Serializable, Cloneable, Comparable<AlreadyExistsException> {
    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("AlreadyExistsException");
  
    private static final org.apache.thrift.protocol.TField MESSAGE_FIELD_DESC = new org.apache.thrift.protocol.TField("message", org.apache.thrift.protocol.TType.STRING, (short)1);

http://git-wip-us.apache.org/repos/asf/hive/blob/c528294b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/BinaryColumnStatsData.java
----------------------------------------------------------------------
diff --cc metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/BinaryColumnStatsData.java
index 1516b25,00b312d..7ddb91a
--- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/BinaryColumnStatsData.java
+++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/BinaryColumnStatsData.java
@@@ -31,7 -33,9 +33,9 @@@ import javax.annotation.Generated
  import org.slf4j.Logger;
  import org.slf4j.LoggerFactory;
  
- public class BinaryColumnStatsData implements org.apache.thrift.TBase<BinaryColumnStatsData, BinaryColumnStatsData._Fields>, java.io.Serializable, Cloneable {
+ @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
 -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3")
++@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-14")
+ public class BinaryColumnStatsData implements org.apache.thrift.TBase<BinaryColumnStatsData, BinaryColumnStatsData._Fields>, java.io.Serializable, Cloneable, Comparable<BinaryColumnStatsData> {
    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("BinaryColumnStatsData");
  
    private static final org.apache.thrift.protocol.TField MAX_COL_LEN_FIELD_DESC = new org.apache.thrift.protocol.TField("maxColLen", org.apache.thrift.protocol.TType.I64, (short)1);

http://git-wip-us.apache.org/repos/asf/hive/blob/c528294b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/BooleanColumnStatsData.java
----------------------------------------------------------------------
diff --cc metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/BooleanColumnStatsData.java
index 9ef9c0f,a0f3ab8..f98e56b
--- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/BooleanColumnStatsData.java
+++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/BooleanColumnStatsData.java
@@@ -31,7 -33,9 +33,9 @@@ import javax.annotation.Generated
  import org.slf4j.Logger;
  import org.slf4j.LoggerFactory;
  
- public class BooleanColumnStatsData implements org.apache.thrift.TBase<BooleanColumnStatsData, BooleanColumnStatsData._Fields>, java.io.Serializable, Cloneable {
+ @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
 -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3")
++@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-14")
+ public class BooleanColumnStatsData implements org.apache.thrift.TBase<BooleanColumnStatsData, BooleanColumnStatsData._Fields>, java.io.Serializable, Cloneable, Comparable<BooleanColumnStatsData> {
    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("BooleanColumnStatsData");
  
    private static final org.apache.thrift.protocol.TField NUM_TRUES_FIELD_DESC = new org.apache.thrift.protocol.TField("numTrues", org.apache.thrift.protocol.TType.I64, (short)1);

http://git-wip-us.apache.org/repos/asf/hive/blob/c528294b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/CheckLockRequest.java
----------------------------------------------------------------------
diff --cc metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/CheckLockRequest.java
index ae34203,82e3031..667d12e
--- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/CheckLockRequest.java
+++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/CheckLockRequest.java
@@@ -31,7 -33,9 +33,9 @@@ import javax.annotation.Generated
  import org.slf4j.Logger;
  import org.slf4j.LoggerFactory;
  
- public class CheckLockRequest implements org.apache.thrift.TBase<CheckLockRequest, CheckLockRequest._Fields>, java.io.Serializable, Cloneable {
+ @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
 -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3")
++@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-14")
+ public class CheckLockRequest implements org.apache.thrift.TBase<CheckLockRequest, CheckLockRequest._Fields>, java.io.Serializable, Cloneable, Comparable<CheckLockRequest> {
    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("CheckLockRequest");
  
    private static final org.apache.thrift.protocol.TField LOCKID_FIELD_DESC = new org.apache.thrift.protocol.TField("lockid", org.apache.thrift.protocol.TType.I64, (short)1);

http://git-wip-us.apache.org/repos/asf/hive/blob/c528294b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ColumnStatistics.java
----------------------------------------------------------------------
diff --cc metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ColumnStatistics.java
index b34619f,510dace..dd9aeb7
--- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ColumnStatistics.java
+++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ColumnStatistics.java
@@@ -31,7 -33,9 +33,9 @@@ import javax.annotation.Generated
  import org.slf4j.Logger;
  import org.slf4j.LoggerFactory;
  
- public class ColumnStatistics implements org.apache.thrift.TBase<ColumnStatistics, ColumnStatistics._Fields>, java.io.Serializable, Cloneable {
+ @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
 -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3")
++@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-14")
+ public class ColumnStatistics implements org.apache.thrift.TBase<ColumnStatistics, ColumnStatistics._Fields>, java.io.Serializable, Cloneable, Comparable<ColumnStatistics> {
    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("ColumnStatistics");
  
    private static final org.apache.thrift.protocol.TField STATS_DESC_FIELD_DESC = new org.apache.thrift.protocol.TField("statsDesc", org.apache.thrift.protocol.TType.STRUCT, (short)1);

http://git-wip-us.apache.org/repos/asf/hive/blob/c528294b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ColumnStatisticsDesc.java
----------------------------------------------------------------------
diff --cc metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ColumnStatisticsDesc.java
index e951d04,cf967b6..09f925c
--- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ColumnStatisticsDesc.java
+++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ColumnStatisticsDesc.java
@@@ -31,7 -33,9 +33,9 @@@ import javax.annotation.Generated
  import org.slf4j.Logger;
  import org.slf4j.LoggerFactory;
  
- public class ColumnStatisticsDesc implements org.apache.thrift.TBase<ColumnStatisticsDesc, ColumnStatisticsDesc._Fields>, java.io.Serializable, Cloneable {
+ @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
 -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3")
++@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-14")
+ public class ColumnStatisticsDesc implements org.apache.thrift.TBase<ColumnStatisticsDesc, ColumnStatisticsDesc._Fields>, java.io.Serializable, Cloneable, Comparable<ColumnStatisticsDesc> {
    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("ColumnStatisticsDesc");
  
    private static final org.apache.thrift.protocol.TField IS_TBL_LEVEL_FIELD_DESC = new org.apache.thrift.protocol.TField("isTblLevel", org.apache.thrift.protocol.TType.BOOL, (short)1);

http://git-wip-us.apache.org/repos/asf/hive/blob/c528294b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ColumnStatisticsObj.java
----------------------------------------------------------------------
diff --cc metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ColumnStatisticsObj.java
index f1257fd,3c2b123..2be715f
--- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ColumnStatisticsObj.java
+++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ColumnStatisticsObj.java
@@@ -31,7 -33,9 +33,9 @@@ import javax.annotation.Generated
  import org.slf4j.Logger;
  import org.slf4j.LoggerFactory;
  
- public class ColumnStatisticsObj implements org.apache.thrift.TBase<ColumnStatisticsObj, ColumnStatisticsObj._Fields>, java.io.Serializable, Cloneable {
+ @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
 -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3")
++@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-14")
+ public class ColumnStatisticsObj implements org.apache.thrift.TBase<ColumnStatisticsObj, ColumnStatisticsObj._Fields>, java.io.Serializable, Cloneable, Comparable<ColumnStatisticsObj> {
    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("ColumnStatisticsObj");
  
    private static final org.apache.thrift.protocol.TField COL_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("colName", org.apache.thrift.protocol.TType.STRING, (short)1);

http://git-wip-us.apache.org/repos/asf/hive/blob/c528294b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/CommitTxnRequest.java
----------------------------------------------------------------------
diff --cc metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/CommitTxnRequest.java
index aeb2421,5e8cd04..91483e2
--- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/CommitTxnRequest.java
+++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/CommitTxnRequest.java
@@@ -31,7 -33,9 +33,9 @@@ import javax.annotation.Generated
  import org.slf4j.Logger;
  import org.slf4j.LoggerFactory;
  
- public class CommitTxnRequest implements org.apache.thrift.TBase<CommitTxnRequest, CommitTxnRequest._Fields>, java.io.Serializable, Cloneable {
+ @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
 -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3")
++@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-14")
+ public class CommitTxnRequest implements org.apache.thrift.TBase<CommitTxnRequest, CommitTxnRequest._Fields>, java.io.Serializable, Cloneable, Comparable<CommitTxnRequest> {
    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("CommitTxnRequest");
  
    private static final org.apache.thrift.protocol.TField TXNID_FIELD_DESC = new org.apache.thrift.protocol.TField("txnid", org.apache.thrift.protocol.TType.I64, (short)1);
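
(For illustration; not part of the merge itself.) The files above are regenerated Thrift bindings: each struct picks up @SuppressWarnings, an updated @Generated date and a Comparable implementation, while its Thrift wire format is unchanged. A hedged sketch of how one of these generated exception types typically reaches a metastore client -- HiveMetaStoreClient usage shown only as an example, names and location are hypothetical:

import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
import org.apache.hadoop.hive.metastore.api.AlreadyExistsException;
import org.apache.hadoop.hive.metastore.api.Database;

public class CreateDatabaseSketch {
  public static void main(String[] args) throws Exception {
    HiveMetaStoreClient client = new HiveMetaStoreClient(new HiveConf());
    try {
      Database db = new Database();
      db.setName("example_db");             // hypothetical database name
      db.setLocationUri("/tmp/example.db"); // hypothetical location
      client.createDatabase(db);
    } catch (AlreadyExistsException e) {
      // This is the regenerated Thrift exception shown in the diff above.
      System.out.println("database already exists: " + e.getMessage());
    } finally {
      client.close();
    }
  }
}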


[04/50] [abbrv] hive git commit: HIVE-11437: CBO: Calcite Operator To Hive Operator (Calcite Return Path) : dealing with insert into (Pengcheng Xiong, reviewed by Jesus Camacho Rodriguez)

Posted by se...@apache.org.
HIVE-11437: CBO: Calcite Operator To Hive Operator (Calcite Return Path) : dealing with insert into (Pengcheng Xiong, reviewed by Jesus Camacho Rodriguez)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/6df52edc
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/6df52edc
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/6df52edc

Branch: refs/heads/hbase-metastore
Commit: 6df52edc5ec7dea80b271897128d5037d2d90ef0
Parents: 0b38612
Author: Pengcheng Xiong <px...@hortonworks.com>
Authored: Mon Aug 10 13:00:59 2015 +0300
Committer: Jesus Camacho Rodriguez <jc...@apache.org>
Committed: Mon Aug 10 13:00:59 2015 +0300

----------------------------------------------------------------------
 .../hadoop/hive/ql/parse/CalcitePlanner.java    | 40 ++++++++-
 .../hadoop/hive/ql/parse/SemanticAnalyzer.java  |  2 +-
 .../test/queries/clientpositive/cbo_rp_insert.q | 17 ++++
 .../results/clientpositive/cbo_rp_insert.q.out  | 89 ++++++++++++++++++++
 4 files changed, 146 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/6df52edc/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java
index 4027229..f26d1df 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java
@@ -109,6 +109,8 @@ import org.apache.hadoop.hive.ql.exec.ColumnInfo;
 import org.apache.hadoop.hive.ql.exec.FunctionInfo;
 import org.apache.hadoop.hive.ql.exec.FunctionRegistry;
 import org.apache.hadoop.hive.ql.exec.Operator;
+import org.apache.hadoop.hive.ql.exec.OperatorFactory;
+import org.apache.hadoop.hive.ql.exec.RowSchema;
 import org.apache.hadoop.hive.ql.lib.Node;
 import org.apache.hadoop.hive.ql.metadata.Table;
 import org.apache.hadoop.hive.ql.metadata.VirtualColumn;
@@ -170,6 +172,7 @@ import org.apache.hadoop.hive.ql.plan.ExprNodeConstantDesc;
 import org.apache.hadoop.hive.ql.plan.ExprNodeDesc;
 import org.apache.hadoop.hive.ql.plan.ExprNodeDescUtils;
 import org.apache.hadoop.hive.ql.plan.GroupByDesc;
+import org.apache.hadoop.hive.ql.plan.SelectDesc;
 import org.apache.hadoop.hive.ql.udf.generic.GenericUDAFEvaluator;
 import org.apache.hadoop.hive.ql.udf.generic.GenericUDAFEvaluator.Mode;
 import org.apache.hadoop.hive.serde.serdeConstants;
@@ -649,7 +652,42 @@ public class CalcitePlanner extends SemanticAnalyzer {
         conf.getVar(HiveConf.ConfVars.HIVEMAPREDMODE).equalsIgnoreCase("strict")).convert(modifiedOptimizedOptiqPlan);
     RowResolver hiveRootRR = genRowResolver(hiveRoot, getQB());
     opParseCtx.put(hiveRoot, new OpParseContext(hiveRootRR));
-    return genFileSinkPlan(getQB().getParseInfo().getClauseNames().iterator().next(), getQB(), hiveRoot);
+    String dest = getQB().getParseInfo().getClauseNames().iterator().next();
+    if (getQB().getParseInfo().getDestSchemaForClause(dest) != null
+        && this.getQB().getTableDesc() == null) {
+      Operator<?> selOp = handleInsertStatement(dest, hiveRoot, hiveRootRR, getQB());
+      return genFileSinkPlan(dest, getQB(), selOp);
+    } else {
+      return genFileSinkPlan(dest, getQB(), hiveRoot);
+    }
+  }
+
+  // This function serves as the wrapper of handleInsertStatementSpec in
+  // SemanticAnalyzer
+  Operator<?> handleInsertStatement(String dest, Operator<?> input, RowResolver inputRR, QB qb)
+      throws SemanticException {
+    ArrayList<ExprNodeDesc> colList = new ArrayList<ExprNodeDesc>();
+    ArrayList<ColumnInfo> columns = inputRR.getColumnInfos();
+    for (int i = 0; i < columns.size(); i++) {
+      ColumnInfo col = columns.get(i);
+      colList.add(new ExprNodeColumnDesc(col));
+    }
+    ASTNode selExprList = qb.getParseInfo().getSelForClause(dest);
+
+    RowResolver out_rwsch = handleInsertStatementSpec(colList, dest, inputRR, inputRR, qb,
+        selExprList);
+
+    ArrayList<String> columnNames = new ArrayList<String>();
+    Map<String, ExprNodeDesc> colExprMap = new HashMap<String, ExprNodeDesc>();
+    for (int i = 0; i < colList.size(); i++) {
+      String outputCol = getColumnInternalName(i);
+      colExprMap.put(outputCol, colList.get(i));
+      columnNames.add(outputCol);
+    }
+    Operator<?> output = putOpInsertMap(OperatorFactory.getAndMakeChild(new SelectDesc(colList,
+        columnNames), new RowSchema(out_rwsch.getColumnInfos()), input), out_rwsch);
+    output.setColumnExprMap(colExprMap);
+    return output;
   }
 
   private RelNode introduceProjectIfNeeded(RelNode optimizedOptiqPlan)

http://git-wip-us.apache.org/repos/asf/hive/blob/6df52edc/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
index fe7c1ca..5ea6f3f 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
@@ -3883,7 +3883,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
    * @see #handleInsertStatementSpecPhase1(ASTNode, QBParseInfo, org.apache.hadoop.hive.ql.parse.SemanticAnalyzer.Phase1Ctx)
    * @throws SemanticException
    */
-  private RowResolver handleInsertStatementSpec(List<ExprNodeDesc> col_list, String dest,
+  public RowResolver handleInsertStatementSpec(List<ExprNodeDesc> col_list, String dest,
                                          RowResolver outputRR, RowResolver inputRR, QB qb,
                                          ASTNode selExprList) throws SemanticException {
     //(z,x)

http://git-wip-us.apache.org/repos/asf/hive/blob/6df52edc/ql/src/test/queries/clientpositive/cbo_rp_insert.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/cbo_rp_insert.q b/ql/src/test/queries/clientpositive/cbo_rp_insert.q
new file mode 100644
index 0000000..eeaeec2
--- /dev/null
+++ b/ql/src/test/queries/clientpositive/cbo_rp_insert.q
@@ -0,0 +1,17 @@
+set hive.cbo.enable=true;
+set hive.cbo.returnpath.hiveop=true;
+
+drop database if exists x314 cascade;
+create database x314;
+use x314;
+create table source(s1 int, s2 int);
+create table target1(x int, y int, z int);
+
+insert into source(s2,s1) values(2,1);
+-- expect source to contain 1 row (1,2)
+select * from source;
+insert into target1(z,x) select * from source;
+-- expect target1 to contain 1 row (2,NULL,1)
+select * from target1;
+
+drop database if exists x314 cascade;
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hive/blob/6df52edc/ql/src/test/results/clientpositive/cbo_rp_insert.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/cbo_rp_insert.q.out b/ql/src/test/results/clientpositive/cbo_rp_insert.q.out
new file mode 100644
index 0000000..6428a4b
--- /dev/null
+++ b/ql/src/test/results/clientpositive/cbo_rp_insert.q.out
@@ -0,0 +1,89 @@
+PREHOOK: query: drop database if exists x314 cascade
+PREHOOK: type: DROPDATABASE
+POSTHOOK: query: drop database if exists x314 cascade
+POSTHOOK: type: DROPDATABASE
+PREHOOK: query: create database x314
+PREHOOK: type: CREATEDATABASE
+PREHOOK: Output: database:x314
+POSTHOOK: query: create database x314
+POSTHOOK: type: CREATEDATABASE
+POSTHOOK: Output: database:x314
+PREHOOK: query: use x314
+PREHOOK: type: SWITCHDATABASE
+PREHOOK: Input: database:x314
+POSTHOOK: query: use x314
+POSTHOOK: type: SWITCHDATABASE
+POSTHOOK: Input: database:x314
+PREHOOK: query: create table source(s1 int, s2 int)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:x314
+PREHOOK: Output: x314@source
+POSTHOOK: query: create table source(s1 int, s2 int)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:x314
+POSTHOOK: Output: x314@source
+PREHOOK: query: create table target1(x int, y int, z int)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:x314
+PREHOOK: Output: x314@target1
+POSTHOOK: query: create table target1(x int, y int, z int)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:x314
+POSTHOOK: Output: x314@target1
+PREHOOK: query: insert into source(s2,s1) values(2,1)
+PREHOOK: type: QUERY
+PREHOOK: Input: x314@values__tmp__table__1
+PREHOOK: Output: x314@source
+POSTHOOK: query: insert into source(s2,s1) values(2,1)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: x314@values__tmp__table__1
+POSTHOOK: Output: x314@source
+POSTHOOK: Lineage: source.s1 EXPRESSION [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col2, type:string, comment:), ]
+POSTHOOK: Lineage: source.s2 EXPRESSION [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col1, type:string, comment:), ]
+PREHOOK: query: -- expect source to contain 1 row (1,2)
+select * from source
+PREHOOK: type: QUERY
+PREHOOK: Input: x314@source
+#### A masked pattern was here ####
+POSTHOOK: query: -- expect source to contain 1 row (1,2)
+select * from source
+POSTHOOK: type: QUERY
+POSTHOOK: Input: x314@source
+#### A masked pattern was here ####
+1	2
+PREHOOK: query: insert into target1(z,x) select * from source
+PREHOOK: type: QUERY
+PREHOOK: Input: x314@source
+PREHOOK: Output: x314@target1
+POSTHOOK: query: insert into target1(z,x) select * from source
+POSTHOOK: type: QUERY
+POSTHOOK: Input: x314@source
+POSTHOOK: Output: x314@target1
+POSTHOOK: Lineage: target1.x SIMPLE [(source)source.FieldSchema(name:s2, type:int, comment:null), ]
+POSTHOOK: Lineage: target1.y EXPRESSION []
+POSTHOOK: Lineage: target1.z SIMPLE [(source)source.FieldSchema(name:s1, type:int, comment:null), ]
+PREHOOK: query: -- expect target1 to contain 1 row (2,NULL,1)
+select * from target1
+PREHOOK: type: QUERY
+PREHOOK: Input: x314@target1
+#### A masked pattern was here ####
+POSTHOOK: query: -- expect target1 to contain 1 row (2,NULL,1)
+select * from target1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: x314@target1
+#### A masked pattern was here ####
+2	NULL	1
+PREHOOK: query: drop database if exists x314 cascade
+PREHOOK: type: DROPDATABASE
+PREHOOK: Input: database:x314
+PREHOOK: Output: database:x314
+PREHOOK: Output: x314@source
+PREHOOK: Output: x314@target1
+PREHOOK: Output: x314@values__tmp__table__1
+POSTHOOK: query: drop database if exists x314 cascade
+POSTHOOK: type: DROPDATABASE
+POSTHOOK: Input: database:x314
+POSTHOOK: Output: database:x314
+POSTHOOK: Output: x314@source
+POSTHOOK: Output: x314@target1
+POSTHOOK: Output: x314@values__tmp__table__1


[45/50] [abbrv] hive git commit: HIVE-11568 : merge master into branch (Sergey Shelukhin)

Posted by se...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/c528294b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/TxnAbortedException.java
----------------------------------------------------------------------
diff --cc metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/TxnAbortedException.java
index ae39507,a877338..fe60838
--- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/TxnAbortedException.java
+++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/TxnAbortedException.java
@@@ -31,7 -33,9 +33,9 @@@ import javax.annotation.Generated
  import org.slf4j.Logger;
  import org.slf4j.LoggerFactory;
  
- public class TxnAbortedException extends TException implements org.apache.thrift.TBase<TxnAbortedException, TxnAbortedException._Fields>, java.io.Serializable, Cloneable {
+ @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
 -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3")
++@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-14")
+ public class TxnAbortedException extends TException implements org.apache.thrift.TBase<TxnAbortedException, TxnAbortedException._Fields>, java.io.Serializable, Cloneable, Comparable<TxnAbortedException> {
    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TxnAbortedException");
  
    private static final org.apache.thrift.protocol.TField MESSAGE_FIELD_DESC = new org.apache.thrift.protocol.TField("message", org.apache.thrift.protocol.TType.STRING, (short)1);

http://git-wip-us.apache.org/repos/asf/hive/blob/c528294b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/TxnInfo.java
----------------------------------------------------------------------
diff --cc metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/TxnInfo.java
index 18cbe53,8b255b9..266fbe1
--- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/TxnInfo.java
+++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/TxnInfo.java
@@@ -31,7 -33,9 +33,9 @@@ import javax.annotation.Generated
  import org.slf4j.Logger;
  import org.slf4j.LoggerFactory;
  
- public class TxnInfo implements org.apache.thrift.TBase<TxnInfo, TxnInfo._Fields>, java.io.Serializable, Cloneable {
+ @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
 -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3")
++@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-14")
+ public class TxnInfo implements org.apache.thrift.TBase<TxnInfo, TxnInfo._Fields>, java.io.Serializable, Cloneable, Comparable<TxnInfo> {
    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TxnInfo");
  
    private static final org.apache.thrift.protocol.TField ID_FIELD_DESC = new org.apache.thrift.protocol.TField("id", org.apache.thrift.protocol.TType.I64, (short)1);

http://git-wip-us.apache.org/repos/asf/hive/blob/c528294b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/TxnOpenException.java
----------------------------------------------------------------------
diff --cc metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/TxnOpenException.java
index 4f5d02d,05af505..18db1b8
--- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/TxnOpenException.java
+++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/TxnOpenException.java
@@@ -31,7 -33,9 +33,9 @@@ import javax.annotation.Generated
  import org.slf4j.Logger;
  import org.slf4j.LoggerFactory;
  
- public class TxnOpenException extends TException implements org.apache.thrift.TBase<TxnOpenException, TxnOpenException._Fields>, java.io.Serializable, Cloneable {
+ @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
 -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3")
++@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-14")
+ public class TxnOpenException extends TException implements org.apache.thrift.TBase<TxnOpenException, TxnOpenException._Fields>, java.io.Serializable, Cloneable, Comparable<TxnOpenException> {
    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TxnOpenException");
  
    private static final org.apache.thrift.protocol.TField MESSAGE_FIELD_DESC = new org.apache.thrift.protocol.TField("message", org.apache.thrift.protocol.TType.STRING, (short)1);

http://git-wip-us.apache.org/repos/asf/hive/blob/c528294b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Type.java
----------------------------------------------------------------------
diff --cc metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Type.java
index 1882b57,61e7ceb..b330ce2
--- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Type.java
+++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Type.java
@@@ -31,7 -33,9 +33,9 @@@ import javax.annotation.Generated
  import org.slf4j.Logger;
  import org.slf4j.LoggerFactory;
  
- public class Type implements org.apache.thrift.TBase<Type, Type._Fields>, java.io.Serializable, Cloneable {
+ @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
 -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3")
++@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-14")
+ public class Type implements org.apache.thrift.TBase<Type, Type._Fields>, java.io.Serializable, Cloneable, Comparable<Type> {
    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("Type");
  
    private static final org.apache.thrift.protocol.TField NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("name", org.apache.thrift.protocol.TType.STRING, (short)1);

http://git-wip-us.apache.org/repos/asf/hive/blob/c528294b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/UnknownDBException.java
----------------------------------------------------------------------
diff --cc metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/UnknownDBException.java
index ab91419,e05e79d..b7623ca
--- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/UnknownDBException.java
+++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/UnknownDBException.java
@@@ -31,7 -33,9 +33,9 @@@ import javax.annotation.Generated
  import org.slf4j.Logger;
  import org.slf4j.LoggerFactory;
  
- public class UnknownDBException extends TException implements org.apache.thrift.TBase<UnknownDBException, UnknownDBException._Fields>, java.io.Serializable, Cloneable {
+ @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
 -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3")
++@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-14")
+ public class UnknownDBException extends TException implements org.apache.thrift.TBase<UnknownDBException, UnknownDBException._Fields>, java.io.Serializable, Cloneable, Comparable<UnknownDBException> {
    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("UnknownDBException");
  
    private static final org.apache.thrift.protocol.TField MESSAGE_FIELD_DESC = new org.apache.thrift.protocol.TField("message", org.apache.thrift.protocol.TType.STRING, (short)1);

http://git-wip-us.apache.org/repos/asf/hive/blob/c528294b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/UnknownPartitionException.java
----------------------------------------------------------------------
diff --cc metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/UnknownPartitionException.java
index 7e28591,c626bf6..bdd674b
--- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/UnknownPartitionException.java
+++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/UnknownPartitionException.java
@@@ -31,7 -33,9 +33,9 @@@ import javax.annotation.Generated
  import org.slf4j.Logger;
  import org.slf4j.LoggerFactory;
  
- public class UnknownPartitionException extends TException implements org.apache.thrift.TBase<UnknownPartitionException, UnknownPartitionException._Fields>, java.io.Serializable, Cloneable {
+ @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
 -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3")
++@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-14")
+ public class UnknownPartitionException extends TException implements org.apache.thrift.TBase<UnknownPartitionException, UnknownPartitionException._Fields>, java.io.Serializable, Cloneable, Comparable<UnknownPartitionException> {
    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("UnknownPartitionException");
  
    private static final org.apache.thrift.protocol.TField MESSAGE_FIELD_DESC = new org.apache.thrift.protocol.TField("message", org.apache.thrift.protocol.TType.STRING, (short)1);

http://git-wip-us.apache.org/repos/asf/hive/blob/c528294b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/UnknownTableException.java
----------------------------------------------------------------------
diff --cc metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/UnknownTableException.java
index 7aa8012,2856121..768eb65
--- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/UnknownTableException.java
+++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/UnknownTableException.java
@@@ -31,7 -33,9 +33,9 @@@ import javax.annotation.Generated
  import org.slf4j.Logger;
  import org.slf4j.LoggerFactory;
  
- public class UnknownTableException extends TException implements org.apache.thrift.TBase<UnknownTableException, UnknownTableException._Fields>, java.io.Serializable, Cloneable {
+ @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
 -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3")
++@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-14")
+ public class UnknownTableException extends TException implements org.apache.thrift.TBase<UnknownTableException, UnknownTableException._Fields>, java.io.Serializable, Cloneable, Comparable<UnknownTableException> {
    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("UnknownTableException");
  
    private static final org.apache.thrift.protocol.TField MESSAGE_FIELD_DESC = new org.apache.thrift.protocol.TField("message", org.apache.thrift.protocol.TType.STRING, (short)1);

http://git-wip-us.apache.org/repos/asf/hive/blob/c528294b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/UnlockRequest.java
----------------------------------------------------------------------
diff --cc metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/UnlockRequest.java
index 54b949d,cf248e0..395c15f
--- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/UnlockRequest.java
+++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/UnlockRequest.java
@@@ -31,7 -33,9 +33,9 @@@ import javax.annotation.Generated
  import org.slf4j.Logger;
  import org.slf4j.LoggerFactory;
  
- public class UnlockRequest implements org.apache.thrift.TBase<UnlockRequest, UnlockRequest._Fields>, java.io.Serializable, Cloneable {
+ @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
 -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3")
++@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-14")
+ public class UnlockRequest implements org.apache.thrift.TBase<UnlockRequest, UnlockRequest._Fields>, java.io.Serializable, Cloneable, Comparable<UnlockRequest> {
    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("UnlockRequest");
  
    private static final org.apache.thrift.protocol.TField LOCKID_FIELD_DESC = new org.apache.thrift.protocol.TField("lockid", org.apache.thrift.protocol.TType.I64, (short)1);

http://git-wip-us.apache.org/repos/asf/hive/blob/c528294b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Version.java
----------------------------------------------------------------------
diff --cc metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Version.java
index aa83fd7,cc8d5f5..f3cbb74
--- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Version.java
+++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Version.java
@@@ -31,7 -33,9 +33,9 @@@ import javax.annotation.Generated
  import org.slf4j.Logger;
  import org.slf4j.LoggerFactory;
  
- public class Version implements org.apache.thrift.TBase<Version, Version._Fields>, java.io.Serializable, Cloneable {
+ @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
 -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3")
++@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-14")
+ public class Version implements org.apache.thrift.TBase<Version, Version._Fields>, java.io.Serializable, Cloneable, Comparable<Version> {
    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("Version");
  
    private static final org.apache.thrift.protocol.TField VERSION_FIELD_DESC = new org.apache.thrift.protocol.TField("version", org.apache.thrift.protocol.TType.STRING, (short)1);
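
(For illustration; not part of the merge itself.) Apart from the @Generated date bump and the @SuppressWarnings line, the visible change in the regenerated Java structs above is that each one now also implements Comparable. A hedged sketch of what that buys a caller, using the Version struct (hive_metastore.thrift defines it with "version" and "comments" string fields, so the usual Thrift setters are assumed):

import java.util.ArrayList;
import java.util.Collections;
import java.util.List;

import org.apache.hadoop.hive.metastore.api.Version;

public class SortVersionsSketch {
  public static void main(String[] args) {
    List<Version> versions = new ArrayList<>();
    Version a = new Version(); a.setVersion("1.2.0");  a.setComments("first");
    Version b = new Version(); b.setVersion("0.14.0"); b.setComments("second");
    versions.add(a);
    versions.add(b);
    // No explicit Comparator is needed any more: the Thrift-generated compareTo
    // compares fields in field-id order (version, then comments).
    Collections.sort(versions);
    System.out.println(versions);
  }
}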

http://git-wip-us.apache.org/repos/asf/hive/blob/c528294b/metastore/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php
----------------------------------------------------------------------
diff --cc metastore/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php
index 1684674,ae47cb5..9c73767
--- a/metastore/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php
+++ b/metastore/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php
@@@ -15,131 -16,997 +16,1000 @@@ use Thrift\Protocol\TBinaryProtocolAcce
  use Thrift\Exception\TApplicationException;
  
  
+ /**
+  * This interface is live.
+  */
  interface ThriftHiveMetastoreIf extends \FacebookServiceIf {
+   /**
+    * @param string $key
+    * @return string
+    * @throws \metastore\MetaException
+    */
    public function getMetaConf($key);
+   /**
+    * @param string $key
+    * @param string $value
+    * @throws \metastore\MetaException
+    */
    public function setMetaConf($key, $value);
+   /**
+    * @param \metastore\Database $database
+    * @throws \metastore\AlreadyExistsException
+    * @throws \metastore\InvalidObjectException
+    * @throws \metastore\MetaException
+    */
    public function create_database(\metastore\Database $database);
+   /**
+    * @param string $name
+    * @return \metastore\Database
+    * @throws \metastore\NoSuchObjectException
+    * @throws \metastore\MetaException
+    */
    public function get_database($name);
+   /**
+    * @param string $name
+    * @param bool $deleteData
+    * @param bool $cascade
+    * @throws \metastore\NoSuchObjectException
+    * @throws \metastore\InvalidOperationException
+    * @throws \metastore\MetaException
+    */
    public function drop_database($name, $deleteData, $cascade);
+   /**
+    * @param string $pattern
+    * @return string[]
+    * @throws \metastore\MetaException
+    */
    public function get_databases($pattern);
+   /**
+    * @return string[]
+    * @throws \metastore\MetaException
+    */
    public function get_all_databases();
+   /**
+    * @param string $dbname
+    * @param \metastore\Database $db
+    * @throws \metastore\MetaException
+    * @throws \metastore\NoSuchObjectException
+    */
    public function alter_database($dbname, \metastore\Database $db);
+   /**
+    * @param string $name
+    * @return \metastore\Type
+    * @throws \metastore\MetaException
+    * @throws \metastore\NoSuchObjectException
+    */
    public function get_type($name);
+   /**
+    * @param \metastore\Type $type
+    * @return bool
+    * @throws \metastore\AlreadyExistsException
+    * @throws \metastore\InvalidObjectException
+    * @throws \metastore\MetaException
+    */
    public function create_type(\metastore\Type $type);
+   /**
+    * @param string $type
+    * @return bool
+    * @throws \metastore\MetaException
+    * @throws \metastore\NoSuchObjectException
+    */
    public function drop_type($type);
+   /**
+    * @param string $name
+    * @return array
+    * @throws \metastore\MetaException
+    */
    public function get_type_all($name);
+   /**
+    * @param string $db_name
+    * @param string $table_name
+    * @return \metastore\FieldSchema[]
+    * @throws \metastore\MetaException
+    * @throws \metastore\UnknownTableException
+    * @throws \metastore\UnknownDBException
+    */
    public function get_fields($db_name, $table_name);
+   /**
+    * @param string $db_name
+    * @param string $table_name
+    * @param \metastore\EnvironmentContext $environment_context
+    * @return \metastore\FieldSchema[]
+    * @throws \metastore\MetaException
+    * @throws \metastore\UnknownTableException
+    * @throws \metastore\UnknownDBException
+    */
    public function get_fields_with_environment_context($db_name, $table_name, \metastore\EnvironmentContext $environment_context);
+   /**
+    * @param string $db_name
+    * @param string $table_name
+    * @return \metastore\FieldSchema[]
+    * @throws \metastore\MetaException
+    * @throws \metastore\UnknownTableException
+    * @throws \metastore\UnknownDBException
+    */
    public function get_schema($db_name, $table_name);
+   /**
+    * @param string $db_name
+    * @param string $table_name
+    * @param \metastore\EnvironmentContext $environment_context
+    * @return \metastore\FieldSchema[]
+    * @throws \metastore\MetaException
+    * @throws \metastore\UnknownTableException
+    * @throws \metastore\UnknownDBException
+    */
    public function get_schema_with_environment_context($db_name, $table_name, \metastore\EnvironmentContext $environment_context);
+   /**
+    * @param \metastore\Table $tbl
+    * @throws \metastore\AlreadyExistsException
+    * @throws \metastore\InvalidObjectException
+    * @throws \metastore\MetaException
+    * @throws \metastore\NoSuchObjectException
+    */
    public function create_table(\metastore\Table $tbl);
+   /**
+    * @param \metastore\Table $tbl
+    * @param \metastore\EnvironmentContext $environment_context
+    * @throws \metastore\AlreadyExistsException
+    * @throws \metastore\InvalidObjectException
+    * @throws \metastore\MetaException
+    * @throws \metastore\NoSuchObjectException
+    */
    public function create_table_with_environment_context(\metastore\Table $tbl, \metastore\EnvironmentContext $environment_context);
+   /**
+    * @param string $dbname
+    * @param string $name
+    * @param bool $deleteData
+    * @throws \metastore\NoSuchObjectException
+    * @throws \metastore\MetaException
+    */
    public function drop_table($dbname, $name, $deleteData);
+   /**
+    * @param string $dbname
+    * @param string $name
+    * @param bool $deleteData
+    * @param \metastore\EnvironmentContext $environment_context
+    * @throws \metastore\NoSuchObjectException
+    * @throws \metastore\MetaException
+    */
    public function drop_table_with_environment_context($dbname, $name, $deleteData, \metastore\EnvironmentContext $environment_context);
+   /**
+    * @param string $db_name
+    * @param string $pattern
+    * @return string[]
+    * @throws \metastore\MetaException
+    */
    public function get_tables($db_name, $pattern);
+   /**
+    * @param string $db_name
+    * @return string[]
+    * @throws \metastore\MetaException
+    */
    public function get_all_tables($db_name);
+   /**
+    * @param string $dbname
+    * @param string $tbl_name
+    * @return \metastore\Table
+    * @throws \metastore\MetaException
+    * @throws \metastore\NoSuchObjectException
+    */
    public function get_table($dbname, $tbl_name);
-   public function get_table_objects_by_name($dbname, $tbl_names);
+   /**
+    * @param string $dbname
+    * @param string[] $tbl_names
+    * @return \metastore\Table[]
+    * @throws \metastore\MetaException
+    * @throws \metastore\InvalidOperationException
+    * @throws \metastore\UnknownDBException
+    */
+   public function get_table_objects_by_name($dbname, array $tbl_names);
+   /**
+    * @param string $dbname
+    * @param string $filter
+    * @param int $max_tables
+    * @return string[]
+    * @throws \metastore\MetaException
+    * @throws \metastore\InvalidOperationException
+    * @throws \metastore\UnknownDBException
+    */
    public function get_table_names_by_filter($dbname, $filter, $max_tables);
+   /**
+    * @param string $dbname
+    * @param string $tbl_name
+    * @param \metastore\Table $new_tbl
+    * @throws \metastore\InvalidOperationException
+    * @throws \metastore\MetaException
+    */
    public function alter_table($dbname, $tbl_name, \metastore\Table $new_tbl);
+   /**
+    * @param string $dbname
+    * @param string $tbl_name
+    * @param \metastore\Table $new_tbl
+    * @param \metastore\EnvironmentContext $environment_context
+    * @throws \metastore\InvalidOperationException
+    * @throws \metastore\MetaException
+    */
    public function alter_table_with_environment_context($dbname, $tbl_name, \metastore\Table $new_tbl, \metastore\EnvironmentContext $environment_context);
+   /**
+    * @param string $dbname
+    * @param string $tbl_name
+    * @param \metastore\Table $new_tbl
+    * @param bool $cascade
+    * @throws \metastore\InvalidOperationException
+    * @throws \metastore\MetaException
+    */
    public function alter_table_with_cascade($dbname, $tbl_name, \metastore\Table $new_tbl, $cascade);
+   /**
+    * @param \metastore\Partition $new_part
+    * @return \metastore\Partition
+    * @throws \metastore\InvalidObjectException
+    * @throws \metastore\AlreadyExistsException
+    * @throws \metastore\MetaException
+    */
    public function add_partition(\metastore\Partition $new_part);
+   /**
+    * @param \metastore\Partition $new_part
+    * @param \metastore\EnvironmentContext $environment_context
+    * @return \metastore\Partition
+    * @throws \metastore\InvalidObjectException
+    * @throws \metastore\AlreadyExistsException
+    * @throws \metastore\MetaException
+    */
    public function add_partition_with_environment_context(\metastore\Partition $new_part, \metastore\EnvironmentContext $environment_context);
-   public function add_partitions($new_parts);
-   public function add_partitions_pspec($new_parts);
-   public function append_partition($db_name, $tbl_name, $part_vals);
+   /**
+    * @param \metastore\Partition[] $new_parts
+    * @return int
+    * @throws \metastore\InvalidObjectException
+    * @throws \metastore\AlreadyExistsException
+    * @throws \metastore\MetaException
+    */
+   public function add_partitions(array $new_parts);
+   /**
+    * @param \metastore\PartitionSpec[] $new_parts
+    * @return int
+    * @throws \metastore\InvalidObjectException
+    * @throws \metastore\AlreadyExistsException
+    * @throws \metastore\MetaException
+    */
+   public function add_partitions_pspec(array $new_parts);
+   /**
+    * @param string $db_name
+    * @param string $tbl_name
+    * @param string[] $part_vals
+    * @return \metastore\Partition
+    * @throws \metastore\InvalidObjectException
+    * @throws \metastore\AlreadyExistsException
+    * @throws \metastore\MetaException
+    */
+   public function append_partition($db_name, $tbl_name, array $part_vals);
+   /**
+    * @param \metastore\AddPartitionsRequest $request
+    * @return \metastore\AddPartitionsResult
+    * @throws \metastore\InvalidObjectException
+    * @throws \metastore\AlreadyExistsException
+    * @throws \metastore\MetaException
+    */
    public function add_partitions_req(\metastore\AddPartitionsRequest $request);
-   public function append_partition_with_environment_context($db_name, $tbl_name, $part_vals, \metastore\EnvironmentContext $environment_context);
+   /**
+    * @param string $db_name
+    * @param string $tbl_name
+    * @param string[] $part_vals
+    * @param \metastore\EnvironmentContext $environment_context
+    * @return \metastore\Partition
+    * @throws \metastore\InvalidObjectException
+    * @throws \metastore\AlreadyExistsException
+    * @throws \metastore\MetaException
+    */
+   public function append_partition_with_environment_context($db_name, $tbl_name, array $part_vals, \metastore\EnvironmentContext $environment_context);
+   /**
+    * @param string $db_name
+    * @param string $tbl_name
+    * @param string $part_name
+    * @return \metastore\Partition
+    * @throws \metastore\InvalidObjectException
+    * @throws \metastore\AlreadyExistsException
+    * @throws \metastore\MetaException
+    */
    public function append_partition_by_name($db_name, $tbl_name, $part_name);
+   /**
+    * @param string $db_name
+    * @param string $tbl_name
+    * @param string $part_name
+    * @param \metastore\EnvironmentContext $environment_context
+    * @return \metastore\Partition
+    * @throws \metastore\InvalidObjectException
+    * @throws \metastore\AlreadyExistsException
+    * @throws \metastore\MetaException
+    */
    public function append_partition_by_name_with_environment_context($db_name, $tbl_name, $part_name, \metastore\EnvironmentContext $environment_context);
-   public function drop_partition($db_name, $tbl_name, $part_vals, $deleteData);
-   public function drop_partition_with_environment_context($db_name, $tbl_name, $part_vals, $deleteData, \metastore\EnvironmentContext $environment_context);
+   /**
+    * @param string $db_name
+    * @param string $tbl_name
+    * @param string[] $part_vals
+    * @param bool $deleteData
+    * @return bool
+    * @throws \metastore\NoSuchObjectException
+    * @throws \metastore\MetaException
+    */
+   public function drop_partition($db_name, $tbl_name, array $part_vals, $deleteData);
+   /**
+    * @param string $db_name
+    * @param string $tbl_name
+    * @param string[] $part_vals
+    * @param bool $deleteData
+    * @param \metastore\EnvironmentContext $environment_context
+    * @return bool
+    * @throws \metastore\NoSuchObjectException
+    * @throws \metastore\MetaException
+    */
+   public function drop_partition_with_environment_context($db_name, $tbl_name, array $part_vals, $deleteData, \metastore\EnvironmentContext $environment_context);
+   /**
+    * @param string $db_name
+    * @param string $tbl_name
+    * @param string $part_name
+    * @param bool $deleteData
+    * @return bool
+    * @throws \metastore\NoSuchObjectException
+    * @throws \metastore\MetaException
+    */
    public function drop_partition_by_name($db_name, $tbl_name, $part_name, $deleteData);
+   /**
+    * @param string $db_name
+    * @param string $tbl_name
+    * @param string $part_name
+    * @param bool $deleteData
+    * @param \metastore\EnvironmentContext $environment_context
+    * @return bool
+    * @throws \metastore\NoSuchObjectException
+    * @throws \metastore\MetaException
+    */
    public function drop_partition_by_name_with_environment_context($db_name, $tbl_name, $part_name, $deleteData, \metastore\EnvironmentContext $environment_context);
+   /**
+    * @param \metastore\DropPartitionsRequest $req
+    * @return \metastore\DropPartitionsResult
+    * @throws \metastore\NoSuchObjectException
+    * @throws \metastore\MetaException
+    */
    public function drop_partitions_req(\metastore\DropPartitionsRequest $req);
-   public function get_partition($db_name, $tbl_name, $part_vals);
-   public function exchange_partition($partitionSpecs, $source_db, $source_table_name, $dest_db, $dest_table_name);
-   public function get_partition_with_auth($db_name, $tbl_name, $part_vals, $user_name, $group_names);
+   /**
+    * @param string $db_name
+    * @param string $tbl_name
+    * @param string[] $part_vals
+    * @return \metastore\Partition
+    * @throws \metastore\MetaException
+    * @throws \metastore\NoSuchObjectException
+    */
+   public function get_partition($db_name, $tbl_name, array $part_vals);
+   /**
+    * @param array $partitionSpecs
+    * @param string $source_db
+    * @param string $source_table_name
+    * @param string $dest_db
+    * @param string $dest_table_name
+    * @return \metastore\Partition
+    * @throws \metastore\MetaException
+    * @throws \metastore\NoSuchObjectException
+    * @throws \metastore\InvalidObjectException
+    * @throws \metastore\InvalidInputException
+    */
+   public function exchange_partition(array $partitionSpecs, $source_db, $source_table_name, $dest_db, $dest_table_name);
+   /**
+    * @param string $db_name
+    * @param string $tbl_name
+    * @param string[] $part_vals
+    * @param string $user_name
+    * @param string[] $group_names
+    * @return \metastore\Partition
+    * @throws \metastore\MetaException
+    * @throws \metastore\NoSuchObjectException
+    */
+   public function get_partition_with_auth($db_name, $tbl_name, array $part_vals, $user_name, array $group_names);
+   /**
+    * @param string $db_name
+    * @param string $tbl_name
+    * @param string $part_name
+    * @return \metastore\Partition
+    * @throws \metastore\MetaException
+    * @throws \metastore\NoSuchObjectException
+    */
    public function get_partition_by_name($db_name, $tbl_name, $part_name);
+   /**
+    * @param string $db_name
+    * @param string $tbl_name
+    * @param int $max_parts
+    * @return \metastore\Partition[]
+    * @throws \metastore\NoSuchObjectException
+    * @throws \metastore\MetaException
+    */
    public function get_partitions($db_name, $tbl_name, $max_parts);
-   public function get_partitions_with_auth($db_name, $tbl_name, $max_parts, $user_name, $group_names);
+   /**
+    * @param string $db_name
+    * @param string $tbl_name
+    * @param int $max_parts
+    * @param string $user_name
+    * @param string[] $group_names
+    * @return \metastore\Partition[]
+    * @throws \metastore\NoSuchObjectException
+    * @throws \metastore\MetaException
+    */
+   public function get_partitions_with_auth($db_name, $tbl_name, $max_parts, $user_name, array $group_names);
+   /**
+    * @param string $db_name
+    * @param string $tbl_name
+    * @param int $max_parts
+    * @return \metastore\PartitionSpec[]
+    * @throws \metastore\NoSuchObjectException
+    * @throws \metastore\MetaException
+    */
    public function get_partitions_pspec($db_name, $tbl_name, $max_parts);
+   /**
+    * @param string $db_name
+    * @param string $tbl_name
+    * @param int $max_parts
+    * @return string[]
+    * @throws \metastore\MetaException
+    */
    public function get_partition_names($db_name, $tbl_name, $max_parts);
-   public function get_partitions_ps($db_name, $tbl_name, $part_vals, $max_parts);
-   public function get_partitions_ps_with_auth($db_name, $tbl_name, $part_vals, $max_parts, $user_name, $group_names);
-   public function get_partition_names_ps($db_name, $tbl_name, $part_vals, $max_parts);
+   /**
+    * @param string $db_name
+    * @param string $tbl_name
+    * @param string[] $part_vals
+    * @param int $max_parts
+    * @return \metastore\Partition[]
+    * @throws \metastore\MetaException
+    * @throws \metastore\NoSuchObjectException
+    */
+   public function get_partitions_ps($db_name, $tbl_name, array $part_vals, $max_parts);
+   /**
+    * @param string $db_name
+    * @param string $tbl_name
+    * @param string[] $part_vals
+    * @param int $max_parts
+    * @param string $user_name
+    * @param string[] $group_names
+    * @return \metastore\Partition[]
+    * @throws \metastore\NoSuchObjectException
+    * @throws \metastore\MetaException
+    */
+   public function get_partitions_ps_with_auth($db_name, $tbl_name, array $part_vals, $max_parts, $user_name, array $group_names);
+   /**
+    * @param string $db_name
+    * @param string $tbl_name
+    * @param string[] $part_vals
+    * @param int $max_parts
+    * @return string[]
+    * @throws \metastore\MetaException
+    * @throws \metastore\NoSuchObjectException
+    */
+   public function get_partition_names_ps($db_name, $tbl_name, array $part_vals, $max_parts);
+   /**
+    * @param string $db_name
+    * @param string $tbl_name
+    * @param string $filter
+    * @param int $max_parts
+    * @return \metastore\Partition[]
+    * @throws \metastore\MetaException
+    * @throws \metastore\NoSuchObjectException
+    */
    public function get_partitions_by_filter($db_name, $tbl_name, $filter, $max_parts);
+   /**
+    * @param string $db_name
+    * @param string $tbl_name
+    * @param string $filter
+    * @param int $max_parts
+    * @return \metastore\PartitionSpec[]
+    * @throws \metastore\MetaException
+    * @throws \metastore\NoSuchObjectException
+    */
    public function get_part_specs_by_filter($db_name, $tbl_name, $filter, $max_parts);
+   /**
+    * @param \metastore\PartitionsByExprRequest $req
+    * @return \metastore\PartitionsByExprResult
+    * @throws \metastore\MetaException
+    * @throws \metastore\NoSuchObjectException
+    */
    public function get_partitions_by_expr(\metastore\PartitionsByExprRequest $req);
-   public function get_partitions_by_names($db_name, $tbl_name, $names);
+   /**
+    * @param string $db_name
+    * @param string $tbl_name
+    * @param string[] $names
+    * @return \metastore\Partition[]
+    * @throws \metastore\MetaException
+    * @throws \metastore\NoSuchObjectException
+    */
+   public function get_partitions_by_names($db_name, $tbl_name, array $names);
+   /**
+    * @param string $db_name
+    * @param string $tbl_name
+    * @param \metastore\Partition $new_part
+    * @throws \metastore\InvalidOperationException
+    * @throws \metastore\MetaException
+    */
    public function alter_partition($db_name, $tbl_name, \metastore\Partition $new_part);
-   public function alter_partitions($db_name, $tbl_name, $new_parts);
+   /**
+    * @param string $db_name
+    * @param string $tbl_name
+    * @param \metastore\Partition[] $new_parts
+    * @throws \metastore\InvalidOperationException
+    * @throws \metastore\MetaException
+    */
+   public function alter_partitions($db_name, $tbl_name, array $new_parts);
+   /**
+    * @param string $db_name
+    * @param string $tbl_name
+    * @param \metastore\Partition $new_part
+    * @param \metastore\EnvironmentContext $environment_context
+    * @throws \metastore\InvalidOperationException
+    * @throws \metastore\MetaException
+    */
    public function alter_partition_with_environment_context($db_name, $tbl_name, \metastore\Partition $new_part, \metastore\EnvironmentContext $environment_context);
-   public function rename_partition($db_name, $tbl_name, $part_vals, \metastore\Partition $new_part);
-   public function partition_name_has_valid_characters($part_vals, $throw_exception);
+   /**
+    * @param string $db_name
+    * @param string $tbl_name
+    * @param string[] $part_vals
+    * @param \metastore\Partition $new_part
+    * @throws \metastore\InvalidOperationException
+    * @throws \metastore\MetaException
+    */
+   public function rename_partition($db_name, $tbl_name, array $part_vals, \metastore\Partition $new_part);
+   /**
+    * @param string[] $part_vals
+    * @param bool $throw_exception
+    * @return bool
+    * @throws \metastore\MetaException
+    */
+   public function partition_name_has_valid_characters(array $part_vals, $throw_exception);
+   /**
+    * @param string $name
+    * @param string $defaultValue
+    * @return string
+    * @throws \metastore\ConfigValSecurityException
+    */
    public function get_config_value($name, $defaultValue);
+   /**
+    * @param string $part_name
+    * @return string[]
+    * @throws \metastore\MetaException
+    */
    public function partition_name_to_vals($part_name);
+   /**
+    * @param string $part_name
+    * @return array
+    * @throws \metastore\MetaException
+    */
    public function partition_name_to_spec($part_name);
-   public function markPartitionForEvent($db_name, $tbl_name, $part_vals, $eventType);
-   public function isPartitionMarkedForEvent($db_name, $tbl_name, $part_vals, $eventType);
+   /**
+    * @param string $db_name
+    * @param string $tbl_name
+    * @param array $part_vals
+    * @param int $eventType
+    * @throws \metastore\MetaException
+    * @throws \metastore\NoSuchObjectException
+    * @throws \metastore\UnknownDBException
+    * @throws \metastore\UnknownTableException
+    * @throws \metastore\UnknownPartitionException
+    * @throws \metastore\InvalidPartitionException
+    */
+   public function markPartitionForEvent($db_name, $tbl_name, array $part_vals, $eventType);
+   /**
+    * @param string $db_name
+    * @param string $tbl_name
+    * @param array $part_vals
+    * @param int $eventType
+    * @return bool
+    * @throws \metastore\MetaException
+    * @throws \metastore\NoSuchObjectException
+    * @throws \metastore\UnknownDBException
+    * @throws \metastore\UnknownTableException
+    * @throws \metastore\UnknownPartitionException
+    * @throws \metastore\InvalidPartitionException
+    */
+   public function isPartitionMarkedForEvent($db_name, $tbl_name, array $part_vals, $eventType);
+   /**
+    * @param \metastore\Index $new_index
+    * @param \metastore\Table $index_table
+    * @return \metastore\Index
+    * @throws \metastore\InvalidObjectException
+    * @throws \metastore\AlreadyExistsException
+    * @throws \metastore\MetaException
+    */
    public function add_index(\metastore\Index $new_index, \metastore\Table $index_table);
+   /**
+    * @param string $dbname
+    * @param string $base_tbl_name
+    * @param string $idx_name
+    * @param \metastore\Index $new_idx
+    * @throws \metastore\InvalidOperationException
+    * @throws \metastore\MetaException
+    */
    public function alter_index($dbname, $base_tbl_name, $idx_name, \metastore\Index $new_idx);
+   /**
+    * @param string $db_name
+    * @param string $tbl_name
+    * @param string $index_name
+    * @param bool $deleteData
+    * @return bool
+    * @throws \metastore\NoSuchObjectException
+    * @throws \metastore\MetaException
+    */
    public function drop_index_by_name($db_name, $tbl_name, $index_name, $deleteData);
+   /**
+    * @param string $db_name
+    * @param string $tbl_name
+    * @param string $index_name
+    * @return \metastore\Index
+    * @throws \metastore\MetaException
+    * @throws \metastore\NoSuchObjectException
+    */
    public function get_index_by_name($db_name, $tbl_name, $index_name);
+   /**
+    * @param string $db_name
+    * @param string $tbl_name
+    * @param int $max_indexes
+    * @return \metastore\Index[]
+    * @throws \metastore\NoSuchObjectException
+    * @throws \metastore\MetaException
+    */
    public function get_indexes($db_name, $tbl_name, $max_indexes);
+   /**
+    * @param string $db_name
+    * @param string $tbl_name
+    * @param int $max_indexes
+    * @return string[]
+    * @throws \metastore\MetaException
+    */
    public function get_index_names($db_name, $tbl_name, $max_indexes);
+   /**
+    * @param \metastore\ColumnStatistics $stats_obj
+    * @return bool
+    * @throws \metastore\NoSuchObjectException
+    * @throws \metastore\InvalidObjectException
+    * @throws \metastore\MetaException
+    * @throws \metastore\InvalidInputException
+    */
    public function update_table_column_statistics(\metastore\ColumnStatistics $stats_obj);
+   /**
+    * @param \metastore\ColumnStatistics $stats_obj
+    * @return bool
+    * @throws \metastore\NoSuchObjectException
+    * @throws \metastore\InvalidObjectException
+    * @throws \metastore\MetaException
+    * @throws \metastore\InvalidInputException
+    */
    public function update_partition_column_statistics(\metastore\ColumnStatistics $stats_obj);
+   /**
+    * @param string $db_name
+    * @param string $tbl_name
+    * @param string $col_name
+    * @return \metastore\ColumnStatistics
+    * @throws \metastore\NoSuchObjectException
+    * @throws \metastore\MetaException
+    * @throws \metastore\InvalidInputException
+    * @throws \metastore\InvalidObjectException
+    */
    public function get_table_column_statistics($db_name, $tbl_name, $col_name);
+   /**
+    * @param string $db_name
+    * @param string $tbl_name
+    * @param string $part_name
+    * @param string $col_name
+    * @return \metastore\ColumnStatistics
+    * @throws \metastore\NoSuchObjectException
+    * @throws \metastore\MetaException
+    * @throws \metastore\InvalidInputException
+    * @throws \metastore\InvalidObjectException
+    */
    public function get_partition_column_statistics($db_name, $tbl_name, $part_name, $col_name);
+   /**
+    * @param \metastore\TableStatsRequest $request
+    * @return \metastore\TableStatsResult
+    * @throws \metastore\NoSuchObjectException
+    * @throws \metastore\MetaException
+    */
    public function get_table_statistics_req(\metastore\TableStatsRequest $request);
+   /**
+    * @param \metastore\PartitionsStatsRequest $request
+    * @return \metastore\PartitionsStatsResult
+    * @throws \metastore\NoSuchObjectException
+    * @throws \metastore\MetaException
+    */
    public function get_partitions_statistics_req(\metastore\PartitionsStatsRequest $request);
+   /**
+    * @param \metastore\PartitionsStatsRequest $request
+    * @return \metastore\AggrStats
+    * @throws \metastore\NoSuchObjectException
+    * @throws \metastore\MetaException
+    */
    public function get_aggr_stats_for(\metastore\PartitionsStatsRequest $request);
+   /**
+    * @param \metastore\SetPartitionsStatsRequest $request
+    * @return bool
+    * @throws \metastore\NoSuchObjectException
+    * @throws \metastore\InvalidObjectException
+    * @throws \metastore\MetaException
+    * @throws \metastore\InvalidInputException
+    */
    public function set_aggr_stats_for(\metastore\SetPartitionsStatsRequest $request);
+   /**
+    * @param string $db_name
+    * @param string $tbl_name
+    * @param string $part_name
+    * @param string $col_name
+    * @return bool
+    * @throws \metastore\NoSuchObjectException
+    * @throws \metastore\MetaException
+    * @throws \metastore\InvalidObjectException
+    * @throws \metastore\InvalidInputException
+    */
    public function delete_partition_column_statistics($db_name, $tbl_name, $part_name, $col_name);
+   /**
+    * @param string $db_name
+    * @param string $tbl_name
+    * @param string $col_name
+    * @return bool
+    * @throws \metastore\NoSuchObjectException
+    * @throws \metastore\MetaException
+    * @throws \metastore\InvalidObjectException
+    * @throws \metastore\InvalidInputException
+    */
    public function delete_table_column_statistics($db_name, $tbl_name, $col_name);
+   /**
+    * @param \metastore\Function $func
+    * @throws \metastore\AlreadyExistsException
+    * @throws \metastore\InvalidObjectException
+    * @throws \metastore\MetaException
+    * @throws \metastore\NoSuchObjectException
+    */
    public function create_function(\metastore\Function $func);
+   /**
+    * @param string $dbName
+    * @param string $funcName
+    * @throws \metastore\NoSuchObjectException
+    * @throws \metastore\MetaException
+    */
    public function drop_function($dbName, $funcName);
+   /**
+    * @param string $dbName
+    * @param string $funcName
+    * @param \metastore\Function $newFunc
+    * @throws \metastore\InvalidOperationException
+    * @throws \metastore\MetaException
+    */
    public function alter_function($dbName, $funcName, \metastore\Function $newFunc);
+   /**
+    * @param string $dbName
+    * @param string $pattern
+    * @return string[]
+    * @throws \metastore\MetaException
+    */
    public function get_functions($dbName, $pattern);
+   /**
+    * @param string $dbName
+    * @param string $funcName
+    * @return \metastore\Function
+    * @throws \metastore\MetaException
+    * @throws \metastore\NoSuchObjectException
+    */
    public function get_function($dbName, $funcName);
+   /**
+    * @return \metastore\GetAllFunctionsResponse
+    * @throws \metastore\MetaException
+    */
+   public function get_all_functions();
+   /**
+    * @param \metastore\Role $role
+    * @return bool
+    * @throws \metastore\MetaException
+    */
    public function create_role(\metastore\Role $role);
+   /**
+    * @param string $role_name
+    * @return bool
+    * @throws \metastore\MetaException
+    */
    public function drop_role($role_name);
+   /**
+    * @return string[]
+    * @throws \metastore\MetaException
+    */
    public function get_role_names();
+   /**
+    * @param string $role_name
+    * @param string $principal_name
+    * @param int $principal_type
+    * @param string $grantor
+    * @param int $grantorType
+    * @param bool $grant_option
+    * @return bool
+    * @throws \metastore\MetaException
+    */
    public function grant_role($role_name, $principal_name, $principal_type, $grantor, $grantorType, $grant_option);
+   /**
+    * @param string $role_name
+    * @param string $principal_name
+    * @param int $principal_type
+    * @return bool
+    * @throws \metastore\MetaException
+    */
    public function revoke_role($role_name, $principal_name, $principal_type);
+   /**
+    * @param string $principal_name
+    * @param int $principal_type
+    * @return \metastore\Role[]
+    * @throws \metastore\MetaException
+    */
    public function list_roles($principal_name, $principal_type);
+   /**
+    * @param \metastore\GrantRevokeRoleRequest $request
+    * @return \metastore\GrantRevokeRoleResponse
+    * @throws \metastore\MetaException
+    */
    public function grant_revoke_role(\metastore\GrantRevokeRoleRequest $request);
+   /**
+    * @param \metastore\GetPrincipalsInRoleRequest $request
+    * @return \metastore\GetPrincipalsInRoleResponse
+    * @throws \metastore\MetaException
+    */
    public function get_principals_in_role(\metastore\GetPrincipalsInRoleRequest $request);
+   /**
+    * @param \metastore\GetRoleGrantsForPrincipalRequest $request
+    * @return \metastore\GetRoleGrantsForPrincipalResponse
+    * @throws \metastore\MetaException
+    */
    public function get_role_grants_for_principal(\metastore\GetRoleGrantsForPrincipalRequest $request);
-   public function get_privilege_set(\metastore\HiveObjectRef $hiveObject, $user_name, $group_names);
+   /**
+    * @param \metastore\HiveObjectRef $hiveObject
+    * @param string $user_name
+    * @param string[] $group_names
+    * @return \metastore\PrincipalPrivilegeSet
+    * @throws \metastore\MetaException
+    */
+   public function get_privilege_set(\metastore\HiveObjectRef $hiveObject, $user_name, array $group_names);
+   /**
+    * @param string $principal_name
+    * @param int $principal_type
+    * @param \metastore\HiveObjectRef $hiveObject
+    * @return \metastore\HiveObjectPrivilege[]
+    * @throws \metastore\MetaException
+    */
    public function list_privileges($principal_name, $principal_type, \metastore\HiveObjectRef $hiveObject);
+   /**
+    * @param \metastore\PrivilegeBag $privileges
+    * @return bool
+    * @throws \metastore\MetaException
+    */
    public function grant_privileges(\metastore\PrivilegeBag $privileges);
+   /**
+    * @param \metastore\PrivilegeBag $privileges
+    * @return bool
+    * @throws \metastore\MetaException
+    */
    public function revoke_privileges(\metastore\PrivilegeBag $privileges);
+   /**
+    * @param \metastore\GrantRevokePrivilegeRequest $request
+    * @return \metastore\GrantRevokePrivilegeResponse
+    * @throws \metastore\MetaException
+    */
    public function grant_revoke_privileges(\metastore\GrantRevokePrivilegeRequest $request);
-   public function set_ugi($user_name, $group_names);
+   /**
+    * @param string $user_name
+    * @param string[] $group_names
+    * @return string[]
+    * @throws \metastore\MetaException
+    */
+   public function set_ugi($user_name, array $group_names);
+   /**
+    * @param string $token_owner
+    * @param string $renewer_kerberos_principal_name
+    * @return string
+    * @throws \metastore\MetaException
+    */
    public function get_delegation_token($token_owner, $renewer_kerberos_principal_name);
+   /**
+    * @param string $token_str_form
+    * @return int
+    * @throws \metastore\MetaException
+    */
    public function renew_delegation_token($token_str_form);
+   /**
+    * @param string $token_str_form
+    * @throws \metastore\MetaException
+    */
    public function cancel_delegation_token($token_str_form);
+   /**
+    * @return \metastore\GetOpenTxnsResponse
+    */
    public function get_open_txns();
+   /**
+    * @return \metastore\GetOpenTxnsInfoResponse
+    */
    public function get_open_txns_info();
+   /**
+    * @param \metastore\OpenTxnRequest $rqst
+    * @return \metastore\OpenTxnsResponse
+    */
    public function open_txns(\metastore\OpenTxnRequest $rqst);
+   /**
+    * @param \metastore\AbortTxnRequest $rqst
+    * @throws \metastore\NoSuchTxnException
+    */
    public function abort_txn(\metastore\AbortTxnRequest $rqst);
+   /**
+    * @param \metastore\CommitTxnRequest $rqst
+    * @throws \metastore\NoSuchTxnException
+    * @throws \metastore\TxnAbortedException
+    */
    public function commit_txn(\metastore\CommitTxnRequest $rqst);
+   /**
+    * @param \metastore\LockRequest $rqst
+    * @return \metastore\LockResponse
+    * @throws \metastore\NoSuchTxnException
+    * @throws \metastore\TxnAbortedException
+    */
    public function lock(\metastore\LockRequest $rqst);
+   /**
+    * @param \metastore\CheckLockRequest $rqst
+    * @return \metastore\LockResponse
+    * @throws \metastore\NoSuchTxnException
+    * @throws \metastore\TxnAbortedException
+    * @throws \metastore\NoSuchLockException
+    */
    public function check_lock(\metastore\CheckLockRequest $rqst);
+   /**
+    * @param \metastore\UnlockRequest $rqst
+    * @throws \metastore\NoSuchLockException
+    * @throws \metastore\TxnOpenException
+    */
    public function unlock(\metastore\UnlockRequest $rqst);
+   /**
+    * @param \metastore\ShowLocksRequest $rqst
+    * @return \metastore\ShowLocksResponse
+    */
    public function show_locks(\metastore\ShowLocksRequest $rqst);
+   /**
+    * @param \metastore\HeartbeatRequest $ids
+    * @throws \metastore\NoSuchLockException
+    * @throws \metastore\NoSuchTxnException
+    * @throws \metastore\TxnAbortedException
+    */
    public function heartbeat(\metastore\HeartbeatRequest $ids);
+   /**
+    * @param \metastore\HeartbeatTxnRangeRequest $txns
+    * @return \metastore\HeartbeatTxnRangeResponse
+    */
    public function heartbeat_txn_range(\metastore\HeartbeatTxnRangeRequest $txns);
+   /**
+    * @param \metastore\CompactionRequest $rqst
+    */
    public function compact(\metastore\CompactionRequest $rqst);
+   /**
+    * @param \metastore\ShowCompactRequest $rqst
+    * @return \metastore\ShowCompactResponse
+    */
    public function show_compact(\metastore\ShowCompactRequest $rqst);
+   /**
+    * @param \metastore\AddDynamicPartitions $rqst
+    * @throws \metastore\NoSuchTxnException
+    * @throws \metastore\TxnAbortedException
+    */
    public function add_dynamic_partitions(\metastore\AddDynamicPartitions $rqst);
+   /**
+    * @param \metastore\NotificationEventRequest $rqst
+    * @return \metastore\NotificationEventResponse
+    */
    public function get_next_notification(\metastore\NotificationEventRequest $rqst);
+   /**
+    * @return \metastore\CurrentNotificationEventId
+    */
    public function get_current_notificationEventId();
+   /**
+    * @param \metastore\FireEventRequest $rqst
+    * @return \metastore\FireEventResponse
+    */
    public function fire_listener_event(\metastore\FireEventRequest $rqst);
++  /**
++   */
 +  public function flushCache();
  }
  
  class ThriftHiveMetastoreClient extends \FacebookServiceClient implements \metastore\ThriftHiveMetastoreIf {
@@@ -7221,53 -8141,6 +8144,53 @@@
      throw new \Exception("fire_listener_event failed: unknown result");
    }
  
 +  public function flushCache()
 +  {
 +    $this->send_flushCache();
 +    $this->recv_flushCache();
 +  }
 +
 +  public function send_flushCache()
 +  {
 +    $args = new \metastore\ThriftHiveMetastore_flushCache_args();
-     $bin_accel = ($this->output_ instanceof TProtocol::$TBINARYPROTOCOLACCELERATED) && function_exists('thrift_protocol_write_binary');
++    $bin_accel = ($this->output_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_write_binary');
 +    if ($bin_accel)
 +    {
 +      thrift_protocol_write_binary($this->output_, 'flushCache', TMessageType::CALL, $args, $this->seqid_, $this->output_->isStrictWrite());
 +    }
 +    else
 +    {
 +      $this->output_->writeMessageBegin('flushCache', TMessageType::CALL, $this->seqid_);
 +      $args->write($this->output_);
 +      $this->output_->writeMessageEnd();
 +      $this->output_->getTransport()->flush();
 +    }
 +  }
 +
 +  public function recv_flushCache()
 +  {
-     $bin_accel = ($this->input_ instanceof TProtocol::$TBINARYPROTOCOLACCELERATED) && function_exists('thrift_protocol_read_binary');
++    $bin_accel = ($this->input_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_read_binary');
 +    if ($bin_accel) $result = thrift_protocol_read_binary($this->input_, '\metastore\ThriftHiveMetastore_flushCache_result', $this->input_->isStrictRead());
 +    else
 +    {
 +      $rseqid = 0;
 +      $fname = null;
 +      $mtype = 0;
 +
 +      $this->input_->readMessageBegin($fname, $mtype, $rseqid);
 +      if ($mtype == TMessageType::EXCEPTION) {
 +        $x = new TApplicationException();
 +        $x->read($this->input_);
 +        $this->input_->readMessageEnd();
 +        throw $x;
 +      }
 +      $result = new \metastore\ThriftHiveMetastore_flushCache_result();
 +      $result->read($this->input_);
 +      $this->input_->readMessageEnd();
 +    }
 +    return;
 +  }
 +
  }
  
  // HELPER FUNCTIONS AND STRUCTURES
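
The docblocks above spell out, for each metastore call, the parameter types, the return type, and the exceptions it may raise; the same service definition is what every generated binding exposes. As a point of reference, here is a minimal sketch of driving a couple of the documented calls through the Python binding regenerated later in this commit. It is not part of the patch: the host, port, database, table and partition names are placeholders, and the filter string is only illustrative.

  # Minimal sketch, not part of the patch.  Endpoint and object names are
  # placeholders for whatever the deployment actually uses.
  from thrift.transport import TSocket, TTransport
  from thrift.protocol import TBinaryProtocol
  from hive_metastore import ThriftHiveMetastore

  socket = TSocket.TSocket('localhost', 9083)
  transport = TTransport.TBufferedTransport(socket)
  client = ThriftHiveMetastore.Client(TBinaryProtocol.TBinaryProtocol(transport))

  transport.open()
  try:
      # string[] parameters in the interface above are plain lists here
      parts = client.get_partitions_by_names('default', 'web_logs',
                                             ['ds=2015-08-01', 'ds=2015-08-02'])
      more = client.get_partitions_by_filter('default', 'web_logs',
                                             "ds >= '2015-08-01'", 10)
      print(len(parts), len(more))
  finally:
      transport.close()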

http://git-wip-us.apache.org/repos/asf/hive/blob/c528294b/metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore-remote
----------------------------------------------------------------------
diff --cc metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore-remote
index 531a475,dc348ef..6bd2728
mode 100644,100755..100755
--- a/metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore-remote
+++ b/metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore-remote
@@@ -15,139 -16,152 +16,153 @@@ from thrift.transport import TSSLSocke
  from thrift.transport import THttpClient
  from thrift.protocol import TBinaryProtocol
  
- import ThriftHiveMetastore
- from ttypes import *
+ from hive_metastore import ThriftHiveMetastore
+ from hive_metastore.ttypes import *
  
  if len(sys.argv) <= 1 or sys.argv[1] == '--help':
-   print ''
-   print 'Usage: ' + sys.argv[0] + ' [-h host[:port]] [-u url] [-f[ramed]] function [arg1 [arg2...]]'
-   print ''
-   print 'Functions:'
-   print '  string getMetaConf(string key)'
-   print '  void setMetaConf(string key, string value)'
-   print '  void create_database(Database database)'
-   print '  Database get_database(string name)'
-   print '  void drop_database(string name, bool deleteData, bool cascade)'
-   print '   get_databases(string pattern)'
-   print '   get_all_databases()'
-   print '  void alter_database(string dbname, Database db)'
-   print '  Type get_type(string name)'
-   print '  bool create_type(Type type)'
-   print '  bool drop_type(string type)'
-   print '   get_type_all(string name)'
-   print '   get_fields(string db_name, string table_name)'
-   print '   get_fields_with_environment_context(string db_name, string table_name, EnvironmentContext environment_context)'
-   print '   get_schema(string db_name, string table_name)'
-   print '   get_schema_with_environment_context(string db_name, string table_name, EnvironmentContext environment_context)'
-   print '  void create_table(Table tbl)'
-   print '  void create_table_with_environment_context(Table tbl, EnvironmentContext environment_context)'
-   print '  void drop_table(string dbname, string name, bool deleteData)'
-   print '  void drop_table_with_environment_context(string dbname, string name, bool deleteData, EnvironmentContext environment_context)'
-   print '   get_tables(string db_name, string pattern)'
-   print '   get_all_tables(string db_name)'
-   print '  Table get_table(string dbname, string tbl_name)'
-   print '   get_table_objects_by_name(string dbname,  tbl_names)'
-   print '   get_table_names_by_filter(string dbname, string filter, i16 max_tables)'
-   print '  void alter_table(string dbname, string tbl_name, Table new_tbl)'
-   print '  void alter_table_with_environment_context(string dbname, string tbl_name, Table new_tbl, EnvironmentContext environment_context)'
-   print '  void alter_table_with_cascade(string dbname, string tbl_name, Table new_tbl, bool cascade)'
-   print '  Partition add_partition(Partition new_part)'
-   print '  Partition add_partition_with_environment_context(Partition new_part, EnvironmentContext environment_context)'
-   print '  i32 add_partitions( new_parts)'
-   print '  i32 add_partitions_pspec( new_parts)'
-   print '  Partition append_partition(string db_name, string tbl_name,  part_vals)'
-   print '  AddPartitionsResult add_partitions_req(AddPartitionsRequest request)'
-   print '  Partition append_partition_with_environment_context(string db_name, string tbl_name,  part_vals, EnvironmentContext environment_context)'
-   print '  Partition append_partition_by_name(string db_name, string tbl_name, string part_name)'
-   print '  Partition append_partition_by_name_with_environment_context(string db_name, string tbl_name, string part_name, EnvironmentContext environment_context)'
-   print '  bool drop_partition(string db_name, string tbl_name,  part_vals, bool deleteData)'
-   print '  bool drop_partition_with_environment_context(string db_name, string tbl_name,  part_vals, bool deleteData, EnvironmentContext environment_context)'
-   print '  bool drop_partition_by_name(string db_name, string tbl_name, string part_name, bool deleteData)'
-   print '  bool drop_partition_by_name_with_environment_context(string db_name, string tbl_name, string part_name, bool deleteData, EnvironmentContext environment_context)'
-   print '  DropPartitionsResult drop_partitions_req(DropPartitionsRequest req)'
-   print '  Partition get_partition(string db_name, string tbl_name,  part_vals)'
-   print '  Partition exchange_partition( partitionSpecs, string source_db, string source_table_name, string dest_db, string dest_table_name)'
-   print '  Partition get_partition_with_auth(string db_name, string tbl_name,  part_vals, string user_name,  group_names)'
-   print '  Partition get_partition_by_name(string db_name, string tbl_name, string part_name)'
-   print '   get_partitions(string db_name, string tbl_name, i16 max_parts)'
-   print '   get_partitions_with_auth(string db_name, string tbl_name, i16 max_parts, string user_name,  group_names)'
-   print '   get_partitions_pspec(string db_name, string tbl_name, i32 max_parts)'
-   print '   get_partition_names(string db_name, string tbl_name, i16 max_parts)'
-   print '   get_partitions_ps(string db_name, string tbl_name,  part_vals, i16 max_parts)'
-   print '   get_partitions_ps_with_auth(string db_name, string tbl_name,  part_vals, i16 max_parts, string user_name,  group_names)'
-   print '   get_partition_names_ps(string db_name, string tbl_name,  part_vals, i16 max_parts)'
-   print '   get_partitions_by_filter(string db_name, string tbl_name, string filter, i16 max_parts)'
-   print '   get_part_specs_by_filter(string db_name, string tbl_name, string filter, i32 max_parts)'
-   print '  PartitionsByExprResult get_partitions_by_expr(PartitionsByExprRequest req)'
-   print '   get_partitions_by_names(string db_name, string tbl_name,  names)'
-   print '  void alter_partition(string db_name, string tbl_name, Partition new_part)'
-   print '  void alter_partitions(string db_name, string tbl_name,  new_parts)'
-   print '  void alter_partition_with_environment_context(string db_name, string tbl_name, Partition new_part, EnvironmentContext environment_context)'
-   print '  void rename_partition(string db_name, string tbl_name,  part_vals, Partition new_part)'
-   print '  bool partition_name_has_valid_characters( part_vals, bool throw_exception)'
-   print '  string get_config_value(string name, string defaultValue)'
-   print '   partition_name_to_vals(string part_name)'
-   print '   partition_name_to_spec(string part_name)'
-   print '  void markPartitionForEvent(string db_name, string tbl_name,  part_vals, PartitionEventType eventType)'
-   print '  bool isPartitionMarkedForEvent(string db_name, string tbl_name,  part_vals, PartitionEventType eventType)'
-   print '  Index add_index(Index new_index, Table index_table)'
-   print '  void alter_index(string dbname, string base_tbl_name, string idx_name, Index new_idx)'
-   print '  bool drop_index_by_name(string db_name, string tbl_name, string index_name, bool deleteData)'
-   print '  Index get_index_by_name(string db_name, string tbl_name, string index_name)'
-   print '   get_indexes(string db_name, string tbl_name, i16 max_indexes)'
-   print '   get_index_names(string db_name, string tbl_name, i16 max_indexes)'
-   print '  bool update_table_column_statistics(ColumnStatistics stats_obj)'
-   print '  bool update_partition_column_statistics(ColumnStatistics stats_obj)'
-   print '  ColumnStatistics get_table_column_statistics(string db_name, string tbl_name, string col_name)'
-   print '  ColumnStatistics get_partition_column_statistics(string db_name, string tbl_name, string part_name, string col_name)'
-   print '  TableStatsResult get_table_statistics_req(TableStatsRequest request)'
-   print '  PartitionsStatsResult get_partitions_statistics_req(PartitionsStatsRequest request)'
-   print '  AggrStats get_aggr_stats_for(PartitionsStatsRequest request)'
-   print '  bool set_aggr_stats_for(SetPartitionsStatsRequest request)'
-   print '  bool delete_partition_column_statistics(string db_name, string tbl_name, string part_name, string col_name)'
-   print '  bool delete_table_column_statistics(string db_name, string tbl_name, string col_name)'
-   print '  void create_function(Function func)'
-   print '  void drop_function(string dbName, string funcName)'
-   print '  void alter_function(string dbName, string funcName, Function newFunc)'
-   print '   get_functions(string dbName, string pattern)'
-   print '  Function get_function(string dbName, string funcName)'
-   print '  bool create_role(Role role)'
-   print '  bool drop_role(string role_name)'
-   print '   get_role_names()'
-   print '  bool grant_role(string role_name, string principal_name, PrincipalType principal_type, string grantor, PrincipalType grantorType, bool grant_option)'
-   print '  bool revoke_role(string role_name, string principal_name, PrincipalType principal_type)'
-   print '   list_roles(string principal_name, PrincipalType principal_type)'
-   print '  GrantRevokeRoleResponse grant_revoke_role(GrantRevokeRoleRequest request)'
-   print '  GetPrincipalsInRoleResponse get_principals_in_role(GetPrincipalsInRoleRequest request)'
-   print '  GetRoleGrantsForPrincipalResponse get_role_grants_for_principal(GetRoleGrantsForPrincipalRequest request)'
-   print '  PrincipalPrivilegeSet get_privilege_set(HiveObjectRef hiveObject, string user_name,  group_names)'
-   print '   list_privileges(string principal_name, PrincipalType principal_type, HiveObjectRef hiveObject)'
-   print '  bool grant_privileges(PrivilegeBag privileges)'
-   print '  bool revoke_privileges(PrivilegeBag privileges)'
-   print '  GrantRevokePrivilegeResponse grant_revoke_privileges(GrantRevokePrivilegeRequest request)'
-   print '   set_ugi(string user_name,  group_names)'
-   print '  string get_delegation_token(string token_owner, string renewer_kerberos_principal_name)'
-   print '  i64 renew_delegation_token(string token_str_form)'
-   print '  void cancel_delegation_token(string token_str_form)'
-   print '  GetOpenTxnsResponse get_open_txns()'
-   print '  GetOpenTxnsInfoResponse get_open_txns_info()'
-   print '  OpenTxnsResponse open_txns(OpenTxnRequest rqst)'
-   print '  void abort_txn(AbortTxnRequest rqst)'
-   print '  void commit_txn(CommitTxnRequest rqst)'
-   print '  LockResponse lock(LockRequest rqst)'
-   print '  LockResponse check_lock(CheckLockRequest rqst)'
-   print '  void unlock(UnlockRequest rqst)'
-   print '  ShowLocksResponse show_locks(ShowLocksRequest rqst)'
-   print '  void heartbeat(HeartbeatRequest ids)'
-   print '  HeartbeatTxnRangeResponse heartbeat_txn_range(HeartbeatTxnRangeRequest txns)'
-   print '  void compact(CompactionRequest rqst)'
-   print '  ShowCompactResponse show_compact(ShowCompactRequest rqst)'
-   print '  void add_dynamic_partitions(AddDynamicPartitions rqst)'
-   print '  NotificationEventResponse get_next_notification(NotificationEventRequest rqst)'
-   print '  CurrentNotificationEventId get_current_notificationEventId()'
-   print '  FireEventResponse fire_listener_event(FireEventRequest rqst)'
-   print '  void flushCache()'
-   print ''
+   print('')
+   print('Usage: ' + sys.argv[0] + ' [-h host[:port]] [-u url] [-f[ramed]] [-s[sl]] function [arg1 [arg2...]]')
+   print('')
+   print('Functions:')
+   print('  string getMetaConf(string key)')
+   print('  void setMetaConf(string key, string value)')
+   print('  void create_database(Database database)')
+   print('  Database get_database(string name)')
+   print('  void drop_database(string name, bool deleteData, bool cascade)')
+   print('   get_databases(string pattern)')
+   print('   get_all_databases()')
+   print('  void alter_database(string dbname, Database db)')
+   print('  Type get_type(string name)')
+   print('  bool create_type(Type type)')
+   print('  bool drop_type(string type)')
+   print('   get_type_all(string name)')
+   print('   get_fields(string db_name, string table_name)')
+   print('   get_fields_with_environment_context(string db_name, string table_name, EnvironmentContext environment_context)')
+   print('   get_schema(string db_name, string table_name)')
+   print('   get_schema_with_environment_context(string db_name, string table_name, EnvironmentContext environment_context)')
+   print('  void create_table(Table tbl)')
+   print('  void create_table_with_environment_context(Table tbl, EnvironmentContext environment_context)')
+   print('  void drop_table(string dbname, string name, bool deleteData)')
+   print('  void drop_table_with_environment_context(string dbname, string name, bool deleteData, EnvironmentContext environment_context)')
+   print('   get_tables(string db_name, string pattern)')
+   print('   get_all_tables(string db_name)')
+   print('  Table get_table(string dbname, string tbl_name)')
+   print('   get_table_objects_by_name(string dbname,  tbl_names)')
+   print('   get_table_names_by_filter(string dbname, string filter, i16 max_tables)')
+   print('  void alter_table(string dbname, string tbl_name, Table new_tbl)')
+   print('  void alter_table_with_environment_context(string dbname, string tbl_name, Table new_tbl, EnvironmentContext environment_context)')
+   print('  void alter_table_with_cascade(string dbname, string tbl_name, Table new_tbl, bool cascade)')
+   print('  Partition add_partition(Partition new_part)')
+   print('  Partition add_partition_with_environment_context(Partition new_part, EnvironmentContext environment_context)')
+   print('  i32 add_partitions( new_parts)')
+   print('  i32 add_partitions_pspec( new_parts)')
+   print('  Partition append_partition(string db_name, string tbl_name,  part_vals)')
+   print('  AddPartitionsResult add_partitions_req(AddPartitionsRequest request)')
+   print('  Partition append_partition_with_environment_context(string db_name, string tbl_name,  part_vals, EnvironmentContext environment_context)')
+   print('  Partition append_partition_by_name(string db_name, string tbl_name, string part_name)')
+   print('  Partition append_partition_by_name_with_environment_context(string db_name, string tbl_name, string part_name, EnvironmentContext environment_context)')
+   print('  bool drop_partition(string db_name, string tbl_name,  part_vals, bool deleteData)')
+   print('  bool drop_partition_with_environment_context(string db_name, string tbl_name,  part_vals, bool deleteData, EnvironmentContext environment_context)')
+   print('  bool drop_partition_by_name(string db_name, string tbl_name, string part_name, bool deleteData)')
+   print('  bool drop_partition_by_name_with_environment_context(string db_name, string tbl_name, string part_name, bool deleteData, EnvironmentContext environment_context)')
+   print('  DropPartitionsResult drop_partitions_req(DropPartitionsRequest req)')
+   print('  Partition get_partition(string db_name, string tbl_name,  part_vals)')
+   print('  Partition exchange_partition( partitionSpecs, string source_db, string source_table_name, string dest_db, string dest_table_name)')
+   print('  Partition get_partition_with_auth(string db_name, string tbl_name,  part_vals, string user_name,  group_names)')
+   print('  Partition get_partition_by_name(string db_name, string tbl_name, string part_name)')
+   print('   get_partitions(string db_name, string tbl_name, i16 max_parts)')
+   print('   get_partitions_with_auth(string db_name, string tbl_name, i16 max_parts, string user_name,  group_names)')
+   print('   get_partitions_pspec(string db_name, string tbl_name, i32 max_parts)')
+   print('   get_partition_names(string db_name, string tbl_name, i16 max_parts)')
+   print('   get_partitions_ps(string db_name, string tbl_name,  part_vals, i16 max_parts)')
+   print('   get_partitions_ps_with_auth(string db_name, string tbl_name,  part_vals, i16 max_parts, string user_name,  group_names)')
+   print('   get_partition_names_ps(string db_name, string tbl_name,  part_vals, i16 max_parts)')
+   print('   get_partitions_by_filter(string db_name, string tbl_name, string filter, i16 max_parts)')
+   print('   get_part_specs_by_filter(string db_name, string tbl_name, string filter, i32 max_parts)')
+   print('  PartitionsByExprResult get_partitions_by_expr(PartitionsByExprRequest req)')
+   print('   get_partitions_by_names(string db_name, string tbl_name,  names)')
+   print('  void alter_partition(string db_name, string tbl_name, Partition new_part)')
+   print('  void alter_partitions(string db_name, string tbl_name,  new_parts)')
+   print('  void alter_partition_with_environment_context(string db_name, string tbl_name, Partition new_part, EnvironmentContext environment_context)')
+   print('  void rename_partition(string db_name, string tbl_name,  part_vals, Partition new_part)')
+   print('  bool partition_name_has_valid_characters( part_vals, bool throw_exception)')
+   print('  string get_config_value(string name, string defaultValue)')
+   print('   partition_name_to_vals(string part_name)')
+   print('   partition_name_to_spec(string part_name)')
+   print('  void markPartitionForEvent(string db_name, string tbl_name,  part_vals, PartitionEventType eventType)')
+   print('  bool isPartitionMarkedForEvent(string db_name, string tbl_name,  part_vals, PartitionEventType eventType)')
+   print('  Index add_index(Index new_index, Table index_table)')
+   print('  void alter_index(string dbname, string base_tbl_name, string idx_name, Index new_idx)')
+   print('  bool drop_index_by_name(string db_name, string tbl_name, string index_name, bool deleteData)')
+   print('  Index get_index_by_name(string db_name, string tbl_name, string index_name)')
+   print('   get_indexes(string db_name, string tbl_name, i16 max_indexes)')
+   print('   get_index_names(string db_name, string tbl_name, i16 max_indexes)')
+   print('  bool update_table_column_statistics(ColumnStatistics stats_obj)')
+   print('  bool update_partition_column_statistics(ColumnStatistics stats_obj)')
+   print('  ColumnStatistics get_table_column_statistics(string db_name, string tbl_name, string col_name)')
+   print('  ColumnStatistics get_partition_column_statistics(string db_name, string tbl_name, string part_name, string col_name)')
+   print('  TableStatsResult get_table_statistics_req(TableStatsRequest request)')
+   print('  PartitionsStatsResult get_partitions_statistics_req(PartitionsStatsRequest request)')
+   print('  AggrStats get_aggr_stats_for(PartitionsStatsRequest request)')
+   print('  bool set_aggr_stats_for(SetPartitionsStatsRequest request)')
+   print('  bool delete_partition_column_statistics(string db_name, string tbl_name, string part_name, string col_name)')
+   print('  bool delete_table_column_statistics(string db_name, string tbl_name, string col_name)')
+   print('  void create_function(Function func)')
+   print('  void drop_function(string dbName, string funcName)')
+   print('  void alter_function(string dbName, string funcName, Function newFunc)')
+   print('   get_functions(string dbName, string pattern)')
+   print('  Function get_function(string dbName, string funcName)')
+   print('  GetAllFunctionsResponse get_all_functions()')
+   print('  bool create_role(Role role)')
+   print('  bool drop_role(string role_name)')
+   print('   get_role_names()')
+   print('  bool grant_role(string role_name, string principal_name, PrincipalType principal_type, string grantor, PrincipalType grantorType, bool grant_option)')
+   print('  bool revoke_role(string role_name, string principal_name, PrincipalType principal_type)')
+   print('   list_roles(string principal_name, PrincipalType principal_type)')
+   print('  GrantRevokeRoleResponse grant_revoke_role(GrantRevokeRoleRequest request)')
+   print('  GetPrincipalsInRoleResponse get_principals_in_role(GetPrincipalsInRoleRequest request)')
+   print('  GetRoleGrantsForPrincipalResponse get_role_grants_for_principal(GetRoleGrantsForPrincipalRequest request)')
+   print('  PrincipalPrivilegeSet get_privilege_set(HiveObjectRef hiveObject, string user_name,  group_names)')
+   print('   list_privileges(string principal_name, PrincipalType principal_type, HiveObjectRef hiveObject)')
+   print('  bool grant_privileges(PrivilegeBag privileges)')
+   print('  bool revoke_privileges(PrivilegeBag privileges)')
+   print('  GrantRevokePrivilegeResponse grant_revoke_privileges(GrantRevokePrivilegeRequest request)')
+   print('   set_ugi(string user_name,  group_names)')
+   print('  string get_delegation_token(string token_owner, string renewer_kerberos_principal_name)')
+   print('  i64 renew_delegation_token(string token_str_form)')
+   print('  void cancel_delegation_token(string token_str_form)')
+   print('  GetOpenTxnsResponse get_open_txns()')
+   print('  GetOpenTxnsInfoResponse get_open_txns_info()')
+   print('  OpenTxnsResponse open_txns(OpenTxnRequest rqst)')
+   print('  void abort_txn(AbortTxnRequest rqst)')
+   print('  void commit_txn(CommitTxnRequest rqst)')
+   print('  LockResponse lock(LockRequest rqst)')
+   print('  LockResponse check_lock(CheckLockRequest rqst)')
+   print('  void unlock(UnlockRequest rqst)')
+   print('  ShowLocksResponse show_locks(ShowLocksRequest rqst)')
+   print('  void heartbeat(HeartbeatRequest ids)')
+   print('  HeartbeatTxnRangeResponse heartbeat_txn_range(HeartbeatTxnRangeRequest txns)')
+   print('  void compact(CompactionRequest rqst)')
+   print('  ShowCompactResponse show_compact(ShowCompactRequest rqst)')
+   print('  void add_dynamic_partitions(AddDynamicPartitions rqst)')
+   print('  NotificationEventResponse get_next_notification(NotificationEventRequest rqst)')
+   print('  CurrentNotificationEventId get_current_notificationEventId()')
+   print('  FireEventResponse fire_listener_event(FireEventRequest rqst)')
++  print('  void flushCache()')
+   print('  string getName()')
+   print('  string getVersion()')
+   print('  fb_status getStatus()')
+   print('  string getStatusDetails()')
+   print('   getCounters()')
+   print('  i64 getCounter(string key)')
+   print('  void setOption(string key, string value)')
+   print('  string getOption(string key)')
+   print('   getOptions()')
++  print('  string getCpuProfile(i32 profileDurationInSec)')
+   print('  i64 aliveSince()')
 -  print('  reflection_limited.Service getLimitedReflection()')
+   print('  void reinitialize()')
+   print('  void shutdown()')
+   print('')
    sys.exit(0)
  
  pp = pprint.PrettyPrinter(indent = 2)
@@@ -936,14 -961,86 +962,92 @@@ elif cmd == 'fire_listener_event'
      sys.exit(1)
    pp.pprint(client.fire_listener_event(eval(args[0]),))
  
 +elif cmd == 'flushCache':
 +  if len(args) != 0:
-     print 'flushCache requires 0 args'
++    print('flushCache requires 0 args')
 +    sys.exit(1)
 +  pp.pprint(client.flushCache())
 +
+ elif cmd == 'getName':
+   if len(args) != 0:
+     print('getName requires 0 args')
+     sys.exit(1)
+   pp.pprint(client.getName())
+ 
+ elif cmd == 'getVersion':
+   if len(args) != 0:
+     print('getVersion requires 0 args')
+     sys.exit(1)
+   pp.pprint(client.getVersion())
+ 
+ elif cmd == 'getStatus':
+   if len(args) != 0:
+     print('getStatus requires 0 args')
+     sys.exit(1)
+   pp.pprint(client.getStatus())
+ 
+ elif cmd == 'getStatusDetails':
+   if len(args) != 0:
+     print('getStatusDetails requires 0 args')
+     sys.exit(1)
+   pp.pprint(client.getStatusDetails())
+ 
+ elif cmd == 'getCounters':
+   if len(args) != 0:
+     print('getCounters requires 0 args')
+     sys.exit(1)
+   pp.pprint(client.getCounters())
+ 
+ elif cmd == 'getCounter':
+   if len(args) != 1:
+     print('getCounter requires 1 args')
+     sys.exit(1)
+   pp.pprint(client.getCounter(args[0],))
+ 
+ elif cmd == 'setOption':
+   if len(args) != 2:
+     print('setOption requires 2 args')
+     sys.exit(1)
+   pp.pprint(client.setOption(args[0],args[1],))
+ 
+ elif cmd == 'getOption':
+   if len(args) != 1:
+     print('getOption requires 1 args')
+     sys.exit(1)
+   pp.pprint(client.getOption(args[0],))
+ 
+ elif cmd == 'getOptions':
+   if len(args) != 0:
+     print('getOptions requires 0 args')
+     sys.exit(1)
+   pp.pprint(client.getOptions())
+ 
++elif cmd == 'getCpuProfile':
++  if len(args) != 1:
++    print('getCpuProfile requires 1 args')
++    sys.exit(1)
++  pp.pprint(client.getCpuProfile(eval(args[0]),))
++
+ elif cmd == 'aliveSince':
+   if len(args) != 0:
+     print('aliveSince requires 0 args')
+     sys.exit(1)
+   pp.pprint(client.aliveSince())
+ 
 -elif cmd == 'getLimitedReflection':
 -  if len(args) != 0:
 -    print('getLimitedReflection requires 0 args')
 -    sys.exit(1)
 -  pp.pprint(client.getLimitedReflection())
 -
+ elif cmd == 'reinitialize':
+   if len(args) != 0:
+     print('reinitialize requires 0 args')
+     sys.exit(1)
+   pp.pprint(client.reinitialize())
+ 
+ elif cmd == 'shutdown':
+   if len(args) != 0:
+     print('shutdown requires 0 args')
+     sys.exit(1)
+   pp.pprint(client.shutdown())
+ 
  else:
-   print 'Unrecognized method %s' % cmd
+   print('Unrecognized method %s' % cmd)
    sys.exit(1)
  
  transport.close()
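
Following the usage line above, a typical invocation of the regenerated script names the metastore endpoint and the function to call, for example 'ThriftHiveMetastore-remote -h metastore-host:9083 flushCache', where the host and port are only illustrative. flushCache takes no arguments, which is exactly what the new 'flushCache requires 0 args' check earlier in this diff enforces.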

http://git-wip-us.apache.org/repos/asf/hive/blob/c528294b/metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore.py
----------------------------------------------------------------------
diff --cc metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore.py
index 4aad3aa,9e460f0..dd75b01
--- a/metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore.py
+++ b/metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore.py
@@@ -1018,9 -1021,6 +1021,9 @@@ class Iface(fb303.FacebookService.Iface
      """
      pass
  
-   def flushCache(self, ):
++  def flushCache(self):
 +    pass
 +
  
  class Client(fb303.FacebookService.Client, Iface):
    """
@@@ -5438,29 -5589,6 +5592,30 @@@
        return result.success
      raise TApplicationException(TApplicationException.MISSING_RESULT, "fire_listener_event failed: unknown result");
  
-   def flushCache(self, ):
++  def flushCache(self):
 +    self.send_flushCache()
 +    self.recv_flushCache()
 +
-   def send_flushCache(self, ):
++  def send_flushCache(self):
 +    self._oprot.writeMessageBegin('flushCache', TMessageType.CALL, self._seqid)
 +    args = flushCache_args()
 +    args.write(self._oprot)
 +    self._oprot.writeMessageEnd()
 +    self._oprot.trans.flush()
 +
-   def recv_flushCache(self, ):
-     (fname, mtype, rseqid) = self._iprot.readMessageBegin()
++  def recv_flushCache(self):
++    iprot = self._iprot
++    (fname, mtype, rseqid) = iprot.readMessageBegin()
 +    if mtype == TMessageType.EXCEPTION:
 +      x = TApplicationException()
-       x.read(self._iprot)
-       self._iprot.readMessageEnd()
++      x.read(iprot)
++      iprot.readMessageEnd()
 +      raise x
 +    result = flushCache_result()
-     result.read(self._iprot)
-     self._iprot.readMessageEnd()
++    result.read(iprot)
++    iprot.readMessageEnd()
 +    return
 +
  
  class Processor(fb303.FacebookService.Processor, Iface, TProcessor):
    def __init__(self, handler):
@@@ -27411,87 -29264,3 +29303,95 @@@ class fire_listener_event_result
  
    def __ne__(self, other):
      return not (self == other)
 +
 +class flushCache_args:
 +
 +  thrift_spec = (
 +  )
 +
 +  def read(self, iprot):
 +    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
 +      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
 +      return
 +    iprot.readStructBegin()
 +    while True:
 +      (fname, ftype, fid) = iprot.readFieldBegin()
 +      if ftype == TType.STOP:
 +        break
 +      else:
 +        iprot.skip(ftype)
 +      iprot.readFieldEnd()
 +    iprot.readStructEnd()
 +
 +  def write(self, oprot):
 +    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
 +      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
 +      return
 +    oprot.writeStructBegin('flushCache_args')
 +    oprot.writeFieldStop()
 +    oprot.writeStructEnd()
 +
 +  def validate(self):
 +    return
 +
 +
++  def __hash__(self):
++    value = 17
++    return value
++
 +  def __repr__(self):
 +    L = ['%s=%r' % (key, value)
 +      for key, value in self.__dict__.iteritems()]
 +    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
 +
 +  def __eq__(self, other):
 +    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
 +
 +  def __ne__(self, other):
 +    return not (self == other)
 +
 +class flushCache_result:
 +
 +  thrift_spec = (
 +  )
 +
 +  def read(self, iprot):
 +    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
 +      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
 +      return
 +    iprot.readStructBegin()
 +    while True:
 +      (fname, ftype, fid) = iprot.readFieldBegin()
 +      if ftype == TType.STOP:
 +        break
 +      else:
 +        iprot.skip(ftype)
 +      iprot.readFieldEnd()
 +    iprot.readStructEnd()
 +
 +  def write(self, oprot):
 +    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
 +      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
 +      return
 +    oprot.writeStructBegin('flushCache_result')
 +    oprot.writeFieldStop()
 +    oprot.writeStructEnd()
 +
 +  def validate(self):
 +    return
 +
 +
++  def __hash__(self):
++    value = 17
++    return value
++
 +  def __repr__(self):
 +    L = ['%s=%r' % (key, value)
 +      for key, value in self.__dict__.iteritems()]
 +    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
 +
 +  def __eq__(self, other):
 +    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
 +
 +  def __ne__(self, other):
 +    return not (self == other)
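
The send_flushCache/recv_flushCache pair and the empty flushCache_args/flushCache_result structs added above make flushCache a bare request/response round trip. A minimal caller-side sketch, again with a placeholder endpoint and not part of the patch:

  from thrift.transport import TSocket, TTransport
  from thrift.protocol import TBinaryProtocol
  from thrift.Thrift import TApplicationException
  from hive_metastore import ThriftHiveMetastore

  transport = TTransport.TBufferedTransport(TSocket.TSocket('localhost', 9083))
  client = ThriftHiveMetastore.Client(TBinaryProtocol.TBinaryProtocol(transport))

  transport.open()
  try:
      # No arguments and no result fields: a successful call returns None.
      client.flushCache()
  except TApplicationException as e:
      # recv_flushCache() raises this when the server reports an error.
      print('flushCache failed: %s' % e)
  finally:
      transport.close()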

http://git-wip-us.apache.org/repos/asf/hive/blob/c528294b/metastore/src/gen/thrift/gen-rb/thrift_hive_metastore.rb
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hive/blob/c528294b/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hive/blob/c528294b/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hive/blob/c528294b/metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hive/blob/c528294b/metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hive/blob/c528294b/metastore/src/java/org/apache/hadoop/hive/metastore/RawStore.java
----------------------------------------------------------------------


[43/50] [abbrv] hive git commit: HIVE-11568 : merge master into branch (Sergey Shelukhin)

Posted by se...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/c528294b/ql/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/ql/plan/api/Graph.java
----------------------------------------------------------------------
diff --cc ql/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/ql/plan/api/Graph.java
index 35aa6cb,e621cfa..0a13175
--- a/ql/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/ql/plan/api/Graph.java
+++ b/ql/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/ql/plan/api/Graph.java
@@@ -31,7 -33,9 +33,9 @@@ import javax.annotation.Generated
  import org.slf4j.Logger;
  import org.slf4j.LoggerFactory;
  
- public class Graph implements org.apache.thrift.TBase<Graph, Graph._Fields>, java.io.Serializable, Cloneable {
+ @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
 -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3")
++@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-14")
+ public class Graph implements org.apache.thrift.TBase<Graph, Graph._Fields>, java.io.Serializable, Cloneable, Comparable<Graph> {
    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("Graph");
  
    private static final org.apache.thrift.protocol.TField NODE_TYPE_FIELD_DESC = new org.apache.thrift.protocol.TField("nodeType", org.apache.thrift.protocol.TType.I32, (short)1);

http://git-wip-us.apache.org/repos/asf/hive/blob/c528294b/ql/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/ql/plan/api/Operator.java
----------------------------------------------------------------------
diff --cc ql/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/ql/plan/api/Operator.java
index f1c9e2d,1b18aab..991974c
--- a/ql/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/ql/plan/api/Operator.java
+++ b/ql/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/ql/plan/api/Operator.java
@@@ -31,7 -33,9 +33,9 @@@ import javax.annotation.Generated
  import org.slf4j.Logger;
  import org.slf4j.LoggerFactory;
  
- public class Operator implements org.apache.thrift.TBase<Operator, Operator._Fields>, java.io.Serializable, Cloneable {
+ @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
 -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3")
++@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-14")
+ public class Operator implements org.apache.thrift.TBase<Operator, Operator._Fields>, java.io.Serializable, Cloneable, Comparable<Operator> {
    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("Operator");
  
    private static final org.apache.thrift.protocol.TField OPERATOR_ID_FIELD_DESC = new org.apache.thrift.protocol.TField("operatorId", org.apache.thrift.protocol.TType.STRING, (short)1);

http://git-wip-us.apache.org/repos/asf/hive/blob/c528294b/ql/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/ql/plan/api/Query.java
----------------------------------------------------------------------
diff --cc ql/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/ql/plan/api/Query.java
index e0d77e8,5c5e0f8..f98a7e1
--- a/ql/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/ql/plan/api/Query.java
+++ b/ql/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/ql/plan/api/Query.java
@@@ -31,7 -33,9 +33,9 @@@ import javax.annotation.Generated
  import org.slf4j.Logger;
  import org.slf4j.LoggerFactory;
  
- public class Query implements org.apache.thrift.TBase<Query, Query._Fields>, java.io.Serializable, Cloneable {
+ @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
 -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3")
++@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-14")
+ public class Query implements org.apache.thrift.TBase<Query, Query._Fields>, java.io.Serializable, Cloneable, Comparable<Query> {
    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("Query");
  
    private static final org.apache.thrift.protocol.TField QUERY_ID_FIELD_DESC = new org.apache.thrift.protocol.TField("queryId", org.apache.thrift.protocol.TType.STRING, (short)1);

http://git-wip-us.apache.org/repos/asf/hive/blob/c528294b/ql/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/ql/plan/api/QueryPlan.java
----------------------------------------------------------------------
diff --cc ql/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/ql/plan/api/QueryPlan.java
index e8566a5,d340d58..0994fda
--- a/ql/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/ql/plan/api/QueryPlan.java
+++ b/ql/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/ql/plan/api/QueryPlan.java
@@@ -31,7 -33,9 +33,9 @@@ import javax.annotation.Generated
  import org.slf4j.Logger;
  import org.slf4j.LoggerFactory;
  
- public class QueryPlan implements org.apache.thrift.TBase<QueryPlan, QueryPlan._Fields>, java.io.Serializable, Cloneable {
+ @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
 -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3")
++@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-14")
+ public class QueryPlan implements org.apache.thrift.TBase<QueryPlan, QueryPlan._Fields>, java.io.Serializable, Cloneable, Comparable<QueryPlan> {
    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("QueryPlan");
  
    private static final org.apache.thrift.protocol.TField QUERIES_FIELD_DESC = new org.apache.thrift.protocol.TField("queries", org.apache.thrift.protocol.TType.LIST, (short)1);

http://git-wip-us.apache.org/repos/asf/hive/blob/c528294b/ql/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/ql/plan/api/Stage.java
----------------------------------------------------------------------
diff --cc ql/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/ql/plan/api/Stage.java
index c341db2,7353933..e0cd86c
--- a/ql/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/ql/plan/api/Stage.java
+++ b/ql/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/ql/plan/api/Stage.java
@@@ -31,7 -33,9 +33,9 @@@ import javax.annotation.Generated
  import org.slf4j.Logger;
  import org.slf4j.LoggerFactory;
  
- public class Stage implements org.apache.thrift.TBase<Stage, Stage._Fields>, java.io.Serializable, Cloneable {
+ @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
 -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3")
++@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-14")
+ public class Stage implements org.apache.thrift.TBase<Stage, Stage._Fields>, java.io.Serializable, Cloneable, Comparable<Stage> {
    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("Stage");
  
    private static final org.apache.thrift.protocol.TField STAGE_ID_FIELD_DESC = new org.apache.thrift.protocol.TField("stageId", org.apache.thrift.protocol.TType.STRING, (short)1);

http://git-wip-us.apache.org/repos/asf/hive/blob/c528294b/ql/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/ql/plan/api/Task.java
----------------------------------------------------------------------
diff --cc ql/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/ql/plan/api/Task.java
index fc4313f,09a2e7f..66e5e30
--- a/ql/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/ql/plan/api/Task.java
+++ b/ql/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/ql/plan/api/Task.java
@@@ -31,7 -33,9 +33,9 @@@ import javax.annotation.Generated
  import org.slf4j.Logger;
  import org.slf4j.LoggerFactory;
  
- public class Task implements org.apache.thrift.TBase<Task, Task._Fields>, java.io.Serializable, Cloneable {
+ @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
 -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3")
++@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-14")
+ public class Task implements org.apache.thrift.TBase<Task, Task._Fields>, java.io.Serializable, Cloneable, Comparable<Task> {
    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("Task");
  
    private static final org.apache.thrift.protocol.TField TASK_ID_FIELD_DESC = new org.apache.thrift.protocol.TField("taskId", org.apache.thrift.protocol.TType.STRING, (short)1);

http://git-wip-us.apache.org/repos/asf/hive/blob/c528294b/ql/src/java/org/apache/hadoop/hive/ql/Driver.java
----------------------------------------------------------------------
diff --cc ql/src/java/org/apache/hadoop/hive/ql/Driver.java
index 7f918f8,c0c1b2e..ff3e6c8
--- a/ql/src/java/org/apache/hadoop/hive/ql/Driver.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/Driver.java
@@@ -413,14 -409,8 +409,14 @@@ public class Driver implements CommandP
            getHooks(HiveConf.ConfVars.SEMANTIC_ANALYZER_HOOK,
                HiveSemanticAnalyzerHook.class);
  
 +      // Flush the metastore cache.  This assures that we don't pick up objects from a previous
 +      // query running in this same thread.  This has to be done after we get our semantic
 +      // analyzer (this is when the connection to the metastore is made) but before we analyze,
 +      // because at that point we need access to the objects.
 +      Hive.get().getMSC().flushCache();
 +
        // Do semantic analysis and plan generation
-       if (saHooks != null) {
+       if (saHooks != null && !saHooks.isEmpty()) {
          HiveSemanticAnalyzerHookContext hookCtx = new HiveSemanticAnalyzerHookContextImpl();
          hookCtx.setConf(conf);
          hookCtx.setUserName(userName);
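
A minimal illustrative sketch (not the actual Driver code) of the cache-flush pattern the hunk above adds: the thread-local Hive object is obtained once the metastore connection exists, and its client cache is flushed before semantic analysis so that objects cached by a previous query on the same thread are not reused. Only Hive.get().getMSC().flushCache() comes from the patch itself; the class and method names around it are hypothetical.

    import org.apache.hadoop.hive.ql.metadata.Hive;

    public class MetastoreCacheFlushSketch {  // hypothetical wrapper class
      // Flush stale metastore objects for the current thread before analysis begins.
      public static void flushBeforeAnalysis() throws Exception {
        Hive db = Hive.get();       // thread-local Hive; the metastore connection is made here
        db.getMSC().flushCache();   // drop objects cached by a previous query on this thread
        // ... semantic analysis and plan generation would follow here ...
      }
    }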

http://git-wip-us.apache.org/repos/asf/hive/blob/c528294b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hive/blob/c528294b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Partition.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hive/blob/c528294b/serde/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/serde/test/InnerStruct.java
----------------------------------------------------------------------
diff --cc serde/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/serde/test/InnerStruct.java
index 72a28ae,db325af..68bb885
--- a/serde/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/serde/test/InnerStruct.java
+++ b/serde/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/serde/test/InnerStruct.java
@@@ -31,7 -33,9 +33,9 @@@ import javax.annotation.Generated
  import org.slf4j.Logger;
  import org.slf4j.LoggerFactory;
  
- public class InnerStruct implements org.apache.thrift.TBase<InnerStruct, InnerStruct._Fields>, java.io.Serializable, Cloneable {
+ @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
 -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3")
++@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-14")
+ public class InnerStruct implements org.apache.thrift.TBase<InnerStruct, InnerStruct._Fields>, java.io.Serializable, Cloneable, Comparable<InnerStruct> {
    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("InnerStruct");
  
    private static final org.apache.thrift.protocol.TField FIELD0_FIELD_DESC = new org.apache.thrift.protocol.TField("field0", org.apache.thrift.protocol.TType.I32, (short)1);

http://git-wip-us.apache.org/repos/asf/hive/blob/c528294b/serde/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/serde/test/ThriftTestObj.java
----------------------------------------------------------------------
diff --cc serde/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/serde/test/ThriftTestObj.java
index 1b708dd,1232ff9..208fa82
--- a/serde/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/serde/test/ThriftTestObj.java
+++ b/serde/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/serde/test/ThriftTestObj.java
@@@ -31,7 -33,9 +33,9 @@@ import javax.annotation.Generated
  import org.slf4j.Logger;
  import org.slf4j.LoggerFactory;
  
- public class ThriftTestObj implements org.apache.thrift.TBase<ThriftTestObj, ThriftTestObj._Fields>, java.io.Serializable, Cloneable {
+ @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
 -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3")
++@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-14")
+ public class ThriftTestObj implements org.apache.thrift.TBase<ThriftTestObj, ThriftTestObj._Fields>, java.io.Serializable, Cloneable, Comparable<ThriftTestObj> {
    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("ThriftTestObj");
  
    private static final org.apache.thrift.protocol.TField FIELD1_FIELD_DESC = new org.apache.thrift.protocol.TField("field1", org.apache.thrift.protocol.TType.I32, (short)1);

http://git-wip-us.apache.org/repos/asf/hive/blob/c528294b/serde/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/serde2/thrift/test/Complex.java
----------------------------------------------------------------------
diff --cc serde/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/serde2/thrift/test/Complex.java
index 07ea8b9,4d2f5bf..6d32947
--- a/serde/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/serde2/thrift/test/Complex.java
+++ b/serde/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/serde2/thrift/test/Complex.java
@@@ -31,7 -33,9 +33,9 @@@ import javax.annotation.Generated
  import org.slf4j.Logger;
  import org.slf4j.LoggerFactory;
  
- public class Complex implements org.apache.thrift.TBase<Complex, Complex._Fields>, java.io.Serializable, Cloneable {
+ @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
 -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3")
++@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-14")
+ public class Complex implements org.apache.thrift.TBase<Complex, Complex._Fields>, java.io.Serializable, Cloneable, Comparable<Complex> {
    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("Complex");
  
    private static final org.apache.thrift.protocol.TField AINT_FIELD_DESC = new org.apache.thrift.protocol.TField("aint", org.apache.thrift.protocol.TType.I32, (short)1);

http://git-wip-us.apache.org/repos/asf/hive/blob/c528294b/serde/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/serde2/thrift/test/IntString.java
----------------------------------------------------------------------
diff --cc serde/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/serde2/thrift/test/IntString.java
index bd580ad,23d7363..26e7b38
--- a/serde/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/serde2/thrift/test/IntString.java
+++ b/serde/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/serde2/thrift/test/IntString.java
@@@ -31,7 -33,9 +33,9 @@@ import javax.annotation.Generated
  import org.slf4j.Logger;
  import org.slf4j.LoggerFactory;
  
- public class IntString implements org.apache.thrift.TBase<IntString, IntString._Fields>, java.io.Serializable, Cloneable {
+ @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
 -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3")
++@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-14")
+ public class IntString implements org.apache.thrift.TBase<IntString, IntString._Fields>, java.io.Serializable, Cloneable, Comparable<IntString> {
    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("IntString");
  
    private static final org.apache.thrift.protocol.TField MYINT_FIELD_DESC = new org.apache.thrift.protocol.TField("myint", org.apache.thrift.protocol.TType.I32, (short)1);

http://git-wip-us.apache.org/repos/asf/hive/blob/c528294b/serde/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/serde2/thrift/test/MegaStruct.java
----------------------------------------------------------------------
diff --cc serde/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/serde2/thrift/test/MegaStruct.java
index 386fef9,9447708..d937a9c
--- a/serde/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/serde2/thrift/test/MegaStruct.java
+++ b/serde/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/serde2/thrift/test/MegaStruct.java
@@@ -31,7 -33,9 +33,9 @@@ import javax.annotation.Generated
  import org.slf4j.Logger;
  import org.slf4j.LoggerFactory;
  
- public class MegaStruct implements org.apache.thrift.TBase<MegaStruct, MegaStruct._Fields>, java.io.Serializable, Cloneable {
+ @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
 -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3")
++@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-14")
+ public class MegaStruct implements org.apache.thrift.TBase<MegaStruct, MegaStruct._Fields>, java.io.Serializable, Cloneable, Comparable<MegaStruct> {
    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("MegaStruct");
  
    private static final org.apache.thrift.protocol.TField MY_BOOL_FIELD_DESC = new org.apache.thrift.protocol.TField("my_bool", org.apache.thrift.protocol.TType.BOOL, (short)1);

http://git-wip-us.apache.org/repos/asf/hive/blob/c528294b/serde/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/serde2/thrift/test/MiniStruct.java
----------------------------------------------------------------------
diff --cc serde/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/serde2/thrift/test/MiniStruct.java
index b1d3946,d8c46f4..c25156a
--- a/serde/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/serde2/thrift/test/MiniStruct.java
+++ b/serde/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/serde2/thrift/test/MiniStruct.java
@@@ -31,7 -33,9 +33,9 @@@ import javax.annotation.Generated
  import org.slf4j.Logger;
  import org.slf4j.LoggerFactory;
  
- public class MiniStruct implements org.apache.thrift.TBase<MiniStruct, MiniStruct._Fields>, java.io.Serializable, Cloneable {
+ @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
 -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3")
++@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-14")
+ public class MiniStruct implements org.apache.thrift.TBase<MiniStruct, MiniStruct._Fields>, java.io.Serializable, Cloneable, Comparable<MiniStruct> {
    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("MiniStruct");
  
    private static final org.apache.thrift.protocol.TField MY_STRING_FIELD_DESC = new org.apache.thrift.protocol.TField("my_string", org.apache.thrift.protocol.TType.STRING, (short)1);

http://git-wip-us.apache.org/repos/asf/hive/blob/c528294b/serde/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/serde2/thrift/test/SetIntString.java
----------------------------------------------------------------------
diff --cc serde/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/serde2/thrift/test/SetIntString.java
index 676f2b2,58498b0..d1bd61d
--- a/serde/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/serde2/thrift/test/SetIntString.java
+++ b/serde/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/serde2/thrift/test/SetIntString.java
@@@ -31,7 -33,9 +33,9 @@@ import javax.annotation.Generated
  import org.slf4j.Logger;
  import org.slf4j.LoggerFactory;
  
- public class SetIntString implements org.apache.thrift.TBase<SetIntString, SetIntString._Fields>, java.io.Serializable, Cloneable {
+ @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
 -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3")
++@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-14")
+ public class SetIntString implements org.apache.thrift.TBase<SetIntString, SetIntString._Fields>, java.io.Serializable, Cloneable, Comparable<SetIntString> {
    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("SetIntString");
  
    private static final org.apache.thrift.protocol.TField S_INT_STRING_FIELD_DESC = new org.apache.thrift.protocol.TField("sIntString", org.apache.thrift.protocol.TType.SET, (short)1);


[07/50] [abbrv] hive git commit: HIVE-11387: CBO: Calcite Operator To Hive Operator (Calcite Return Path) : fix reduce_deduplicate optimization (Pengcheng Xiong, reviewed by Jesus Camacho Rodriguez, Hari Subramaniyan)

Posted by se...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/538ae703/ql/src/test/results/clientpositive/spark/vectorized_ptf.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/vectorized_ptf.q.out b/ql/src/test/results/clientpositive/spark/vectorized_ptf.q.out
index 6960bee..32514ca 100644
--- a/ql/src/test/results/clientpositive/spark/vectorized_ptf.q.out
+++ b/ql/src/test/results/clientpositive/spark/vectorized_ptf.q.out
@@ -1855,8 +1855,7 @@ STAGE PLANS:
     Spark
       Edges:
         Reducer 2 <- Map 1 (PARTITION-LEVEL SORT, 2)
-        Reducer 3 <- Reducer 2 (GROUP, 2)
-        Reducer 4 <- Reducer 3 (PARTITION-LEVEL SORT, 2)
+        Reducer 3 <- Reducer 2 (GROUP PARTITION-LEVEL SORT, 2)
 #### A masked pattern was here ####
       Vertices:
         Map 1 
@@ -1956,7 +1955,7 @@ STAGE PLANS:
                       Reduce Output Operator
                         key expressions: _col0 (type: string), _col1 (type: string), _col2 (type: int)
                         sort order: +++
-                        Map-reduce partition columns: _col0 (type: string), _col1 (type: string), _col2 (type: int)
+                        Map-reduce partition columns: _col0 (type: string)
                         Statistics: Num rows: 26 Data size: 16042 Basic stats: COMPLETE Column stats: NONE
                         tag: -1
                         auto parallelism: false
@@ -1968,22 +1967,6 @@ STAGE PLANS:
                 mode: mergepartial
                 outputColumnNames: _col0, _col1, _col2
                 Statistics: Num rows: 13 Data size: 8021 Basic stats: COMPLETE Column stats: NONE
-                Reduce Output Operator
-                  key expressions: _col0 (type: string), _col1 (type: string)
-                  sort order: ++
-                  Map-reduce partition columns: _col0 (type: string)
-                  Statistics: Num rows: 13 Data size: 8021 Basic stats: COMPLETE Column stats: NONE
-                  tag: -1
-                  value expressions: _col2 (type: int)
-                  auto parallelism: false
-            Execution mode: vectorized
-        Reducer 4 
-            Needs Tagging: false
-            Reduce Operator Tree:
-              Select Operator
-                expressions: KEY.reducesinkkey0 (type: string), KEY.reducesinkkey1 (type: string), VALUE._col0 (type: int)
-                outputColumnNames: _col0, _col1, _col2
-                Statistics: Num rows: 13 Data size: 8021 Basic stats: COMPLETE Column stats: NONE
                 PTF Operator
                   Function definitions:
                       Input definition

http://git-wip-us.apache.org/repos/asf/hive/blob/538ae703/ql/src/test/results/clientpositive/tez/explainuser_1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/explainuser_1.q.out b/ql/src/test/results/clientpositive/tez/explainuser_1.q.out
index 1c49f52..9756b0c 100644
--- a/ql/src/test/results/clientpositive/tez/explainuser_1.q.out
+++ b/ql/src/test/results/clientpositive/tez/explainuser_1.q.out
@@ -6931,13 +6931,12 @@ Vertex dependency in root stage
 Reducer 2 <- Map 1 (SIMPLE_EDGE)
 Reducer 3 <- Reducer 2 (SIMPLE_EDGE)
 Reducer 4 <- Reducer 3 (SIMPLE_EDGE)
-Reducer 5 <- Reducer 4 (SIMPLE_EDGE)
 
 Stage-0
    Fetch Operator
       limit:-1
       Stage-1
-         Reducer 5
+         Reducer 4
          File Output Operator [FS_14]
             compressed:true
             Statistics:Num rows: 26 Data size: 6214 Basic stats: COMPLETE Column stats: COMPLETE
@@ -6948,52 +6947,42 @@ Stage-0
                PTF Operator [PTF_11]
                   Function definitions:[{"Input definition":{"type:":"WINDOWING"}},{"partition by:":"_col0","name:":"windowingtablefunction","order by:":"_col1"}]
                   Statistics:Num rows: 26 Data size: 5798 Basic stats: COMPLETE Column stats: COMPLETE
-                  Select Operator [SEL_10]
+                  Group By Operator [GBY_8]
+                  |  keys:KEY._col0 (type: string), KEY._col1 (type: string), KEY._col2 (type: int)
                   |  outputColumnNames:["_col0","_col1","_col2"]
                   |  Statistics:Num rows: 26 Data size: 5798 Basic stats: COMPLETE Column stats: COMPLETE
-                  |<-Reducer 4 [SIMPLE_EDGE]
-                     Reduce Output Operator [RS_9]
-                        key expressions:_col0 (type: string), _col1 (type: string)
+                  |<-Reducer 3 [SIMPLE_EDGE]
+                     Reduce Output Operator [RS_7]
+                        key expressions:_col0 (type: string), _col1 (type: string), _col2 (type: int)
                         Map-reduce partition columns:_col0 (type: string)
-                        sort order:++
+                        sort order:+++
                         Statistics:Num rows: 26 Data size: 5798 Basic stats: COMPLETE Column stats: COMPLETE
-                        value expressions:_col2 (type: int)
-                        Group By Operator [GBY_8]
+                        Group By Operator [GBY_6]
                         |  keys:KEY._col0 (type: string), KEY._col1 (type: string), KEY._col2 (type: int)
                         |  outputColumnNames:["_col0","_col1","_col2"]
                         |  Statistics:Num rows: 26 Data size: 5798 Basic stats: COMPLETE Column stats: COMPLETE
-                        |<-Reducer 3 [SIMPLE_EDGE]
-                           Reduce Output Operator [RS_7]
-                              key expressions:_col0 (type: string), _col1 (type: string), _col2 (type: int)
-                              Map-reduce partition columns:_col0 (type: string), _col1 (type: string), _col2 (type: int)
+                        |<-Reducer 2 [SIMPLE_EDGE]
+                           Reduce Output Operator [RS_5]
+                              key expressions:_col2 (type: string), _col1 (type: string), _col5 (type: int)
+                              Map-reduce partition columns:rand() (type: double)
                               sort order:+++
-                              Statistics:Num rows: 26 Data size: 5798 Basic stats: COMPLETE Column stats: COMPLETE
-                              Group By Operator [GBY_6]
-                              |  keys:KEY._col0 (type: string), KEY._col1 (type: string), KEY._col2 (type: int)
-                              |  outputColumnNames:["_col0","_col1","_col2"]
-                              |  Statistics:Num rows: 26 Data size: 5798 Basic stats: COMPLETE Column stats: COMPLETE
-                              |<-Reducer 2 [SIMPLE_EDGE]
-                                 Reduce Output Operator [RS_5]
-                                    key expressions:_col2 (type: string), _col1 (type: string), _col5 (type: int)
-                                    Map-reduce partition columns:rand() (type: double)
-                                    sort order:+++
-                                    Statistics:Num rows: 26 Data size: 12766 Basic stats: COMPLETE Column stats: COMPLETE
-                                    PTF Operator [PTF_3]
-                                       Function definitions:[{"Input definition":{"type:":"TABLE"}},{"Partition table definition":{"partition by:":"_col2","name:":"noop","order by:":"_col1"}}]
-                                       Statistics:Num rows: 26 Data size: 12766 Basic stats: COMPLETE Column stats: COMPLETE
-                                       Select Operator [SEL_2]
-                                       |  outputColumnNames:["_col1","_col2","_col5"]
-                                       |  Statistics:Num rows: 26 Data size: 12766 Basic stats: COMPLETE Column stats: COMPLETE
-                                       |<-Map 1 [SIMPLE_EDGE]
-                                          Reduce Output Operator [RS_1]
-                                             key expressions:p_mfgr (type: string), p_name (type: string)
-                                             Map-reduce partition columns:p_mfgr (type: string)
-                                             sort order:++
-                                             Statistics:Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: COMPLETE
-                                             value expressions:p_size (type: int)
-                                             TableScan [TS_0]
-                                                alias:part
-                                                Statistics:Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: COMPLETE
+                              Statistics:Num rows: 26 Data size: 12766 Basic stats: COMPLETE Column stats: COMPLETE
+                              PTF Operator [PTF_3]
+                                 Function definitions:[{"Input definition":{"type:":"TABLE"}},{"Partition table definition":{"partition by:":"_col2","name:":"noop","order by:":"_col1"}}]
+                                 Statistics:Num rows: 26 Data size: 12766 Basic stats: COMPLETE Column stats: COMPLETE
+                                 Select Operator [SEL_2]
+                                 |  outputColumnNames:["_col1","_col2","_col5"]
+                                 |  Statistics:Num rows: 26 Data size: 12766 Basic stats: COMPLETE Column stats: COMPLETE
+                                 |<-Map 1 [SIMPLE_EDGE]
+                                    Reduce Output Operator [RS_1]
+                                       key expressions:p_mfgr (type: string), p_name (type: string)
+                                       Map-reduce partition columns:p_mfgr (type: string)
+                                       sort order:++
+                                       Statistics:Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: COMPLETE
+                                       value expressions:p_size (type: int)
+                                       TableScan [TS_0]
+                                          alias:part
+                                          Statistics:Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: COMPLETE
 
 PREHOOK: query: explain
 select abc.* 

http://git-wip-us.apache.org/repos/asf/hive/blob/538ae703/ql/src/test/results/clientpositive/tez/ptf.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/ptf.q.out b/ql/src/test/results/clientpositive/tez/ptf.q.out
index 88d1a98..b134440 100644
--- a/ql/src/test/results/clientpositive/tez/ptf.q.out
+++ b/ql/src/test/results/clientpositive/tez/ptf.q.out
@@ -869,7 +869,6 @@ STAGE PLANS:
       Edges:
         Reducer 2 <- Map 1 (SIMPLE_EDGE)
         Reducer 3 <- Reducer 2 (SIMPLE_EDGE)
-        Reducer 4 <- Reducer 3 (SIMPLE_EDGE)
 #### A masked pattern was here ####
       Vertices:
         Map 1 
@@ -915,7 +914,7 @@ STAGE PLANS:
                       Reduce Output Operator
                         key expressions: _col0 (type: string), _col1 (type: string), _col2 (type: int)
                         sort order: +++
-                        Map-reduce partition columns: _col0 (type: string), _col1 (type: string), _col2 (type: int)
+                        Map-reduce partition columns: _col0 (type: string)
                         Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
         Reducer 3 
             Reduce Operator Tree:
@@ -924,18 +923,6 @@ STAGE PLANS:
                 mode: mergepartial
                 outputColumnNames: _col0, _col1, _col2
                 Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE
-                Reduce Output Operator
-                  key expressions: _col0 (type: string), _col1 (type: string)
-                  sort order: ++
-                  Map-reduce partition columns: _col0 (type: string)
-                  Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE
-                  value expressions: _col2 (type: int)
-        Reducer 4 
-            Reduce Operator Tree:
-              Select Operator
-                expressions: KEY.reducesinkkey0 (type: string), KEY.reducesinkkey1 (type: string), VALUE._col0 (type: int)
-                outputColumnNames: _col0, _col1, _col2
-                Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE
                 PTF Operator
                   Function definitions:
                       Input definition

http://git-wip-us.apache.org/repos/asf/hive/blob/538ae703/ql/src/test/results/clientpositive/tez/vectorized_ptf.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/vectorized_ptf.q.out b/ql/src/test/results/clientpositive/tez/vectorized_ptf.q.out
index c2e9b1a..2dad1e7 100644
--- a/ql/src/test/results/clientpositive/tez/vectorized_ptf.q.out
+++ b/ql/src/test/results/clientpositive/tez/vectorized_ptf.q.out
@@ -1857,7 +1857,6 @@ STAGE PLANS:
       Edges:
         Reducer 2 <- Map 1 (SIMPLE_EDGE)
         Reducer 3 <- Reducer 2 (SIMPLE_EDGE)
-        Reducer 4 <- Reducer 3 (SIMPLE_EDGE)
 #### A masked pattern was here ####
       Vertices:
         Map 1 
@@ -1957,7 +1956,7 @@ STAGE PLANS:
                       Reduce Output Operator
                         key expressions: _col0 (type: string), _col1 (type: string), _col2 (type: int)
                         sort order: +++
-                        Map-reduce partition columns: _col0 (type: string), _col1 (type: string), _col2 (type: int)
+                        Map-reduce partition columns: _col0 (type: string)
                         Statistics: Num rows: 26 Data size: 16042 Basic stats: COMPLETE Column stats: NONE
                         tag: -1
                         auto parallelism: true
@@ -1969,22 +1968,6 @@ STAGE PLANS:
                 mode: mergepartial
                 outputColumnNames: _col0, _col1, _col2
                 Statistics: Num rows: 13 Data size: 8021 Basic stats: COMPLETE Column stats: NONE
-                Reduce Output Operator
-                  key expressions: _col0 (type: string), _col1 (type: string)
-                  sort order: ++
-                  Map-reduce partition columns: _col0 (type: string)
-                  Statistics: Num rows: 13 Data size: 8021 Basic stats: COMPLETE Column stats: NONE
-                  tag: -1
-                  value expressions: _col2 (type: int)
-                  auto parallelism: true
-            Execution mode: vectorized
-        Reducer 4 
-            Needs Tagging: false
-            Reduce Operator Tree:
-              Select Operator
-                expressions: KEY.reducesinkkey0 (type: string), KEY.reducesinkkey1 (type: string), VALUE._col0 (type: int)
-                outputColumnNames: _col0, _col1, _col2
-                Statistics: Num rows: 13 Data size: 8021 Basic stats: COMPLETE Column stats: NONE
                 PTF Operator
                   Function definitions:
                       Input definition

http://git-wip-us.apache.org/repos/asf/hive/blob/538ae703/ql/src/test/results/clientpositive/union_remove_6_subq.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/union_remove_6_subq.q.out b/ql/src/test/results/clientpositive/union_remove_6_subq.q.out
index a38548f..05363e4 100644
--- a/ql/src/test/results/clientpositive/union_remove_6_subq.q.out
+++ b/ql/src/test/results/clientpositive/union_remove_6_subq.q.out
@@ -447,10 +447,9 @@ WINDOW w AS (PARTITION BY key ORDER BY c ROWS UNBOUNDED PRECEDING)
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
-  Stage-2 depends on stages: Stage-1, Stage-4
-  Stage-3 depends on stages: Stage-2
-  Stage-4 is a root stage
-  Stage-0 depends on stages: Stage-3
+  Stage-2 depends on stages: Stage-1, Stage-3
+  Stage-3 is a root stage
+  Stage-0 depends on stages: Stage-2
 
 STAGE PLANS:
   Stage: Stage-1
@@ -507,7 +506,7 @@ STAGE PLANS:
                 Reduce Output Operator
                   key expressions: _col0 (type: string), _col1 (type: bigint)
                   sort order: ++
-                  Map-reduce partition columns: _col0 (type: string), _col1 (type: bigint)
+                  Map-reduce partition columns: _col0 (type: string)
                   Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
           TableScan
             Union
@@ -520,7 +519,7 @@ STAGE PLANS:
                 Reduce Output Operator
                   key expressions: _col0 (type: string), _col1 (type: bigint)
                   sort order: ++
-                  Map-reduce partition columns: _col0 (type: string), _col1 (type: bigint)
+                  Map-reduce partition columns: _col0 (type: string)
                   Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
       Reduce Operator Tree:
         Group By Operator
@@ -528,27 +527,6 @@ STAGE PLANS:
           mode: mergepartial
           outputColumnNames: _col0, _col1
           Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
-          File Output Operator
-            compressed: false
-            table:
-                input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
-
-  Stage: Stage-3
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            Reduce Output Operator
-              key expressions: _col0 (type: string), _col1 (type: bigint)
-              sort order: ++
-              Map-reduce partition columns: _col0 (type: string)
-              Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
-      Reduce Operator Tree:
-        Select Operator
-          expressions: KEY.reducesinkkey0 (type: string), KEY.reducesinkkey1 (type: bigint)
-          outputColumnNames: _col0, _col1
-          Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
           PTF Operator
             Function definitions:
                 Input definition
@@ -581,7 +559,7 @@ STAGE PLANS:
                     output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                     serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
 
-  Stage: Stage-4
+  Stage: Stage-3
     Map Reduce
       Map Operator Tree:
           TableScan

http://git-wip-us.apache.org/repos/asf/hive/blob/538ae703/ql/src/test/results/clientpositive/vectorized_ptf.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vectorized_ptf.q.out b/ql/src/test/results/clientpositive/vectorized_ptf.q.out
index 79edb0e..e65a880 100644
--- a/ql/src/test/results/clientpositive/vectorized_ptf.q.out
+++ b/ql/src/test/results/clientpositive/vectorized_ptf.q.out
@@ -2001,8 +2001,7 @@ TOK_QUERY
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-2 depends on stages: Stage-1
-  Stage-3 depends on stages: Stage-2
-  Stage-0 depends on stages: Stage-3
+  Stage-0 depends on stages: Stage-2
 
 STAGE PLANS:
   Stage: Stage-1
@@ -2125,7 +2124,7 @@ STAGE PLANS:
             Reduce Output Operator
               key expressions: _col0 (type: string), _col1 (type: string), _col2 (type: int)
               sort order: +++
-              Map-reduce partition columns: _col0 (type: string), _col1 (type: string), _col2 (type: int)
+              Map-reduce partition columns: _col0 (type: string)
               Statistics: Num rows: 26 Data size: 16042 Basic stats: COMPLETE Column stats: NONE
               tag: -1
               auto parallelism: false
@@ -2161,68 +2160,6 @@ STAGE PLANS:
           mode: mergepartial
           outputColumnNames: _col0, _col1, _col2
           Statistics: Num rows: 13 Data size: 8021 Basic stats: COMPLETE Column stats: NONE
-          File Output Operator
-            compressed: false
-            GlobalTableId: 0
-#### A masked pattern was here ####
-            NumFilesPerFileSink: 1
-            table:
-                input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                properties:
-                  columns _col0,_col1,_col2
-                  columns.types string,string,int
-                  escape.delim \
-                  serialization.lib org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
-                serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
-            TotalFiles: 1
-            GatherStats: false
-            MultiFileSpray: false
-
-  Stage: Stage-3
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            GatherStats: false
-            Reduce Output Operator
-              key expressions: _col0 (type: string), _col1 (type: string)
-              sort order: ++
-              Map-reduce partition columns: _col0 (type: string)
-              Statistics: Num rows: 13 Data size: 8021 Basic stats: COMPLETE Column stats: NONE
-              tag: -1
-              value expressions: _col2 (type: int)
-              auto parallelism: false
-      Path -> Alias:
-#### A masked pattern was here ####
-      Path -> Partition:
-#### A masked pattern was here ####
-          Partition
-            base file name: -mr-10003
-            input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-            output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-            properties:
-              columns _col0,_col1,_col2
-              columns.types string,string,int
-              escape.delim \
-              serialization.lib org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
-            serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
-          
-              input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-              output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-              properties:
-                columns _col0,_col1,_col2
-                columns.types string,string,int
-                escape.delim \
-                serialization.lib org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
-              serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
-      Truncated Path -> Alias:
-#### A masked pattern was here ####
-      Needs Tagging: false
-      Reduce Operator Tree:
-        Select Operator
-          expressions: KEY.reducesinkkey0 (type: string), KEY.reducesinkkey1 (type: string), VALUE._col0 (type: int)
-          outputColumnNames: _col0, _col1, _col2
-          Statistics: Num rows: 13 Data size: 8021 Basic stats: COMPLETE Column stats: NONE
           PTF Operator
             Function definitions:
                 Input definition


[48/50] [abbrv] hive git commit: HIVE-11568 : merge master into branch (Sergey Shelukhin)

Posted by se...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/c528294b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/CompactionRequest.java
----------------------------------------------------------------------
diff --cc metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/CompactionRequest.java
index 939c15a,e9088e0..c6ad69e
--- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/CompactionRequest.java
+++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/CompactionRequest.java
@@@ -31,7 -33,9 +33,9 @@@ import javax.annotation.Generated
  import org.slf4j.Logger;
  import org.slf4j.LoggerFactory;
  
- public class CompactionRequest implements org.apache.thrift.TBase<CompactionRequest, CompactionRequest._Fields>, java.io.Serializable, Cloneable {
+ @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
 -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3")
++@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-14")
+ public class CompactionRequest implements org.apache.thrift.TBase<CompactionRequest, CompactionRequest._Fields>, java.io.Serializable, Cloneable, Comparable<CompactionRequest> {
    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("CompactionRequest");
  
    private static final org.apache.thrift.protocol.TField DBNAME_FIELD_DESC = new org.apache.thrift.protocol.TField("dbname", org.apache.thrift.protocol.TType.STRING, (short)1);

http://git-wip-us.apache.org/repos/asf/hive/blob/c528294b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ConfigValSecurityException.java
----------------------------------------------------------------------
diff --cc metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ConfigValSecurityException.java
index bdcf21d,000670a..99e7a83
--- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ConfigValSecurityException.java
+++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ConfigValSecurityException.java
@@@ -31,7 -33,9 +33,9 @@@ import javax.annotation.Generated
  import org.slf4j.Logger;
  import org.slf4j.LoggerFactory;
  
- public class ConfigValSecurityException extends TException implements org.apache.thrift.TBase<ConfigValSecurityException, ConfigValSecurityException._Fields>, java.io.Serializable, Cloneable {
+ @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
 -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3")
++@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-14")
+ public class ConfigValSecurityException extends TException implements org.apache.thrift.TBase<ConfigValSecurityException, ConfigValSecurityException._Fields>, java.io.Serializable, Cloneable, Comparable<ConfigValSecurityException> {
    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("ConfigValSecurityException");
  
    private static final org.apache.thrift.protocol.TField MESSAGE_FIELD_DESC = new org.apache.thrift.protocol.TField("message", org.apache.thrift.protocol.TType.STRING, (short)1);

http://git-wip-us.apache.org/repos/asf/hive/blob/c528294b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/CurrentNotificationEventId.java
----------------------------------------------------------------------
diff --cc metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/CurrentNotificationEventId.java
index 8b8e5c4,d55d874..89abc78
--- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/CurrentNotificationEventId.java
+++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/CurrentNotificationEventId.java
@@@ -31,7 -33,9 +33,9 @@@ import javax.annotation.Generated
  import org.slf4j.Logger;
  import org.slf4j.LoggerFactory;
  
- public class CurrentNotificationEventId implements org.apache.thrift.TBase<CurrentNotificationEventId, CurrentNotificationEventId._Fields>, java.io.Serializable, Cloneable {
+ @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
 -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3")
++@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-14")
+ public class CurrentNotificationEventId implements org.apache.thrift.TBase<CurrentNotificationEventId, CurrentNotificationEventId._Fields>, java.io.Serializable, Cloneable, Comparable<CurrentNotificationEventId> {
    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("CurrentNotificationEventId");
  
    private static final org.apache.thrift.protocol.TField EVENT_ID_FIELD_DESC = new org.apache.thrift.protocol.TField("eventId", org.apache.thrift.protocol.TType.I64, (short)1);

http://git-wip-us.apache.org/repos/asf/hive/blob/c528294b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Database.java
----------------------------------------------------------------------
diff --cc metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Database.java
index c3c531d,56b7281..759eec9
--- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Database.java
+++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Database.java
@@@ -31,7 -33,9 +33,9 @@@ import javax.annotation.Generated
  import org.slf4j.Logger;
  import org.slf4j.LoggerFactory;
  
- public class Database implements org.apache.thrift.TBase<Database, Database._Fields>, java.io.Serializable, Cloneable {
+ @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
 -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3")
++@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-14")
+ public class Database implements org.apache.thrift.TBase<Database, Database._Fields>, java.io.Serializable, Cloneable, Comparable<Database> {
    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("Database");
  
    private static final org.apache.thrift.protocol.TField NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("name", org.apache.thrift.protocol.TType.STRING, (short)1);

http://git-wip-us.apache.org/repos/asf/hive/blob/c528294b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Date.java
----------------------------------------------------------------------
diff --cc metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Date.java
index 3e02db5,0b406d6..b4a44a4
--- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Date.java
+++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Date.java
@@@ -31,7 -33,9 +33,9 @@@ import javax.annotation.Generated
  import org.slf4j.Logger;
  import org.slf4j.LoggerFactory;
  
- public class Date implements org.apache.thrift.TBase<Date, Date._Fields>, java.io.Serializable, Cloneable {
+ @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
 -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3")
++@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-14")
+ public class Date implements org.apache.thrift.TBase<Date, Date._Fields>, java.io.Serializable, Cloneable, Comparable<Date> {
    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("Date");
  
    private static final org.apache.thrift.protocol.TField DAYS_SINCE_EPOCH_FIELD_DESC = new org.apache.thrift.protocol.TField("daysSinceEpoch", org.apache.thrift.protocol.TType.I64, (short)1);

http://git-wip-us.apache.org/repos/asf/hive/blob/c528294b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/DateColumnStatsData.java
----------------------------------------------------------------------
diff --cc metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/DateColumnStatsData.java
index e9a577d,7a3d4ed..7050334
--- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/DateColumnStatsData.java
+++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/DateColumnStatsData.java
@@@ -31,7 -33,9 +33,9 @@@ import javax.annotation.Generated
  import org.slf4j.Logger;
  import org.slf4j.LoggerFactory;
  
- public class DateColumnStatsData implements org.apache.thrift.TBase<DateColumnStatsData, DateColumnStatsData._Fields>, java.io.Serializable, Cloneable {
+ @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
 -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3")
++@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-14")
+ public class DateColumnStatsData implements org.apache.thrift.TBase<DateColumnStatsData, DateColumnStatsData._Fields>, java.io.Serializable, Cloneable, Comparable<DateColumnStatsData> {
    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("DateColumnStatsData");
  
    private static final org.apache.thrift.protocol.TField LOW_VALUE_FIELD_DESC = new org.apache.thrift.protocol.TField("lowValue", org.apache.thrift.protocol.TType.STRUCT, (short)1);

http://git-wip-us.apache.org/repos/asf/hive/blob/c528294b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Decimal.java
----------------------------------------------------------------------
diff --cc metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Decimal.java
index ed8bb18,9215ce9..1f82543
--- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Decimal.java
+++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Decimal.java
@@@ -31,7 -33,9 +33,9 @@@ import javax.annotation.Generated
  import org.slf4j.Logger;
  import org.slf4j.LoggerFactory;
  
- public class Decimal implements org.apache.thrift.TBase<Decimal, Decimal._Fields>, java.io.Serializable, Cloneable {
+ @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
 -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3")
++@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-14")
+ public class Decimal implements org.apache.thrift.TBase<Decimal, Decimal._Fields>, java.io.Serializable, Cloneable, Comparable<Decimal> {
    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("Decimal");
  
    private static final org.apache.thrift.protocol.TField UNSCALED_FIELD_DESC = new org.apache.thrift.protocol.TField("unscaled", org.apache.thrift.protocol.TType.STRING, (short)1);

http://git-wip-us.apache.org/repos/asf/hive/blob/c528294b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/DecimalColumnStatsData.java
----------------------------------------------------------------------
diff --cc metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/DecimalColumnStatsData.java
index 951d479,e64ca36..02092dc
--- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/DecimalColumnStatsData.java
+++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/DecimalColumnStatsData.java
@@@ -31,7 -33,9 +33,9 @@@ import javax.annotation.Generated
  import org.slf4j.Logger;
  import org.slf4j.LoggerFactory;
  
- public class DecimalColumnStatsData implements org.apache.thrift.TBase<DecimalColumnStatsData, DecimalColumnStatsData._Fields>, java.io.Serializable, Cloneable {
+ @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
 -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3")
++@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-14")
+ public class DecimalColumnStatsData implements org.apache.thrift.TBase<DecimalColumnStatsData, DecimalColumnStatsData._Fields>, java.io.Serializable, Cloneable, Comparable<DecimalColumnStatsData> {
    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("DecimalColumnStatsData");
  
    private static final org.apache.thrift.protocol.TField LOW_VALUE_FIELD_DESC = new org.apache.thrift.protocol.TField("lowValue", org.apache.thrift.protocol.TType.STRUCT, (short)1);

http://git-wip-us.apache.org/repos/asf/hive/blob/c528294b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/DoubleColumnStatsData.java
----------------------------------------------------------------------
diff --cc metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/DoubleColumnStatsData.java
index 4203fd8,2509ed5..52288e5
--- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/DoubleColumnStatsData.java
+++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/DoubleColumnStatsData.java
@@@ -31,7 -33,9 +33,9 @@@ import javax.annotation.Generated
  import org.slf4j.Logger;
  import org.slf4j.LoggerFactory;
  
- public class DoubleColumnStatsData implements org.apache.thrift.TBase<DoubleColumnStatsData, DoubleColumnStatsData._Fields>, java.io.Serializable, Cloneable {
+ @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
 -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3")
++@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-14")
+ public class DoubleColumnStatsData implements org.apache.thrift.TBase<DoubleColumnStatsData, DoubleColumnStatsData._Fields>, java.io.Serializable, Cloneable, Comparable<DoubleColumnStatsData> {
    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("DoubleColumnStatsData");
  
    private static final org.apache.thrift.protocol.TField LOW_VALUE_FIELD_DESC = new org.apache.thrift.protocol.TField("lowValue", org.apache.thrift.protocol.TType.DOUBLE, (short)1);

http://git-wip-us.apache.org/repos/asf/hive/blob/c528294b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/DropPartitionsExpr.java
----------------------------------------------------------------------
diff --cc metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/DropPartitionsExpr.java
index 59a66f3,5e3a2d1..0d1e50d
--- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/DropPartitionsExpr.java
+++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/DropPartitionsExpr.java
@@@ -31,7 -33,9 +33,9 @@@ import javax.annotation.Generated
  import org.slf4j.Logger;
  import org.slf4j.LoggerFactory;
  
- public class DropPartitionsExpr implements org.apache.thrift.TBase<DropPartitionsExpr, DropPartitionsExpr._Fields>, java.io.Serializable, Cloneable {
+ @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
 -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3")
++@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-14")
+ public class DropPartitionsExpr implements org.apache.thrift.TBase<DropPartitionsExpr, DropPartitionsExpr._Fields>, java.io.Serializable, Cloneable, Comparable<DropPartitionsExpr> {
    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("DropPartitionsExpr");
  
    private static final org.apache.thrift.protocol.TField EXPR_FIELD_DESC = new org.apache.thrift.protocol.TField("expr", org.apache.thrift.protocol.TType.STRING, (short)1);

http://git-wip-us.apache.org/repos/asf/hive/blob/c528294b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/DropPartitionsRequest.java
----------------------------------------------------------------------
diff --cc metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/DropPartitionsRequest.java
index 1923f38,24536ba..46cc9a7
--- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/DropPartitionsRequest.java
+++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/DropPartitionsRequest.java
@@@ -31,7 -33,9 +33,9 @@@ import javax.annotation.Generated
  import org.slf4j.Logger;
  import org.slf4j.LoggerFactory;
  
- public class DropPartitionsRequest implements org.apache.thrift.TBase<DropPartitionsRequest, DropPartitionsRequest._Fields>, java.io.Serializable, Cloneable {
+ @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
 -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3")
++@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-14")
+ public class DropPartitionsRequest implements org.apache.thrift.TBase<DropPartitionsRequest, DropPartitionsRequest._Fields>, java.io.Serializable, Cloneable, Comparable<DropPartitionsRequest> {
    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("DropPartitionsRequest");
  
    private static final org.apache.thrift.protocol.TField DB_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("dbName", org.apache.thrift.protocol.TType.STRING, (short)1);

http://git-wip-us.apache.org/repos/asf/hive/blob/c528294b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/DropPartitionsResult.java
----------------------------------------------------------------------
diff --cc metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/DropPartitionsResult.java
index b7f69f2,c139e65..09da136
--- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/DropPartitionsResult.java
+++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/DropPartitionsResult.java
@@@ -31,7 -33,9 +33,9 @@@ import javax.annotation.Generated
  import org.slf4j.Logger;
  import org.slf4j.LoggerFactory;
  
- public class DropPartitionsResult implements org.apache.thrift.TBase<DropPartitionsResult, DropPartitionsResult._Fields>, java.io.Serializable, Cloneable {
+ @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
 -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3")
++@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-14")
+ public class DropPartitionsResult implements org.apache.thrift.TBase<DropPartitionsResult, DropPartitionsResult._Fields>, java.io.Serializable, Cloneable, Comparable<DropPartitionsResult> {
    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("DropPartitionsResult");
  
    private static final org.apache.thrift.protocol.TField PARTITIONS_FIELD_DESC = new org.apache.thrift.protocol.TField("partitions", org.apache.thrift.protocol.TType.LIST, (short)1);

http://git-wip-us.apache.org/repos/asf/hive/blob/c528294b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/EnvironmentContext.java
----------------------------------------------------------------------
diff --cc metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/EnvironmentContext.java
index ab0b399,6accb8d..3eabc86
--- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/EnvironmentContext.java
+++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/EnvironmentContext.java
@@@ -31,7 -33,9 +33,9 @@@ import javax.annotation.Generated
  import org.slf4j.Logger;
  import org.slf4j.LoggerFactory;
  
- public class EnvironmentContext implements org.apache.thrift.TBase<EnvironmentContext, EnvironmentContext._Fields>, java.io.Serializable, Cloneable {
+ @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
 -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3")
++@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-14")
+ public class EnvironmentContext implements org.apache.thrift.TBase<EnvironmentContext, EnvironmentContext._Fields>, java.io.Serializable, Cloneable, Comparable<EnvironmentContext> {
    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("EnvironmentContext");
  
    private static final org.apache.thrift.protocol.TField PROPERTIES_FIELD_DESC = new org.apache.thrift.protocol.TField("properties", org.apache.thrift.protocol.TType.MAP, (short)1);

http://git-wip-us.apache.org/repos/asf/hive/blob/c528294b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/FieldSchema.java
----------------------------------------------------------------------
diff --cc metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/FieldSchema.java
index a993810,ba69622..e73edd4
--- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/FieldSchema.java
+++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/FieldSchema.java
@@@ -31,7 -33,9 +33,9 @@@ import javax.annotation.Generated
  import org.slf4j.Logger;
  import org.slf4j.LoggerFactory;
  
- public class FieldSchema implements org.apache.thrift.TBase<FieldSchema, FieldSchema._Fields>, java.io.Serializable, Cloneable {
+ @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
 -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3")
++@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-14")
+ public class FieldSchema implements org.apache.thrift.TBase<FieldSchema, FieldSchema._Fields>, java.io.Serializable, Cloneable, Comparable<FieldSchema> {
    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("FieldSchema");
  
    private static final org.apache.thrift.protocol.TField NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("name", org.apache.thrift.protocol.TType.STRING, (short)1);

http://git-wip-us.apache.org/repos/asf/hive/blob/c528294b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/FireEventRequest.java
----------------------------------------------------------------------
diff --cc metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/FireEventRequest.java
index 44b83da,f3d439c..25f9d54
--- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/FireEventRequest.java
+++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/FireEventRequest.java
@@@ -31,7 -33,9 +33,9 @@@ import javax.annotation.Generated
  import org.slf4j.Logger;
  import org.slf4j.LoggerFactory;
  
- public class FireEventRequest implements org.apache.thrift.TBase<FireEventRequest, FireEventRequest._Fields>, java.io.Serializable, Cloneable {
+ @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
 -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3")
++@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-14")
+ public class FireEventRequest implements org.apache.thrift.TBase<FireEventRequest, FireEventRequest._Fields>, java.io.Serializable, Cloneable, Comparable<FireEventRequest> {
    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("FireEventRequest");
  
    private static final org.apache.thrift.protocol.TField SUCCESSFUL_FIELD_DESC = new org.apache.thrift.protocol.TField("successful", org.apache.thrift.protocol.TType.BOOL, (short)1);

http://git-wip-us.apache.org/repos/asf/hive/blob/c528294b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/FireEventResponse.java
----------------------------------------------------------------------
diff --cc metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/FireEventResponse.java
index 051f411,d95ae06..6f277aa
--- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/FireEventResponse.java
+++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/FireEventResponse.java
@@@ -31,7 -33,9 +33,9 @@@ import javax.annotation.Generated
  import org.slf4j.Logger;
  import org.slf4j.LoggerFactory;
  
- public class FireEventResponse implements org.apache.thrift.TBase<FireEventResponse, FireEventResponse._Fields>, java.io.Serializable, Cloneable {
+ @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
 -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3")
++@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-14")
+ public class FireEventResponse implements org.apache.thrift.TBase<FireEventResponse, FireEventResponse._Fields>, java.io.Serializable, Cloneable, Comparable<FireEventResponse> {
    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("FireEventResponse");
  
  

http://git-wip-us.apache.org/repos/asf/hive/blob/c528294b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Function.java
----------------------------------------------------------------------
diff --cc metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Function.java
index c40e33d,50eff73..33c617e
--- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Function.java
+++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Function.java
@@@ -31,7 -33,9 +33,9 @@@ import javax.annotation.Generated
  import org.slf4j.Logger;
  import org.slf4j.LoggerFactory;
  
- public class Function implements org.apache.thrift.TBase<Function, Function._Fields>, java.io.Serializable, Cloneable {
+ @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
 -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3")
++@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-14")
+ public class Function implements org.apache.thrift.TBase<Function, Function._Fields>, java.io.Serializable, Cloneable, Comparable<Function> {
    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("Function");
  
    private static final org.apache.thrift.protocol.TField FUNCTION_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("functionName", org.apache.thrift.protocol.TType.STRING, (short)1);

http://git-wip-us.apache.org/repos/asf/hive/blob/c528294b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetAllFunctionsResponse.java
----------------------------------------------------------------------
diff --cc metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetAllFunctionsResponse.java
index 0000000,0a9e27b..170d8e7
mode 000000,100644..100644
--- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetAllFunctionsResponse.java
+++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetAllFunctionsResponse.java
@@@ -1,0 -1,447 +1,447 @@@
+ /**
+  * Autogenerated by Thrift Compiler (0.9.2)
+  *
+  * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+  *  @generated
+  */
+ package org.apache.hadoop.hive.metastore.api;
+ 
+ import org.apache.thrift.scheme.IScheme;
+ import org.apache.thrift.scheme.SchemeFactory;
+ import org.apache.thrift.scheme.StandardScheme;
+ 
+ import org.apache.thrift.scheme.TupleScheme;
+ import org.apache.thrift.protocol.TTupleProtocol;
+ import org.apache.thrift.protocol.TProtocolException;
+ import org.apache.thrift.EncodingUtils;
+ import org.apache.thrift.TException;
+ import org.apache.thrift.async.AsyncMethodCallback;
+ import org.apache.thrift.server.AbstractNonblockingServer.*;
+ import java.util.List;
+ import java.util.ArrayList;
+ import java.util.Map;
+ import java.util.HashMap;
+ import java.util.EnumMap;
+ import java.util.Set;
+ import java.util.HashSet;
+ import java.util.EnumSet;
+ import java.util.Collections;
+ import java.util.BitSet;
+ import java.nio.ByteBuffer;
+ import java.util.Arrays;
+ import javax.annotation.Generated;
+ import org.slf4j.Logger;
+ import org.slf4j.LoggerFactory;
+ 
+ @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
 -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3")
++@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-14")
+ public class GetAllFunctionsResponse implements org.apache.thrift.TBase<GetAllFunctionsResponse, GetAllFunctionsResponse._Fields>, java.io.Serializable, Cloneable, Comparable<GetAllFunctionsResponse> {
+   private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("GetAllFunctionsResponse");
+ 
+   private static final org.apache.thrift.protocol.TField FUNCTIONS_FIELD_DESC = new org.apache.thrift.protocol.TField("functions", org.apache.thrift.protocol.TType.LIST, (short)1);
+ 
+   private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
+   static {
+     schemes.put(StandardScheme.class, new GetAllFunctionsResponseStandardSchemeFactory());
+     schemes.put(TupleScheme.class, new GetAllFunctionsResponseTupleSchemeFactory());
+   }
+ 
+   private List<Function> functions; // optional
+ 
+   /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
+   public enum _Fields implements org.apache.thrift.TFieldIdEnum {
+     FUNCTIONS((short)1, "functions");
+ 
+     private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
+ 
+     static {
+       for (_Fields field : EnumSet.allOf(_Fields.class)) {
+         byName.put(field.getFieldName(), field);
+       }
+     }
+ 
+     /**
+      * Find the _Fields constant that matches fieldId, or null if it's not found.
+      */
+     public static _Fields findByThriftId(int fieldId) {
+       switch(fieldId) {
+         case 1: // FUNCTIONS
+           return FUNCTIONS;
+         default:
+           return null;
+       }
+     }
+ 
+     /**
+      * Find the _Fields constant that matches fieldId, throwing an exception
+      * if it is not found.
+      */
+     public static _Fields findByThriftIdOrThrow(int fieldId) {
+       _Fields fields = findByThriftId(fieldId);
+       if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
+       return fields;
+     }
+ 
+     /**
+      * Find the _Fields constant that matches name, or null if it's not found.
+      */
+     public static _Fields findByName(String name) {
+       return byName.get(name);
+     }
+ 
+     private final short _thriftId;
+     private final String _fieldName;
+ 
+     _Fields(short thriftId, String fieldName) {
+       _thriftId = thriftId;
+       _fieldName = fieldName;
+     }
+ 
+     public short getThriftFieldId() {
+       return _thriftId;
+     }
+ 
+     public String getFieldName() {
+       return _fieldName;
+     }
+   }
+ 
+   // isset id assignments
+   private static final _Fields optionals[] = {_Fields.FUNCTIONS};
+   public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
+   static {
+     Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
+     tmpMap.put(_Fields.FUNCTIONS, new org.apache.thrift.meta_data.FieldMetaData("functions", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
+         new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, 
+             new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, Function.class))));
+     metaDataMap = Collections.unmodifiableMap(tmpMap);
+     org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(GetAllFunctionsResponse.class, metaDataMap);
+   }
+ 
+   public GetAllFunctionsResponse() {
+   }
+ 
+   /**
+    * Performs a deep copy on <i>other</i>.
+    */
+   public GetAllFunctionsResponse(GetAllFunctionsResponse other) {
+     if (other.isSetFunctions()) {
+       List<Function> __this__functions = new ArrayList<Function>(other.functions.size());
+       for (Function other_element : other.functions) {
+         __this__functions.add(new Function(other_element));
+       }
+       this.functions = __this__functions;
+     }
+   }
+ 
+   public GetAllFunctionsResponse deepCopy() {
+     return new GetAllFunctionsResponse(this);
+   }
+ 
+   @Override
+   public void clear() {
+     this.functions = null;
+   }
+ 
+   public int getFunctionsSize() {
+     return (this.functions == null) ? 0 : this.functions.size();
+   }
+ 
+   public java.util.Iterator<Function> getFunctionsIterator() {
+     return (this.functions == null) ? null : this.functions.iterator();
+   }
+ 
+   public void addToFunctions(Function elem) {
+     if (this.functions == null) {
+       this.functions = new ArrayList<Function>();
+     }
+     this.functions.add(elem);
+   }
+ 
+   public List<Function> getFunctions() {
+     return this.functions;
+   }
+ 
+   public void setFunctions(List<Function> functions) {
+     this.functions = functions;
+   }
+ 
+   public void unsetFunctions() {
+     this.functions = null;
+   }
+ 
+   /** Returns true if field functions is set (has been assigned a value) and false otherwise */
+   public boolean isSetFunctions() {
+     return this.functions != null;
+   }
+ 
+   public void setFunctionsIsSet(boolean value) {
+     if (!value) {
+       this.functions = null;
+     }
+   }
+ 
+   public void setFieldValue(_Fields field, Object value) {
+     switch (field) {
+     case FUNCTIONS:
+       if (value == null) {
+         unsetFunctions();
+       } else {
+         setFunctions((List<Function>)value);
+       }
+       break;
+ 
+     }
+   }
+ 
+   public Object getFieldValue(_Fields field) {
+     switch (field) {
+     case FUNCTIONS:
+       return getFunctions();
+ 
+     }
+     throw new IllegalStateException();
+   }
+ 
+   /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
+   public boolean isSet(_Fields field) {
+     if (field == null) {
+       throw new IllegalArgumentException();
+     }
+ 
+     switch (field) {
+     case FUNCTIONS:
+       return isSetFunctions();
+     }
+     throw new IllegalStateException();
+   }
+ 
+   @Override
+   public boolean equals(Object that) {
+     if (that == null)
+       return false;
+     if (that instanceof GetAllFunctionsResponse)
+       return this.equals((GetAllFunctionsResponse)that);
+     return false;
+   }
+ 
+   public boolean equals(GetAllFunctionsResponse that) {
+     if (that == null)
+       return false;
+ 
+     boolean this_present_functions = true && this.isSetFunctions();
+     boolean that_present_functions = true && that.isSetFunctions();
+     if (this_present_functions || that_present_functions) {
+       if (!(this_present_functions && that_present_functions))
+         return false;
+       if (!this.functions.equals(that.functions))
+         return false;
+     }
+ 
+     return true;
+   }
+ 
+   @Override
+   public int hashCode() {
+     List<Object> list = new ArrayList<Object>();
+ 
+     boolean present_functions = true && (isSetFunctions());
+     list.add(present_functions);
+     if (present_functions)
+       list.add(functions);
+ 
+     return list.hashCode();
+   }
+ 
+   @Override
+   public int compareTo(GetAllFunctionsResponse other) {
+     if (!getClass().equals(other.getClass())) {
+       return getClass().getName().compareTo(other.getClass().getName());
+     }
+ 
+     int lastComparison = 0;
+ 
+     lastComparison = Boolean.valueOf(isSetFunctions()).compareTo(other.isSetFunctions());
+     if (lastComparison != 0) {
+       return lastComparison;
+     }
+     if (isSetFunctions()) {
+       lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.functions, other.functions);
+       if (lastComparison != 0) {
+         return lastComparison;
+       }
+     }
+     return 0;
+   }
+ 
+   public _Fields fieldForId(int fieldId) {
+     return _Fields.findByThriftId(fieldId);
+   }
+ 
+   public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+     schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
+   }
+ 
+   public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+     schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
+   }
+ 
+   @Override
+   public String toString() {
+     StringBuilder sb = new StringBuilder("GetAllFunctionsResponse(");
+     boolean first = true;
+ 
+     if (isSetFunctions()) {
+       sb.append("functions:");
+       if (this.functions == null) {
+         sb.append("null");
+       } else {
+         sb.append(this.functions);
+       }
+       first = false;
+     }
+     sb.append(")");
+     return sb.toString();
+   }
+ 
+   public void validate() throws org.apache.thrift.TException {
+     // check for required fields
+     // check for sub-struct validity
+   }
+ 
+   private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
+     try {
+       write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
+     } catch (org.apache.thrift.TException te) {
+       throw new java.io.IOException(te);
+     }
+   }
+ 
+   private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
+     try {
+       read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
+     } catch (org.apache.thrift.TException te) {
+       throw new java.io.IOException(te);
+     }
+   }
+ 
+   private static class GetAllFunctionsResponseStandardSchemeFactory implements SchemeFactory {
+     public GetAllFunctionsResponseStandardScheme getScheme() {
+       return new GetAllFunctionsResponseStandardScheme();
+     }
+   }
+ 
+   private static class GetAllFunctionsResponseStandardScheme extends StandardScheme<GetAllFunctionsResponse> {
+ 
+     public void read(org.apache.thrift.protocol.TProtocol iprot, GetAllFunctionsResponse struct) throws org.apache.thrift.TException {
+       org.apache.thrift.protocol.TField schemeField;
+       iprot.readStructBegin();
+       while (true)
+       {
+         schemeField = iprot.readFieldBegin();
+         if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
+           break;
+         }
+         switch (schemeField.id) {
+           case 1: // FUNCTIONS
+             if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
+               {
+                 org.apache.thrift.protocol.TList _list524 = iprot.readListBegin();
+                 struct.functions = new ArrayList<Function>(_list524.size);
+                 Function _elem525;
+                 for (int _i526 = 0; _i526 < _list524.size; ++_i526)
+                 {
+                   _elem525 = new Function();
+                   _elem525.read(iprot);
+                   struct.functions.add(_elem525);
+                 }
+                 iprot.readListEnd();
+               }
+               struct.setFunctionsIsSet(true);
+             } else { 
+               org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+             }
+             break;
+           default:
+             org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+         }
+         iprot.readFieldEnd();
+       }
+       iprot.readStructEnd();
+       struct.validate();
+     }
+ 
+     public void write(org.apache.thrift.protocol.TProtocol oprot, GetAllFunctionsResponse struct) throws org.apache.thrift.TException {
+       struct.validate();
+ 
+       oprot.writeStructBegin(STRUCT_DESC);
+       if (struct.functions != null) {
+         if (struct.isSetFunctions()) {
+           oprot.writeFieldBegin(FUNCTIONS_FIELD_DESC);
+           {
+             oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.functions.size()));
+             for (Function _iter527 : struct.functions)
+             {
+               _iter527.write(oprot);
+             }
+             oprot.writeListEnd();
+           }
+           oprot.writeFieldEnd();
+         }
+       }
+       oprot.writeFieldStop();
+       oprot.writeStructEnd();
+     }
+ 
+   }
+ 
+   private static class GetAllFunctionsResponseTupleSchemeFactory implements SchemeFactory {
+     public GetAllFunctionsResponseTupleScheme getScheme() {
+       return new GetAllFunctionsResponseTupleScheme();
+     }
+   }
+ 
+   private static class GetAllFunctionsResponseTupleScheme extends TupleScheme<GetAllFunctionsResponse> {
+ 
+     @Override
+     public void write(org.apache.thrift.protocol.TProtocol prot, GetAllFunctionsResponse struct) throws org.apache.thrift.TException {
+       TTupleProtocol oprot = (TTupleProtocol) prot;
+       BitSet optionals = new BitSet();
+       if (struct.isSetFunctions()) {
+         optionals.set(0);
+       }
+       oprot.writeBitSet(optionals, 1);
+       if (struct.isSetFunctions()) {
+         {
+           oprot.writeI32(struct.functions.size());
+           for (Function _iter528 : struct.functions)
+           {
+             _iter528.write(oprot);
+           }
+         }
+       }
+     }
+ 
+     @Override
+     public void read(org.apache.thrift.protocol.TProtocol prot, GetAllFunctionsResponse struct) throws org.apache.thrift.TException {
+       TTupleProtocol iprot = (TTupleProtocol) prot;
+       BitSet incoming = iprot.readBitSet(1);
+       if (incoming.get(0)) {
+         {
+           org.apache.thrift.protocol.TList _list529 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
+           struct.functions = new ArrayList<Function>(_list529.size);
+           Function _elem530;
+           for (int _i531 = 0; _i531 < _list529.size; ++_i531)
+           {
+             _elem530 = new Function();
+             _elem530.read(iprot);
+             struct.functions.add(_elem530);
+           }
+         }
+         struct.setFunctionsIsSet(true);
+       }
+     }
+   }
+ 
+ }
+ 

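GetAllFunctionsResponse is the one wholly new file in this batch: a struct carrying a single optional list of Function, with the usual accessors, a deep-copy constructor, and standard/tuple serialization schemes (the tuple scheme writes a one-bit BitSet recording whether the optional field is present before writing the list itself, and the standard scheme skips the field entirely when it is unset). A small usage sketch follows; it assumes the generated hive-metastore classes and libthrift are on the classpath and exercises only methods visible in the class above:

    import java.util.ArrayList;
    import java.util.List;

    import org.apache.hadoop.hive.metastore.api.Function;
    import org.apache.hadoop.hive.metastore.api.GetAllFunctionsResponse;

    public class GetAllFunctionsResponseDemo {
      public static void main(String[] args) {
        GetAllFunctionsResponse resp = new GetAllFunctionsResponse();

        // 'functions' is optional and starts out unset.
        System.out.println(resp.isSetFunctions());   // false
        System.out.println(resp.getFunctionsSize()); // 0

        // Populate the optional list; empty Function structs are used purely for illustration.
        List<Function> fns = new ArrayList<Function>();
        fns.add(new Function());
        resp.setFunctions(fns);
        resp.addToFunctions(new Function());

        System.out.println(resp.isSetFunctions());   // true
        System.out.println(resp.getFunctionsSize()); // 2

        // deepCopy() clones the list element by element, as in the generated copy constructor.
        GetAllFunctionsResponse copy = resp.deepCopy();
        System.out.println(copy.getFunctionsSize()); // 2
      }
    }

Because the field is optional, isSetFunctions() gates both equality and serialization: an unset list is simply omitted on the wire rather than written as an empty list.
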
http://git-wip-us.apache.org/repos/asf/hive/blob/c528294b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetOpenTxnsInfoResponse.java
----------------------------------------------------------------------
diff --cc metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetOpenTxnsInfoResponse.java
index 9fe8d82,61dea16..8d2b62e
--- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetOpenTxnsInfoResponse.java
+++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetOpenTxnsInfoResponse.java
@@@ -31,7 -33,9 +33,9 @@@ import javax.annotation.Generated
  import org.slf4j.Logger;
  import org.slf4j.LoggerFactory;
  
- public class GetOpenTxnsInfoResponse implements org.apache.thrift.TBase<GetOpenTxnsInfoResponse, GetOpenTxnsInfoResponse._Fields>, java.io.Serializable, Cloneable {
+ @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
 -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3")
++@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-14")
+ public class GetOpenTxnsInfoResponse implements org.apache.thrift.TBase<GetOpenTxnsInfoResponse, GetOpenTxnsInfoResponse._Fields>, java.io.Serializable, Cloneable, Comparable<GetOpenTxnsInfoResponse> {
    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("GetOpenTxnsInfoResponse");
  
    private static final org.apache.thrift.protocol.TField TXN_HIGH_WATER_MARK_FIELD_DESC = new org.apache.thrift.protocol.TField("txn_high_water_mark", org.apache.thrift.protocol.TType.I64, (short)1);

http://git-wip-us.apache.org/repos/asf/hive/blob/c528294b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetOpenTxnsResponse.java
----------------------------------------------------------------------
diff --cc metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetOpenTxnsResponse.java
index 917ecea,bd21de5..3d70d93
--- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetOpenTxnsResponse.java
+++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetOpenTxnsResponse.java
@@@ -31,7 -33,9 +33,9 @@@ import javax.annotation.Generated
  import org.slf4j.Logger;
  import org.slf4j.LoggerFactory;
  
- public class GetOpenTxnsResponse implements org.apache.thrift.TBase<GetOpenTxnsResponse, GetOpenTxnsResponse._Fields>, java.io.Serializable, Cloneable {
+ @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
 -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3")
++@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-14")
+ public class GetOpenTxnsResponse implements org.apache.thrift.TBase<GetOpenTxnsResponse, GetOpenTxnsResponse._Fields>, java.io.Serializable, Cloneable, Comparable<GetOpenTxnsResponse> {
    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("GetOpenTxnsResponse");
  
    private static final org.apache.thrift.protocol.TField TXN_HIGH_WATER_MARK_FIELD_DESC = new org.apache.thrift.protocol.TField("txn_high_water_mark", org.apache.thrift.protocol.TType.I64, (short)1);

http://git-wip-us.apache.org/repos/asf/hive/blob/c528294b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetPrincipalsInRoleRequest.java
----------------------------------------------------------------------
diff --cc metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetPrincipalsInRoleRequest.java
index 565eba9,45e0cc1..1d34a40
--- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetPrincipalsInRoleRequest.java
+++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetPrincipalsInRoleRequest.java
@@@ -31,7 -33,9 +33,9 @@@ import javax.annotation.Generated
  import org.slf4j.Logger;
  import org.slf4j.LoggerFactory;
  
- public class GetPrincipalsInRoleRequest implements org.apache.thrift.TBase<GetPrincipalsInRoleRequest, GetPrincipalsInRoleRequest._Fields>, java.io.Serializable, Cloneable {
+ @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
 -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3")
++@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-14")
+ public class GetPrincipalsInRoleRequest implements org.apache.thrift.TBase<GetPrincipalsInRoleRequest, GetPrincipalsInRoleRequest._Fields>, java.io.Serializable, Cloneable, Comparable<GetPrincipalsInRoleRequest> {
    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("GetPrincipalsInRoleRequest");
  
    private static final org.apache.thrift.protocol.TField ROLE_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("roleName", org.apache.thrift.protocol.TType.STRING, (short)1);

http://git-wip-us.apache.org/repos/asf/hive/blob/c528294b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetPrincipalsInRoleResponse.java
----------------------------------------------------------------------
diff --cc metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetPrincipalsInRoleResponse.java
index 3ef6224,805b898..3db7c94
--- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetPrincipalsInRoleResponse.java
+++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetPrincipalsInRoleResponse.java
@@@ -31,7 -33,9 +33,9 @@@ import javax.annotation.Generated
  import org.slf4j.Logger;
  import org.slf4j.LoggerFactory;
  
- public class GetPrincipalsInRoleResponse implements org.apache.thrift.TBase<GetPrincipalsInRoleResponse, GetPrincipalsInRoleResponse._Fields>, java.io.Serializable, Cloneable {
+ @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
 -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3")
++@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-14")
+ public class GetPrincipalsInRoleResponse implements org.apache.thrift.TBase<GetPrincipalsInRoleResponse, GetPrincipalsInRoleResponse._Fields>, java.io.Serializable, Cloneable, Comparable<GetPrincipalsInRoleResponse> {
    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("GetPrincipalsInRoleResponse");
  
    private static final org.apache.thrift.protocol.TField PRINCIPAL_GRANTS_FIELD_DESC = new org.apache.thrift.protocol.TField("principalGrants", org.apache.thrift.protocol.TType.LIST, (short)1);

http://git-wip-us.apache.org/repos/asf/hive/blob/c528294b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetRoleGrantsForPrincipalRequest.java
----------------------------------------------------------------------
diff --cc metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetRoleGrantsForPrincipalRequest.java
index 84b73c3,c2c7259..fb9eb88
--- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetRoleGrantsForPrincipalRequest.java
+++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetRoleGrantsForPrincipalRequest.java
@@@ -31,7 -33,9 +33,9 @@@ import javax.annotation.Generated
  import org.slf4j.Logger;
  import org.slf4j.LoggerFactory;
  
- public class GetRoleGrantsForPrincipalRequest implements org.apache.thrift.TBase<GetRoleGrantsForPrincipalRequest, GetRoleGrantsForPrincipalRequest._Fields>, java.io.Serializable, Cloneable {
+ @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
 -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3")
++@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-14")
+ public class GetRoleGrantsForPrincipalRequest implements org.apache.thrift.TBase<GetRoleGrantsForPrincipalRequest, GetRoleGrantsForPrincipalRequest._Fields>, java.io.Serializable, Cloneable, Comparable<GetRoleGrantsForPrincipalRequest> {
    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("GetRoleGrantsForPrincipalRequest");
  
    private static final org.apache.thrift.protocol.TField PRINCIPAL_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("principal_name", org.apache.thrift.protocol.TType.STRING, (short)1);

http://git-wip-us.apache.org/repos/asf/hive/blob/c528294b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetRoleGrantsForPrincipalResponse.java
----------------------------------------------------------------------
diff --cc metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetRoleGrantsForPrincipalResponse.java
index 3ddc1ac,7156665..42dfa01
--- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetRoleGrantsForPrincipalResponse.java
+++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetRoleGrantsForPrincipalResponse.java
@@@ -31,7 -33,9 +33,9 @@@ import javax.annotation.Generated
  import org.slf4j.Logger;
  import org.slf4j.LoggerFactory;
  
- public class GetRoleGrantsForPrincipalResponse implements org.apache.thrift.TBase<GetRoleGrantsForPrincipalResponse, GetRoleGrantsForPrincipalResponse._Fields>, java.io.Serializable, Cloneable {
+ @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
 -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3")
++@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-14")
+ public class GetRoleGrantsForPrincipalResponse implements org.apache.thrift.TBase<GetRoleGrantsForPrincipalResponse, GetRoleGrantsForPrincipalResponse._Fields>, java.io.Serializable, Cloneable, Comparable<GetRoleGrantsForPrincipalResponse> {
    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("GetRoleGrantsForPrincipalResponse");
  
    private static final org.apache.thrift.protocol.TField PRINCIPAL_GRANTS_FIELD_DESC = new org.apache.thrift.protocol.TField("principalGrants", org.apache.thrift.protocol.TType.LIST, (short)1);

http://git-wip-us.apache.org/repos/asf/hive/blob/c528294b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GrantRevokePrivilegeRequest.java
----------------------------------------------------------------------
diff --cc metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GrantRevokePrivilegeRequest.java
index 29ce977,94e5a5e..8a042f7
--- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GrantRevokePrivilegeRequest.java
+++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GrantRevokePrivilegeRequest.java
@@@ -31,7 -33,9 +33,9 @@@ import javax.annotation.Generated
  import org.slf4j.Logger;
  import org.slf4j.LoggerFactory;
  
- public class GrantRevokePrivilegeRequest implements org.apache.thrift.TBase<GrantRevokePrivilegeRequest, GrantRevokePrivilegeRequest._Fields>, java.io.Serializable, Cloneable {
+ @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
 -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3")
++@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-14")
+ public class GrantRevokePrivilegeRequest implements org.apache.thrift.TBase<GrantRevokePrivilegeRequest, GrantRevokePrivilegeRequest._Fields>, java.io.Serializable, Cloneable, Comparable<GrantRevokePrivilegeRequest> {
    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("GrantRevokePrivilegeRequest");
  
    private static final org.apache.thrift.protocol.TField REQUEST_TYPE_FIELD_DESC = new org.apache.thrift.protocol.TField("requestType", org.apache.thrift.protocol.TType.I32, (short)1);

http://git-wip-us.apache.org/repos/asf/hive/blob/c528294b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GrantRevokePrivilegeResponse.java
----------------------------------------------------------------------
diff --cc metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GrantRevokePrivilegeResponse.java
index f86bf9f,1949aed..113a07f
--- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GrantRevokePrivilegeResponse.java
+++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GrantRevokePrivilegeResponse.java
@@@ -31,7 -33,9 +33,9 @@@ import javax.annotation.Generated
  import org.slf4j.Logger;
  import org.slf4j.LoggerFactory;
  
- public class GrantRevokePrivilegeResponse implements org.apache.thrift.TBase<GrantRevokePrivilegeResponse, GrantRevokePrivilegeResponse._Fields>, java.io.Serializable, Cloneable {
+ @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
 -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3")
++@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-14")
+ public class GrantRevokePrivilegeResponse implements org.apache.thrift.TBase<GrantRevokePrivilegeResponse, GrantRevokePrivilegeResponse._Fields>, java.io.Serializable, Cloneable, Comparable<GrantRevokePrivilegeResponse> {
    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("GrantRevokePrivilegeResponse");
  
    private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.BOOL, (short)1);

http://git-wip-us.apache.org/repos/asf/hive/blob/c528294b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GrantRevokeRoleRequest.java
----------------------------------------------------------------------
diff --cc metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GrantRevokeRoleRequest.java
index db17aef,ac3527a..c538b72
--- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GrantRevokeRoleRequest.java
+++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GrantRevokeRoleRequest.java
@@@ -31,7 -33,9 +33,9 @@@ import javax.annotation.Generated
  import org.slf4j.Logger;
  import org.slf4j.LoggerFactory;
  
- public class GrantRevokeRoleRequest implements org.apache.thrift.TBase<GrantRevokeRoleRequest, GrantRevokeRoleRequest._Fields>, java.io.Serializable, Cloneable {
+ @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
 -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3")
++@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-14")
+ public class GrantRevokeRoleRequest implements org.apache.thrift.TBase<GrantRevokeRoleRequest, GrantRevokeRoleRequest._Fields>, java.io.Serializable, Cloneable, Comparable<GrantRevokeRoleRequest> {
    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("GrantRevokeRoleRequest");
  
    private static final org.apache.thrift.protocol.TField REQUEST_TYPE_FIELD_DESC = new org.apache.thrift.protocol.TField("requestType", org.apache.thrift.protocol.TType.I32, (short)1);

http://git-wip-us.apache.org/repos/asf/hive/blob/c528294b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GrantRevokeRoleResponse.java
----------------------------------------------------------------------
diff --cc metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GrantRevokeRoleResponse.java
index b9acbb5,81ae31f..dd7ee80
--- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GrantRevokeRoleResponse.java
+++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GrantRevokeRoleResponse.java
@@@ -31,7 -33,9 +33,9 @@@ import javax.annotation.Generated
  import org.slf4j.Logger;
  import org.slf4j.LoggerFactory;
  
- public class GrantRevokeRoleResponse implements org.apache.thrift.TBase<GrantRevokeRoleResponse, GrantRevokeRoleResponse._Fields>, java.io.Serializable, Cloneable {
+ @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
 -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3")
++@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-14")
+ public class GrantRevokeRoleResponse implements org.apache.thrift.TBase<GrantRevokeRoleResponse, GrantRevokeRoleResponse._Fields>, java.io.Serializable, Cloneable, Comparable<GrantRevokeRoleResponse> {
    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("GrantRevokeRoleResponse");
  
    private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.BOOL, (short)1);

http://git-wip-us.apache.org/repos/asf/hive/blob/c528294b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/HeartbeatRequest.java
----------------------------------------------------------------------
diff --cc metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/HeartbeatRequest.java
index 04439b9,4a9ba7f..f0e2a60
--- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/HeartbeatRequest.java
+++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/HeartbeatRequest.java
@@@ -31,7 -33,9 +33,9 @@@ import javax.annotation.Generated
  import org.slf4j.Logger;
  import org.slf4j.LoggerFactory;
  
- public class HeartbeatRequest implements org.apache.thrift.TBase<HeartbeatRequest, HeartbeatRequest._Fields>, java.io.Serializable, Cloneable {
+ @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
 -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3")
++@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-14")
+ public class HeartbeatRequest implements org.apache.thrift.TBase<HeartbeatRequest, HeartbeatRequest._Fields>, java.io.Serializable, Cloneable, Comparable<HeartbeatRequest> {
    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("HeartbeatRequest");
  
    private static final org.apache.thrift.protocol.TField LOCKID_FIELD_DESC = new org.apache.thrift.protocol.TField("lockid", org.apache.thrift.protocol.TType.I64, (short)1);

http://git-wip-us.apache.org/repos/asf/hive/blob/c528294b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/HeartbeatTxnRangeRequest.java
----------------------------------------------------------------------
diff --cc metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/HeartbeatTxnRangeRequest.java
index 062449a,dd6ed17..de4fe40
--- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/HeartbeatTxnRangeRequest.java
+++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/HeartbeatTxnRangeRequest.java
@@@ -31,7 -33,9 +33,9 @@@ import javax.annotation.Generated
  import org.slf4j.Logger;
  import org.slf4j.LoggerFactory;
  
- public class HeartbeatTxnRangeRequest implements org.apache.thrift.TBase<HeartbeatTxnRangeRequest, HeartbeatTxnRangeRequest._Fields>, java.io.Serializable, Cloneable {
+ @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
 -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3")
++@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-14")
+ public class HeartbeatTxnRangeRequest implements org.apache.thrift.TBase<HeartbeatTxnRangeRequest, HeartbeatTxnRangeRequest._Fields>, java.io.Serializable, Cloneable, Comparable<HeartbeatTxnRangeRequest> {
    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("HeartbeatTxnRangeRequest");
  
    private static final org.apache.thrift.protocol.TField MIN_FIELD_DESC = new org.apache.thrift.protocol.TField("min", org.apache.thrift.protocol.TType.I64, (short)1);

http://git-wip-us.apache.org/repos/asf/hive/blob/c528294b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/HeartbeatTxnRangeResponse.java
----------------------------------------------------------------------
diff --cc metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/HeartbeatTxnRangeResponse.java
index caf07d6,11bd82b..c8d2314
--- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/HeartbeatTxnRangeResponse.java
+++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/HeartbeatTxnRangeResponse.java
@@@ -31,7 -33,9 +33,9 @@@ import javax.annotation.Generated
  import org.slf4j.Logger;
  import org.slf4j.LoggerFactory;
  
- public class HeartbeatTxnRangeResponse implements org.apache.thrift.TBase<HeartbeatTxnRangeResponse, HeartbeatTxnRangeResponse._Fields>, java.io.Serializable, Cloneable {
+ @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
 -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3")
++@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-14")
+ public class HeartbeatTxnRangeResponse implements org.apache.thrift.TBase<HeartbeatTxnRangeResponse, HeartbeatTxnRangeResponse._Fields>, java.io.Serializable, Cloneable, Comparable<HeartbeatTxnRangeResponse> {
    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("HeartbeatTxnRangeResponse");
  
    private static final org.apache.thrift.protocol.TField ABORTED_FIELD_DESC = new org.apache.thrift.protocol.TField("aborted", org.apache.thrift.protocol.TType.SET, (short)1);

http://git-wip-us.apache.org/repos/asf/hive/blob/c528294b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/HiveObjectPrivilege.java
----------------------------------------------------------------------
diff --cc metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/HiveObjectPrivilege.java
index bd7641d,931b4e1..58fb744
--- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/HiveObjectPrivilege.java
+++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/HiveObjectPrivilege.java
@@@ -31,7 -33,9 +33,9 @@@ import javax.annotation.Generated
  import org.slf4j.Logger;
  import org.slf4j.LoggerFactory;
  
- public class HiveObjectPrivilege implements org.apache.thrift.TBase<HiveObjectPrivilege, HiveObjectPrivilege._Fields>, java.io.Serializable, Cloneable {
+ @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
 -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3")
++@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-14")
+ public class HiveObjectPrivilege implements org.apache.thrift.TBase<HiveObjectPrivilege, HiveObjectPrivilege._Fields>, java.io.Serializable, Cloneable, Comparable<HiveObjectPrivilege> {
    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("HiveObjectPrivilege");
  
    private static final org.apache.thrift.protocol.TField HIVE_OBJECT_FIELD_DESC = new org.apache.thrift.protocol.TField("hiveObject", org.apache.thrift.protocol.TType.STRUCT, (short)1);

http://git-wip-us.apache.org/repos/asf/hive/blob/c528294b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/HiveObjectRef.java
----------------------------------------------------------------------
diff --cc metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/HiveObjectRef.java
index b22b211,2573cea..13a97ab
--- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/HiveObjectRef.java
+++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/HiveObjectRef.java
@@@ -31,7 -33,9 +33,9 @@@ import javax.annotation.Generated
  import org.slf4j.Logger;
  import org.slf4j.LoggerFactory;
  
- public class HiveObjectRef implements org.apache.thrift.TBase<HiveObjectRef, HiveObjectRef._Fields>, java.io.Serializable, Cloneable {
+ @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
 -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3")
++@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-14")
+ public class HiveObjectRef implements org.apache.thrift.TBase<HiveObjectRef, HiveObjectRef._Fields>, java.io.Serializable, Cloneable, Comparable<HiveObjectRef> {
    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("HiveObjectRef");
  
    private static final org.apache.thrift.protocol.TField OBJECT_TYPE_FIELD_DESC = new org.apache.thrift.protocol.TField("objectType", org.apache.thrift.protocol.TType.I32, (short)1);

http://git-wip-us.apache.org/repos/asf/hive/blob/c528294b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Index.java
----------------------------------------------------------------------
diff --cc metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Index.java
index cb3a435,81227c3..ea997ce
--- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Index.java
+++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Index.java
@@@ -31,7 -33,9 +33,9 @@@ import javax.annotation.Generated
  import org.slf4j.Logger;
  import org.slf4j.LoggerFactory;
  
- public class Index implements org.apache.thrift.TBase<Index, Index._Fields>, java.io.Serializable, Cloneable {
+ @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
 -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3")
++@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-14")
+ public class Index implements org.apache.thrift.TBase<Index, Index._Fields>, java.io.Serializable, Cloneable, Comparable<Index> {
    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("Index");
  
    private static final org.apache.thrift.protocol.TField INDEX_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("indexName", org.apache.thrift.protocol.TType.STRING, (short)1);

http://git-wip-us.apache.org/repos/asf/hive/blob/c528294b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/IndexAlreadyExistsException.java
----------------------------------------------------------------------
diff --cc metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/IndexAlreadyExistsException.java
index f90a65d,50517ed..a4bed6c
--- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/IndexAlreadyExistsException.java
+++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/IndexAlreadyExistsException.java
@@@ -31,7 -33,9 +33,9 @@@ import javax.annotation.Generated
  import org.slf4j.Logger;
  import org.slf4j.LoggerFactory;
  
- public class IndexAlreadyExistsException extends TException implements org.apache.thrift.TBase<IndexAlreadyExistsException, IndexAlreadyExistsException._Fields>, java.io.Serializable, Cloneable {
+ @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
 -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3")
++@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-14")
+ public class IndexAlreadyExistsException extends TException implements org.apache.thrift.TBase<IndexAlreadyExistsException, IndexAlreadyExistsException._Fields>, java.io.Serializable, Cloneable, Comparable<IndexAlreadyExistsException> {
    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("IndexAlreadyExistsException");
  
    private static final org.apache.thrift.protocol.TField MESSAGE_FIELD_DESC = new org.apache.thrift.protocol.TField("message", org.apache.thrift.protocol.TType.STRING, (short)1);

http://git-wip-us.apache.org/repos/asf/hive/blob/c528294b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/InsertEventRequestData.java
----------------------------------------------------------------------
diff --cc metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/InsertEventRequestData.java
index c1992c0,2e14cf9..5c07c0b
--- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/InsertEventRequestData.java
+++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/InsertEventRequestData.java
@@@ -31,7 -33,9 +33,9 @@@ import javax.annotation.Generated
  import org.slf4j.Logger;
  import org.slf4j.LoggerFactory;
  
- public class InsertEventRequestData implements org.apache.thrift.TBase<InsertEventRequestData, InsertEventRequestData._Fields>, java.io.Serializable, Cloneable {
+ @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
 -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3")
++@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-14")
+ public class InsertEventRequestData implements org.apache.thrift.TBase<InsertEventRequestData, InsertEventRequestData._Fields>, java.io.Serializable, Cloneable, Comparable<InsertEventRequestData> {
    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("InsertEventRequestData");
  
    private static final org.apache.thrift.protocol.TField FILES_ADDED_FIELD_DESC = new org.apache.thrift.protocol.TField("filesAdded", org.apache.thrift.protocol.TType.LIST, (short)1);

http://git-wip-us.apache.org/repos/asf/hive/blob/c528294b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/InvalidInputException.java
----------------------------------------------------------------------
diff --cc metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/InvalidInputException.java
index 4b63e3f,87e2f6b..5992099
--- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/InvalidInputException.java
+++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/InvalidInputException.java
@@@ -31,7 -33,9 +33,9 @@@ import javax.annotation.Generated
  import org.slf4j.Logger;
  import org.slf4j.LoggerFactory;
  
- public class InvalidInputException extends TException implements org.apache.thrift.TBase<InvalidInputException, InvalidInputException._Fields>, java.io.Serializable, Cloneable {
+ @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
 -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3")
++@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-14")
+ public class InvalidInputException extends TException implements org.apache.thrift.TBase<InvalidInputException, InvalidInputException._Fields>, java.io.Serializable, Cloneable, Comparable<InvalidInputException> {
    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("InvalidInputException");
  
    private static final org.apache.thrift.protocol.TField MESSAGE_FIELD_DESC = new org.apache.thrift.protocol.TField("message", org.apache.thrift.protocol.TType.STRING, (short)1);

http://git-wip-us.apache.org/repos/asf/hive/blob/c528294b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/InvalidObjectException.java
----------------------------------------------------------------------
diff --cc metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/InvalidObjectException.java
index 2f1414e,14dd7d8..60e9b39
--- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/InvalidObjectException.java
+++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/InvalidObjectException.java
@@@ -31,7 -33,9 +33,9 @@@ import javax.annotation.Generated
  import org.slf4j.Logger;
  import org.slf4j.LoggerFactory;
  
- public class InvalidObjectException extends TException implements org.apache.thrift.TBase<InvalidObjectException, InvalidObjectException._Fields>, java.io.Serializable, Cloneable {
+ @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
 -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3")
++@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-14")
+ public class InvalidObjectException extends TException implements org.apache.thrift.TBase<InvalidObjectException, InvalidObjectException._Fields>, java.io.Serializable, Cloneable, Comparable<InvalidObjectException> {
    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("InvalidObjectException");
  
    private static final org.apache.thrift.protocol.TField MESSAGE_FIELD_DESC = new org.apache.thrift.protocol.TField("message", org.apache.thrift.protocol.TType.STRING, (short)1);

http://git-wip-us.apache.org/repos/asf/hive/blob/c528294b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/InvalidOperationException.java
----------------------------------------------------------------------
diff --cc metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/InvalidOperationException.java
index 9b6acb8,2e3f1e3..e565e1e
--- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/InvalidOperationException.java
+++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/InvalidOperationException.java
@@@ -31,7 -33,9 +33,9 @@@ import javax.annotation.Generated
  import org.slf4j.Logger;
  import org.slf4j.LoggerFactory;
  
- public class InvalidOperationException extends TException implements org.apache.thrift.TBase<InvalidOperationException, InvalidOperationException._Fields>, java.io.Serializable, Cloneable {
+ @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
 -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3")
++@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-14")
+ public class InvalidOperationException extends TException implements org.apache.thrift.TBase<InvalidOperationException, InvalidOperationException._Fields>, java.io.Serializable, Cloneable, Comparable<InvalidOperationException> {
    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("InvalidOperationException");
  
    private static final org.apache.thrift.protocol.TField MESSAGE_FIELD_DESC = new org.apache.thrift.protocol.TField("message", org.apache.thrift.protocol.TType.STRING, (short)1);

http://git-wip-us.apache.org/repos/asf/hive/blob/c528294b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/InvalidPartitionException.java
----------------------------------------------------------------------
diff --cc metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/InvalidPartitionException.java
index 99ffddb,1d9b565..00ff5ee
--- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/InvalidPartitionException.java
+++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/InvalidPartitionException.java
@@@ -31,7 -33,9 +33,9 @@@ import javax.annotation.Generated
  import org.slf4j.Logger;
  import org.slf4j.LoggerFactory;
  
- public class InvalidPartitionException extends TException implements org.apache.thrift.TBase<InvalidPartitionException, InvalidPartitionException._Fields>, java.io.Serializable, Cloneable {
+ @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
 -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3")
++@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-14")
+ public class InvalidPartitionException extends TException implements org.apache.thrift.TBase<InvalidPartitionException, InvalidPartitionException._Fields>, java.io.Serializable, Cloneable, Comparable<InvalidPartitionException> {
    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("InvalidPartitionException");
  
    private static final org.apache.thrift.protocol.TField MESSAGE_FIELD_DESC = new org.apache.thrift.protocol.TField("message", org.apache.thrift.protocol.TType.STRING, (short)1);

http://git-wip-us.apache.org/repos/asf/hive/blob/c528294b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/LockComponent.java
----------------------------------------------------------------------
diff --cc metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/LockComponent.java
index 59c8199,319f8bb..106adaf
--- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/LockComponent.java
+++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/LockComponent.java
@@@ -31,7 -33,9 +33,9 @@@ import javax.annotation.Generated
  import org.slf4j.Logger;
  import org.slf4j.LoggerFactory;
  
- public class LockComponent implements org.apache.thrift.TBase<LockComponent, LockComponent._Fields>, java.io.Serializable, Cloneable {
+ @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
 -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3")
++@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-14")
+ public class LockComponent implements org.apache.thrift.TBase<LockComponent, LockComponent._Fields>, java.io.Serializable, Cloneable, Comparable<LockComponent> {
    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("LockComponent");
  
    private static final org.apache.thrift.protocol.TField TYPE_FIELD_DESC = new org.apache.thrift.protocol.TField("type", org.apache.thrift.protocol.TType.I32, (short)1);

http://git-wip-us.apache.org/repos/asf/hive/blob/c528294b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/LockRequest.java
----------------------------------------------------------------------
diff --cc metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/LockRequest.java
index a5d8f9b,6894bfa..d319d0c
--- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/LockRequest.java
+++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/LockRequest.java
@@@ -31,7 -33,9 +33,9 @@@ import javax.annotation.Generated
  import org.slf4j.Logger;
  import org.slf4j.LoggerFactory;
  
- public class LockRequest implements org.apache.thrift.TBase<LockRequest, LockRequest._Fields>, java.io.Serializable, Cloneable {
+ @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
 -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3")
++@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-14")
+ public class LockRequest implements org.apache.thrift.TBase<LockRequest, LockRequest._Fields>, java.io.Serializable, Cloneable, Comparable<LockRequest> {
    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("LockRequest");
  
    private static final org.apache.thrift.protocol.TField COMPONENT_FIELD_DESC = new org.apache.thrift.protocol.TField("component", org.apache.thrift.protocol.TType.LIST, (short)1);

http://git-wip-us.apache.org/repos/asf/hive/blob/c528294b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/LockResponse.java
----------------------------------------------------------------------
diff --cc metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/LockResponse.java
index c9ab465,08acaeb..0f4e115
--- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/LockResponse.java
+++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/LockResponse.java
@@@ -31,7 -33,9 +33,9 @@@ import javax.annotation.Generated
  import org.slf4j.Logger;
  import org.slf4j.LoggerFactory;
  
- public class LockResponse implements org.apache.thrift.TBase<LockResponse, LockResponse._Fields>, java.io.Serializable, Cloneable {
+ @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
 -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3")
++@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-14")
+ public class LockResponse implements org.apache.thrift.TBase<LockResponse, LockResponse._Fields>, java.io.Serializable, Cloneable, Comparable<LockResponse> {
    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("LockResponse");
  
    private static final org.apache.thrift.protocol.TField LOCKID_FIELD_DESC = new org.apache.thrift.protocol.TField("lockid", org.apache.thrift.protocol.TType.I64, (short)1);

http://git-wip-us.apache.org/repos/asf/hive/blob/c528294b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/LongColumnStatsData.java
----------------------------------------------------------------------
diff --cc metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/LongColumnStatsData.java
index d817d46,93d2386..3df92ab
--- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/LongColumnStatsData.java
+++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/LongColumnStatsData.java
@@@ -31,7 -33,9 +33,9 @@@ import javax.annotation.Generated
  import org.slf4j.Logger;
  import org.slf4j.LoggerFactory;
  
- public class LongColumnStatsData implements org.apache.thrift.TBase<LongColumnStatsData, LongColumnStatsData._Fields>, java.io.Serializable, Cloneable {
+ @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
 -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3")
++@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-14")
+ public class LongColumnStatsData implements org.apache.thrift.TBase<LongColumnStatsData, LongColumnStatsData._Fields>, java.io.Serializable, Cloneable, Comparable<LongColumnStatsData> {
    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("LongColumnStatsData");
  
    private static final org.apache.thrift.protocol.TField LOW_VALUE_FIELD_DESC = new org.apache.thrift.protocol.TField("lowValue", org.apache.thrift.protocol.TType.I64, (short)1);

http://git-wip-us.apache.org/repos/asf/hive/blob/c528294b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/MetaException.java
----------------------------------------------------------------------
diff --cc metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/MetaException.java
index 108bca9,883a1d4..ab00b5d
--- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/MetaException.java
+++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/MetaException.java
@@@ -31,7 -33,9 +33,9 @@@ import javax.annotation.Generated
  import org.slf4j.Logger;
  import org.slf4j.LoggerFactory;
  
- public class MetaException extends TException implements org.apache.thrift.TBase<MetaException, MetaException._Fields>, java.io.Serializable, Cloneable {
+ @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
 -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3")
++@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-14")
+ public class MetaException extends TException implements org.apache.thrift.TBase<MetaException, MetaException._Fields>, java.io.Serializable, Cloneable, Comparable<MetaException> {
    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("MetaException");
  
    private static final org.apache.thrift.protocol.TField MESSAGE_FIELD_DESC = new org.apache.thrift.protocol.TField("message", org.apache.thrift.protocol.TType.STRING, (short)1);

http://git-wip-us.apache.org/repos/asf/hive/blob/c528294b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/NoSuchLockException.java
----------------------------------------------------------------------
diff --cc metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/NoSuchLockException.java
index 9c8bd0b,e214b36..501e857
--- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/NoSuchLockException.java
+++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/NoSuchLockException.java
@@@ -31,7 -33,9 +33,9 @@@ import javax.annotation.Generated
  import org.slf4j.Logger;
  import org.slf4j.LoggerFactory;
  
- public class NoSuchLockException extends TException implements org.apache.thrift.TBase<NoSuchLockException, NoSuchLockException._Fields>, java.io.Serializable, Cloneable {
+ @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
 -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3")
++@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-14")
+ public class NoSuchLockException extends TException implements org.apache.thrift.TBase<NoSuchLockException, NoSuchLockException._Fields>, java.io.Serializable, Cloneable, Comparable<NoSuchLockException> {
    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("NoSuchLockException");
  
    private static final org.apache.thrift.protocol.TField MESSAGE_FIELD_DESC = new org.apache.thrift.protocol.TField("message", org.apache.thrift.protocol.TType.STRING, (short)1);
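
The regenerated metastore classes above all pick up the same three changes: a @SuppressWarnings annotation, a refreshed @Generated date, and a Comparable<T> bound on the class declaration. A small illustrative sketch of what the Comparable change enables for calling code (the LockComponent setters and enum values are assumed from the metastore Thrift definition; this is not part of the patch):

  import java.util.ArrayList;
  import java.util.Collections;
  import java.util.List;
  import org.apache.hadoop.hive.metastore.api.LockComponent;
  import org.apache.hadoop.hive.metastore.api.LockLevel;
  import org.apache.hadoop.hive.metastore.api.LockType;

  public class LockComponentOrdering {
    public static void main(String[] args) {
      // Two components built through the generated bean setters.
      LockComponent first = new LockComponent();
      first.setType(LockType.SHARED_READ);
      first.setLevel(LockLevel.TABLE);
      first.setDbname("db_b");

      LockComponent second = new LockComponent();
      second.setType(LockType.SHARED_READ);
      second.setLevel(LockLevel.TABLE);
      second.setDbname("db_a");

      List<LockComponent> components = new ArrayList<LockComponent>();
      components.add(first);
      components.add(second);

      // Compiles only because the regenerated class implements Comparable<LockComponent>;
      // the generated compareTo compares fields in declaration order.
      Collections.sort(components);
      System.out.println(components.get(0).getDbname());
    }
  }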


[27/50] [abbrv] hive git commit: HIVE-11464: lineage info missing if there are multiple outputs (Jimmy)

Posted by se...@apache.org.
HIVE-11464: lineage info missing if there are multiple outputs (Jimmy)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/1a75644d
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/1a75644d
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/1a75644d

Branch: refs/heads/hbase-metastore
Commit: 1a75644d68c8c61fbafb4058fe45b7823492491c
Parents: f26b256
Author: Jimmy Xiang <jx...@cloudera.com>
Authored: Wed Aug 5 08:02:50 2015 -0700
Committer: Jimmy Xiang <jx...@cloudera.com>
Committed: Thu Aug 13 13:44:03 2015 -0700

----------------------------------------------------------------------
 .../java/org/apache/hadoop/hive/ql/Driver.java  |  8 ++--
 .../hadoop/hive/ql/hooks/LineageInfo.java       |  9 ++--
 .../hadoop/hive/ql/hooks/LineageLogger.java     | 44 +++++++++++++-------
 .../ql/optimizer/lineage/ExprProcFactory.java   |  9 ++--
 .../hive/ql/optimizer/lineage/LineageCtx.java   | 34 +++++++++++----
 .../ql/optimizer/lineage/OpProcFactory.java     | 10 ++---
 ql/src/test/queries/clientpositive/lineage3.q   | 15 +++++++
 .../test/results/clientpositive/lineage3.q.out  | 32 +++++++++++++-
 8 files changed, 118 insertions(+), 43 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/1a75644d/ql/src/java/org/apache/hadoop/hive/ql/Driver.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/Driver.java b/ql/src/java/org/apache/hadoop/hive/ql/Driver.java
index e7b7b55..c0c1b2e 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/Driver.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/Driver.java
@@ -441,8 +441,11 @@ public class Driver implements CommandProcessor {
       // to avoid returning sensitive data
       String queryStr = HookUtils.redactLogString(conf, command);
 
+      // get the output schema
+      schema = getSchema(sem, conf);
+
       plan = new QueryPlan(queryStr, sem, perfLogger.getStartTime(PerfLogger.DRIVER_RUN), queryId,
-        SessionState.get().getHiveOperation(), getSchema(sem, conf));
+        SessionState.get().getHiveOperation(), schema);
 
       conf.setVar(HiveConf.ConfVars.HIVEQUERYSTRING, queryStr);
 
@@ -454,9 +457,6 @@ public class Driver implements CommandProcessor {
         plan.getFetchTask().initialize(conf, plan, null);
       }
 
-      // get the output schema
-      schema = getSchema(sem, conf);
-
       //do the authorization check
       if (!sem.skipAuthorization() &&
           HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVE_AUTHORIZATION_ENABLED)) {

http://git-wip-us.apache.org/repos/asf/hive/blob/1a75644d/ql/src/java/org/apache/hadoop/hive/ql/hooks/LineageInfo.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/hooks/LineageInfo.java b/ql/src/java/org/apache/hadoop/hive/ql/hooks/LineageInfo.java
index fe0841e..2806c54 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/hooks/LineageInfo.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/hooks/LineageInfo.java
@@ -22,7 +22,6 @@ import java.io.Serializable;
 import java.util.Collections;
 import java.util.LinkedHashMap;
 import java.util.LinkedHashSet;
-import java.util.List;
 import java.util.Map;
 import java.util.Set;
 
@@ -375,9 +374,9 @@ public class LineageInfo implements Serializable {
     private String expr;
 
     /**
-     * The list of base columns that the particular column depends on.
+     * The set of base columns that the particular column depends on.
      */
-    private List<BaseColumnInfo> baseCols;
+    private Set<BaseColumnInfo> baseCols;
 
     /**
      * @return the type
@@ -410,14 +409,14 @@ public class LineageInfo implements Serializable {
     /**
      * @return the baseCols
      */
-    public List<BaseColumnInfo> getBaseCols() {
+    public Set<BaseColumnInfo> getBaseCols() {
       return baseCols;
     }
 
     /**
      * @param baseCols the baseCols to set
      */
-    public void setBaseCols(List<BaseColumnInfo> baseCols) {
+    public void setBaseCols(Set<BaseColumnInfo> baseCols) {
       this.baseCols = baseCols;
     }
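
Changing baseCols from List to Set (the callers pass LinkedHashSet) is what lets the same base column arrive from several outputs without being recorded twice, while keeping a stable iteration order for the emitted lineage JSON. A tiny sketch of the behaviour being relied on, with Strings standing in for BaseColumnInfo:

  import java.util.Arrays;
  import java.util.LinkedHashSet;
  import java.util.Set;

  public class BaseColsAsSet {
    public static void main(String[] args) {
      Set<String> baseCols = new LinkedHashSet<String>();
      baseCols.addAll(Arrays.asList("default.alltypesorc.cint", "default.alltypesorc.cstring1"));

      // Merging the same column again, e.g. from a second insert target, is a no-op.
      baseCols.add("default.alltypesorc.cint");

      // Insertion order is preserved, so repeated runs print the same order.
      System.out.println(baseCols); // [default.alltypesorc.cint, default.alltypesorc.cstring1]
    }
  }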
 

http://git-wip-us.apache.org/repos/asf/hive/blob/1a75644d/ql/src/java/org/apache/hadoop/hive/ql/hooks/LineageLogger.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/hooks/LineageLogger.java b/ql/src/java/org/apache/hadoop/hive/ql/hooks/LineageLogger.java
index d615372..3c6ce94 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/hooks/LineageLogger.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/hooks/LineageLogger.java
@@ -33,6 +33,7 @@ import org.apache.commons.io.output.StringBuilderWriter;
 import org.apache.commons.lang.StringUtils;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hive.common.ObjectPair;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.metastore.api.FieldSchema;
 import org.apache.hadoop.hive.metastore.api.Table;
@@ -147,6 +148,7 @@ public class LineageLogger implements ExecuteWithHookContext {
           // Don't emit user/timestamp info in test mode,
           // so that the test golden output file is fixed.
           long queryTime = plan.getQueryStartTime().longValue();
+          if (queryTime == 0) queryTime = System.currentTimeMillis();
           writer.name("user").value(hookContext.getUgi().getUserName());
           writer.name("timestamp").value(queryTime/1000);
           writer.name("jobIds");
@@ -209,23 +211,28 @@ public class LineageLogger implements ExecuteWithHookContext {
    * For each target column, find out its sources based on the dependency index.
    */
   private List<Edge> getEdges(QueryPlan plan, Index index) {
-    List<FieldSchema> fieldSchemas = plan.getResultSchema().getFieldSchemas();
-    int fields = fieldSchemas == null ? 0 : fieldSchemas.size();
-    SelectOperator finalSelOp = index.getFinalSelectOp();
+    LinkedHashMap<String, ObjectPair<SelectOperator,
+      org.apache.hadoop.hive.ql.metadata.Table>> finalSelOps = index.getFinalSelectOps();
+    Set<Vertex> allTargets = new LinkedHashSet<Vertex>();
+    Map<String, Vertex> allSources = new LinkedHashMap<String, Vertex>();
     List<Edge> edges = new ArrayList<Edge>();
-    if (finalSelOp != null && fields > 0) {
-      Map<ColumnInfo, Dependency> colMap = index.getDependencies(finalSelOp);
-      List<Dependency> dependencies = colMap != null ? Lists.newArrayList(colMap.values()) : null;
-      if (dependencies == null || dependencies.size() != fields) {
-        log("Result schema has " + fields
-          + " fields, but we don't get as many dependencies");
+    for (ObjectPair<SelectOperator,
+        org.apache.hadoop.hive.ql.metadata.Table> pair: finalSelOps.values()) {
+      List<FieldSchema> fieldSchemas = plan.getResultSchema().getFieldSchemas();
+      SelectOperator finalSelOp = pair.getFirst();
+      org.apache.hadoop.hive.ql.metadata.Table t = pair.getSecond();
+      String destTableName = null;
+      List<String> colNames = null;
+      if (t != null) {
+        destTableName = t.getDbName() + "." + t.getTableName();
+        fieldSchemas = t.getCols();
       } else {
-        String destTableName = null;
-        List<String> colNames = null;
         // Based on the plan outputs, find out the target table name and column names.
         for (WriteEntity output : plan.getOutputs()) {
-          if (output.getType() == Entity.Type.TABLE) {
-            org.apache.hadoop.hive.ql.metadata.Table t = output.getTable();
+          Entity.Type entityType = output.getType();
+          if (entityType == Entity.Type.TABLE
+              || entityType == Entity.Type.PARTITION) {
+            t = output.getTable();
             destTableName = t.getDbName() + "." + t.getTableName();
             List<FieldSchema> cols = t.getCols();
             if (cols != null && !cols.isEmpty()) {
@@ -234,10 +241,15 @@ public class LineageLogger implements ExecuteWithHookContext {
             break;
           }
         }
-
+      }
+      int fields = fieldSchemas.size();
+      Map<ColumnInfo, Dependency> colMap = index.getDependencies(finalSelOp);
+      List<Dependency> dependencies = colMap != null ? Lists.newArrayList(colMap.values()) : null;
+      if (dependencies == null || dependencies.size() != fields) {
+        log("Result schema has " + fields
+          + " fields, but we don't get as many dependencies");
+      } else {
         // Go through each target column, generate the lineage edges.
-        Set<Vertex> allTargets = new LinkedHashSet<Vertex>();
-        Map<String, Vertex> allSources = new LinkedHashMap<String, Vertex>();
         for (int i = 0; i < fields; i++) {
           Vertex target = new Vertex(
             getTargetFieldName(i, destTableName, colNames, fieldSchemas));
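
The reworked getEdges() now runs once per final select/sink pair: when the sink carries its destination table (the multi-insert case) the lineage targets come from that table's own columns, and only a query without a destination table falls back to the plan's result schema. A hedged, simplified sketch of that branch (Strings replace the Table and FieldSchema types; this is not the actual hook code):

  import java.util.Arrays;
  import java.util.List;

  public class ResolveLineageTargets {
    // Prefer the sink's destination table columns; fall back to the result schema.
    static List<String> targetColumns(String destTable, List<String> destTableCols,
        List<String> resultSchemaCols) {
      return destTable != null ? destTableCols : resultSchemaCols;
    }

    public static void main(String[] args) {
      // insert into default.d1 ... : targets are d1's declared columns
      System.out.println(targetColumns("default.d1",
          Arrays.asList("default.d1.a"), null));
      // plain SELECT: targets are the query's own result columns
      System.out.println(targetColumns(null, null, Arrays.asList("cint", "cs")));
    }
  }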

http://git-wip-us.apache.org/repos/asf/hive/blob/1a75644d/ql/src/java/org/apache/hadoop/hive/ql/optimizer/lineage/ExprProcFactory.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/lineage/ExprProcFactory.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/lineage/ExprProcFactory.java
index 455a525..38040e3 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/lineage/ExprProcFactory.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/lineage/ExprProcFactory.java
@@ -24,6 +24,7 @@ import java.util.LinkedHashMap;
 import java.util.LinkedHashSet;
 import java.util.List;
 import java.util.Map;
+import java.util.Set;
 import java.util.Stack;
 
 import org.apache.hadoop.hive.metastore.api.FieldSchema;
@@ -124,7 +125,7 @@ public class ExprProcFactory {
         bci_set.addAll(child_dep.getBaseCols());
       }
 
-      dep.setBaseCols(new ArrayList<BaseColumnInfo>(bci_set));
+      dep.setBaseCols(bci_set);
       dep.setType(new_type);
 
       return dep;
@@ -146,7 +147,7 @@ public class ExprProcFactory {
       // Create a dependency that has no basecols
       Dependency dep = new Dependency();
       dep.setType(LineageInfo.DependencyType.SIMPLE);
-      dep.setBaseCols(new ArrayList<BaseColumnInfo>());
+      dep.setBaseCols(new LinkedHashSet<BaseColumnInfo>());
 
       return dep;
     }
@@ -218,9 +219,9 @@ public class ExprProcFactory {
       Dependency dep = lctx.getIndex().getDependency(inpOp, internalName);
       if ((tabAlias == null || tabAlias.startsWith("_") || tabAlias.startsWith("$"))
           && (dep != null && dep.getType() == DependencyType.SIMPLE)) {
-        List<BaseColumnInfo> baseCols = dep.getBaseCols();
+        Set<BaseColumnInfo> baseCols = dep.getBaseCols();
         if (baseCols != null && !baseCols.isEmpty()) {
-          BaseColumnInfo baseCol = baseCols.get(0);
+          BaseColumnInfo baseCol = baseCols.iterator().next();
           tabAlias = baseCol.getTabAlias().getAlias();
           alias = baseCol.getColumn().getName();
         }

http://git-wip-us.apache.org/repos/asf/hive/blob/1a75644d/ql/src/java/org/apache/hadoop/hive/ql/optimizer/lineage/LineageCtx.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/lineage/LineageCtx.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/lineage/LineageCtx.java
index d26d8da..c33d775 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/lineage/LineageCtx.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/lineage/LineageCtx.java
@@ -25,7 +25,9 @@ import java.util.LinkedHashSet;
 import java.util.Map;
 import java.util.Set;
 
+import org.apache.hadoop.hive.common.ObjectPair;
 import org.apache.hadoop.hive.ql.exec.ColumnInfo;
+import org.apache.hadoop.hive.ql.exec.FileSinkOperator;
 import org.apache.hadoop.hive.ql.exec.Operator;
 import org.apache.hadoop.hive.ql.exec.SelectOperator;
 import org.apache.hadoop.hive.ql.hooks.LineageInfo;
@@ -33,6 +35,7 @@ import org.apache.hadoop.hive.ql.hooks.LineageInfo.BaseColumnInfo;
 import org.apache.hadoop.hive.ql.hooks.LineageInfo.Dependency;
 import org.apache.hadoop.hive.ql.hooks.LineageInfo.Predicate;
 import org.apache.hadoop.hive.ql.lib.NodeProcessorCtx;
+import org.apache.hadoop.hive.ql.metadata.Table;
 import org.apache.hadoop.hive.ql.parse.ParseContext;
 import org.apache.hadoop.hive.ql.plan.OperatorDesc;
 
@@ -59,7 +62,11 @@ public class LineageCtx implements NodeProcessorCtx {
      */
     private final Map<Operator<? extends OperatorDesc>, Set<Predicate>> condMap;
 
-    private SelectOperator finalSelectOp;
+    /**
+     * A map from a final select operator id to the select operator
+     * and the corresponding target table in case an insert into query.
+     */
+    private LinkedHashMap<String, ObjectPair<SelectOperator, Table>> finalSelectOps;
 
     /**
      * Constructor.
@@ -69,6 +76,8 @@ public class LineageCtx implements NodeProcessorCtx {
         new LinkedHashMap<Operator<? extends OperatorDesc>,
                           LinkedHashMap<ColumnInfo, Dependency>>();
       condMap = new HashMap<Operator<? extends OperatorDesc>, Set<Predicate>>();
+      finalSelectOps =
+        new LinkedHashMap<String, ObjectPair<SelectOperator, Table>>();
     }
 
     /**
@@ -146,7 +155,7 @@ public class LineageCtx implements NodeProcessorCtx {
         old_dep.setType(new_type);
         Set<BaseColumnInfo> bci_set = new LinkedHashSet<BaseColumnInfo>(old_dep.getBaseCols());
         bci_set.addAll(dep.getBaseCols());
-        old_dep.setBaseCols(new ArrayList<BaseColumnInfo>(bci_set));
+        old_dep.setBaseCols(bci_set);
         // TODO: Fix the expressions later.
         old_dep.setExpr(null);
       }
@@ -179,16 +188,27 @@ public class LineageCtx implements NodeProcessorCtx {
       return condMap.get(op);
     }
 
-    public void setFinalSelectOp(SelectOperator sop) {
-      finalSelectOp = sop;
+    public void addFinalSelectOp(
+        SelectOperator sop, Operator<? extends OperatorDesc> sinkOp) {
+      String operatorId = sop.getOperatorId();
+      if (!finalSelectOps.containsKey(operatorId)) {
+        Table table = null;
+        if (sinkOp instanceof FileSinkOperator) {
+          FileSinkOperator fso = (FileSinkOperator) sinkOp;
+          table = fso.getConf().getTable();
+        }
+        finalSelectOps.put(operatorId,
+          new ObjectPair<SelectOperator, Table>(sop, table));
+      }
     }
 
-    public SelectOperator getFinalSelectOp() {
-      return finalSelectOp;
+    public LinkedHashMap<String,
+        ObjectPair<SelectOperator, Table>> getFinalSelectOps() {
+      return finalSelectOps;
     }
 
     public void clear() {
-      finalSelectOp = null;
+      finalSelectOps.clear();
       depMap.clear();
     }
   }
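
Replacing the single finalSelectOp field with a map keyed by operator id is what lets a multi-insert plan register every terminal SELECT exactly once, each paired with the table its FileSinkOperator writes to. A minimal stand-in for that bookkeeping (the real code stores ObjectPair<SelectOperator, Table>; plain Strings and made-up operator ids are used here for brevity):

  import java.util.LinkedHashMap;
  import java.util.Map;

  public class FinalSelectRegistry {
    private final Map<String, String> finalSelectOps = new LinkedHashMap<String, String>();

    // Register a terminal select once; repeat visits of the same operator are no-ops.
    void addFinalSelectOp(String operatorId, String targetTableOrNull) {
      if (!finalSelectOps.containsKey(operatorId)) {
        finalSelectOps.put(operatorId, targetTableOrNull);
      }
    }

    Map<String, String> getFinalSelectOps() {
      return finalSelectOps;
    }

    public static void main(String[] args) {
      FinalSelectRegistry index = new FinalSelectRegistry();
      index.addFinalSelectOp("SEL_7", "default.d1");
      index.addFinalSelectOp("SEL_12", "default.d2");
      index.addFinalSelectOp("SEL_7", "default.d1"); // duplicate walk, ignored
      System.out.println(index.getFinalSelectOps()); // {SEL_7=default.d1, SEL_12=default.d2}
    }
  }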

http://git-wip-us.apache.org/repos/asf/hive/blob/1a75644d/ql/src/java/org/apache/hadoop/hive/ql/optimizer/lineage/OpProcFactory.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/lineage/OpProcFactory.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/lineage/OpProcFactory.java
index f670db8..5c5d0d6 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/lineage/OpProcFactory.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/lineage/OpProcFactory.java
@@ -120,7 +120,7 @@ public class OpProcFactory {
       }
 
       dep.setType(new_type);
-      dep.setBaseCols(new ArrayList<BaseColumnInfo>(col_set));
+      dep.setBaseCols(col_set);
 
       boolean isScript = op instanceof ScriptOperator;
 
@@ -186,7 +186,7 @@ public class OpProcFactory {
 
         // Populate the dependency
         dep.setType(LineageInfo.DependencyType.SIMPLE);
-        dep.setBaseCols(new ArrayList<BaseColumnInfo>());
+        dep.setBaseCols(new LinkedHashSet<BaseColumnInfo>());
         dep.getBaseCols().add(bci);
 
         // Put the dependency in the map
@@ -396,7 +396,7 @@ public class OpProcFactory {
       }
       if (op == null || (op.getChildOperators().isEmpty()
           && op instanceof FileSinkOperator)) {
-        lctx.getIndex().setFinalSelectOp(sop);
+        lctx.getIndex().addFinalSelectOp(sop, op);
       }
 
       return null;
@@ -450,7 +450,7 @@ public class OpProcFactory {
             new_type = LineageCtx.getNewDependencyType(expr_dep.getType(), new_type);
             bci_set.addAll(expr_dep.getBaseCols());
             if (expr_dep.getType() == LineageInfo.DependencyType.SIMPLE) {
-              BaseColumnInfo col = expr_dep.getBaseCols().get(0);
+              BaseColumnInfo col = expr_dep.getBaseCols().iterator().next();
               Table t = col.getTabAlias().getTable();
               if (t != null) {
                 sb.append(t.getDbName()).append(".").append(t.getTableName()).append(".");
@@ -514,7 +514,7 @@ public class OpProcFactory {
           }
         }
 
-        dep.setBaseCols(new ArrayList<BaseColumnInfo>(bci_set));
+        dep.setBaseCols(bci_set);
         dep.setType(new_type);
         lctx.getIndex().putDependency(gop, col_infos.get(cnt++), dep);
       }

http://git-wip-us.apache.org/repos/asf/hive/blob/1a75644d/ql/src/test/queries/clientpositive/lineage3.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/lineage3.q b/ql/src/test/queries/clientpositive/lineage3.q
index 53fff0f..c24ff7d 100644
--- a/ql/src/test/queries/clientpositive/lineage3.q
+++ b/ql/src/test/queries/clientpositive/lineage3.q
@@ -1,5 +1,20 @@
 set hive.exec.post.hooks=org.apache.hadoop.hive.ql.hooks.LineageLogger;
 
+drop table if exists d1;
+create table d1(a int);
+
+from (select a.ctinyint x, b.cstring1 y
+from alltypesorc a join alltypesorc b on a.cint = b.cbigint) t
+insert into table d1 select x + length(y);
+
+drop table if exists d2;
+create table d2(b varchar(128));
+
+from (select a.ctinyint x, b.cstring1 y
+from alltypesorc a join alltypesorc b on a.cint = b.cbigint) t
+insert into table d1 select x where y is null
+insert into table d2 select y where x > 0;
+
 drop table if exists t;
 create table t as
 select * from

http://git-wip-us.apache.org/repos/asf/hive/blob/1a75644d/ql/src/test/results/clientpositive/lineage3.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/lineage3.q.out b/ql/src/test/results/clientpositive/lineage3.q.out
index 75d88f8..b6b4e0b 100644
--- a/ql/src/test/results/clientpositive/lineage3.q.out
+++ b/ql/src/test/results/clientpositive/lineage3.q.out
@@ -1,3 +1,31 @@
+PREHOOK: query: drop table if exists d1
+PREHOOK: type: DROPTABLE
+PREHOOK: query: create table d1(a int)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@d1
+PREHOOK: query: from (select a.ctinyint x, b.cstring1 y
+from alltypesorc a join alltypesorc b on a.cint = b.cbigint) t
+insert into table d1 select x + length(y)
+PREHOOK: type: QUERY
+PREHOOK: Input: default@alltypesorc
+PREHOOK: Output: default@d1
+{"version":"1.0","engine":"mr","hash":"4c9b7b8d89403cef78668f15d393e542","queryText":"from (select a.ctinyint x, b.cstring1 y\nfrom alltypesorc a join alltypesorc b on a.cint = b.cbigint) t\ninsert into table d1 select x + length(y)","edges":[{"sources":[1,2],"targets":[0],"expression":"(UDFToInteger(a.ctinyint) + length(a.cstring1))","edgeType":"PROJECTION"},{"sources":[3,4],"targets":[0],"expression":"(UDFToLong(a.cint) = a.cbigint)","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"default.d1.a"},{"id":1,"vertexType":"COLUMN","vertexId":"default.alltypesorc.ctinyint"},{"id":2,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cstring1"},{"id":3,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cint"},{"id":4,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cbigint"}]}
+PREHOOK: query: drop table if exists d2
+PREHOOK: type: DROPTABLE
+PREHOOK: query: create table d2(b varchar(128))
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@d2
+PREHOOK: query: from (select a.ctinyint x, b.cstring1 y
+from alltypesorc a join alltypesorc b on a.cint = b.cbigint) t
+insert into table d1 select x where y is null
+insert into table d2 select y where x > 0
+PREHOOK: type: QUERY
+PREHOOK: Input: default@alltypesorc
+PREHOOK: Output: default@d1
+PREHOOK: Output: default@d2
+{"version":"1.0","engine":"mr","hash":"8703e4091ebd4c96afd3cac83e3a2957","queryText":"from (select a.ctinyint x, b.cstring1 y\nfrom alltypesorc a join alltypesorc b on a.cint = b.cbigint) t\ninsert into table d1 select x where y is null\ninsert into table d2 select y where x > 0","edges":[{"sources":[2],"targets":[0],"expression":"UDFToInteger(x)","edgeType":"PROJECTION"},{"sources":[3],"targets":[0,1],"expression":"t.y is null","edgeType":"PREDICATE"},{"sources":[4,5],"targets":[0,1],"expression":"(UDFToLong(a.cint) = b.cbigint)","edgeType":"PREDICATE"},{"sources":[3],"targets":[1],"expression":"CAST( y AS varchar(128))","edgeType":"PROJECTION"},{"sources":[2],"targets":[0,1],"expression":"(t.x > 0)","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"default.d1.a"},{"id":1,"vertexType":"COLUMN","vertexId":"default.d2.b"},{"id":2,"vertexType":"COLUMN","vertexId":"default.alltypesorc.ctinyint"},{"id":3,"vertexType":"COLUMN","vertexId":"default.alltypesorc.
 cstring1"},{"id":4,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cint"},{"id":5,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cbigint"}]}
 PREHOOK: query: drop table if exists t
 PREHOOK: type: DROPTABLE
 PREHOOK: query: create table t as
@@ -23,7 +51,7 @@ where cint is not null and cint < 0 order by cint, cs limit 5
 PREHOOK: type: QUERY
 PREHOOK: Input: default@alltypesorc
 PREHOOK: Output: default@dest_l1@ds=today
-{"version":"1.0","engine":"mr","hash":"2b5891d094ff74e23ec6acf5b4990f45","queryText":"insert into table dest_l1 partition (ds='today')\nselect cint, cast(cstring1 as varchar(128)) as cs\nfrom alltypesorc\nwhere cint is not null and cint < 0 order by cint, cs limit 5","edges":[{"sources":[2],"targets":[0],"edgeType":"PROJECTION"},{"sources":[3],"targets":[1],"expression":"CAST( alltypesorc.cstring1 AS varchar(128))","edgeType":"PROJECTION"},{"sources":[2],"targets":[0,1],"expression":"(alltypesorc.cint is not null and (alltypesorc.cint < 0))","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"cint"},{"id":1,"vertexType":"COLUMN","vertexId":"cs"},{"id":2,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cint"},{"id":3,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cstring1"}]}
+{"version":"1.0","engine":"mr","hash":"2b5891d094ff74e23ec6acf5b4990f45","queryText":"insert into table dest_l1 partition (ds='today')\nselect cint, cast(cstring1 as varchar(128)) as cs\nfrom alltypesorc\nwhere cint is not null and cint < 0 order by cint, cs limit 5","edges":[{"sources":[2],"targets":[0],"edgeType":"PROJECTION"},{"sources":[3],"targets":[1],"expression":"CAST( alltypesorc.cstring1 AS varchar(128))","edgeType":"PROJECTION"},{"sources":[2],"targets":[0,1],"expression":"(alltypesorc.cint is not null and (alltypesorc.cint < 0))","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"default.dest_l1.a"},{"id":1,"vertexType":"COLUMN","vertexId":"default.dest_l1.b"},{"id":2,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cint"},{"id":3,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cstring1"}]}
 PREHOOK: query: insert into table dest_l1 partition (ds='tomorrow')
 select min(cint), cast(min(cstring1) as varchar(128)) as cs
 from alltypesorc
@@ -33,7 +61,7 @@ having min(cbigint) > 10
 PREHOOK: type: QUERY
 PREHOOK: Input: default@alltypesorc
 PREHOOK: Output: default@dest_l1@ds=tomorrow
-{"version":"1.0","engine":"mr","hash":"4ad6338a8abfe3fe0342198fcbd1f11d","queryText":"insert into table dest_l1 partition (ds='tomorrow')\nselect min(cint), cast(min(cstring1) as varchar(128)) as cs\nfrom alltypesorc\nwhere cint is not null and cboolean1 = true\ngroup by csmallint\nhaving min(cbigint) > 10","edges":[{"sources":[2],"targets":[0],"expression":"min(default.alltypesorc.cint)","edgeType":"PROJECTION"},{"sources":[3],"targets":[1],"expression":"CAST( min(default.alltypesorc.cstring1) AS varchar(128))","edgeType":"PROJECTION"},{"sources":[2,4],"targets":[0,1],"expression":"(alltypesorc.cint is not null and (alltypesorc.cboolean1 = true))","edgeType":"PREDICATE"},{"sources":[5],"targets":[0,1],"expression":"(min(default.alltypesorc.cbigint) > 10)","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"c0"},{"id":1,"vertexType":"COLUMN","vertexId":"cs"},{"id":2,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cint"},{"id":3,"vertexType":"COLUMN",
 "vertexId":"default.alltypesorc.cstring1"},{"id":4,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cboolean1"},{"id":5,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cbigint"}]}
+{"version":"1.0","engine":"mr","hash":"4ad6338a8abfe3fe0342198fcbd1f11d","queryText":"insert into table dest_l1 partition (ds='tomorrow')\nselect min(cint), cast(min(cstring1) as varchar(128)) as cs\nfrom alltypesorc\nwhere cint is not null and cboolean1 = true\ngroup by csmallint\nhaving min(cbigint) > 10","edges":[{"sources":[2],"targets":[0],"expression":"min(default.alltypesorc.cint)","edgeType":"PROJECTION"},{"sources":[3],"targets":[1],"expression":"CAST( min(default.alltypesorc.cstring1) AS varchar(128))","edgeType":"PROJECTION"},{"sources":[2,4],"targets":[0,1],"expression":"(alltypesorc.cint is not null and (alltypesorc.cboolean1 = true))","edgeType":"PREDICATE"},{"sources":[5],"targets":[0,1],"expression":"(min(default.alltypesorc.cbigint) > 10)","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"default.dest_l1.a"},{"id":1,"vertexType":"COLUMN","vertexId":"default.dest_l1.b"},{"id":2,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cint"},
 {"id":3,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cstring1"},{"id":4,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cboolean1"},{"id":5,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cbigint"}]}
 PREHOOK: query: select cint, rank() over(order by cint) from alltypesorc
 where cint > 10 and cint < 10000 limit 10
 PREHOOK: type: QUERY


[31/50] [abbrv] hive git commit: HIVE-11534: Improve validateTableCols error message (Mohit via Xuefu)

Posted by se...@apache.org.
HIVE-11534: Improve validateTableCols error message (Mohit via Xuefu)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/a4849cb2
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/a4849cb2
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/a4849cb2

Branch: refs/heads/hbase-metastore
Commit: a4849cb2c5b7d1c542881e12bf0e48f73917d96c
Parents: b8f1ae1
Author: Xuefu Zhang <xz...@Cloudera.com>
Authored: Fri Aug 14 06:48:19 2015 -0700
Committer: Xuefu Zhang <xz...@Cloudera.com>
Committed: Fri Aug 14 06:48:19 2015 -0700

----------------------------------------------------------------------
 .../hive/metastore/TestHiveMetaStore.java       | 66 ++++++++++++++++++++
 .../hadoop/hive/metastore/ObjectStore.java      |  7 ++-
 2 files changed, 71 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/a4849cb2/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java
----------------------------------------------------------------------
diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java
index 5a344bb..160667d 100644
--- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java
+++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java
@@ -2966,4 +2966,70 @@ public abstract class TestHiveMetaStore extends TestCase {
     };
     return hookLoader;
   }
+
+  public void testValidateTableCols() throws Throwable {
+
+    try {
+      String dbName = "compdb";
+      String tblName = "comptbl";
+
+      client.dropTable(dbName, tblName);
+      silentDropDatabase(dbName);
+      Database db = new Database();
+      db.setName(dbName);
+      db.setDescription("Validate Table Columns test");
+      client.createDatabase(db);
+
+      ArrayList<FieldSchema> cols = new ArrayList<FieldSchema>(2);
+      cols.add(new FieldSchema("name", serdeConstants.STRING_TYPE_NAME, ""));
+      cols.add(new FieldSchema("income", serdeConstants.INT_TYPE_NAME, ""));
+
+      Table tbl = new Table();
+      tbl.setDbName(dbName);
+      tbl.setTableName(tblName);
+      StorageDescriptor sd = new StorageDescriptor();
+      tbl.setSd(sd);
+      sd.setCols(cols);
+      sd.setCompressed(false);
+      sd.setSerdeInfo(new SerDeInfo());
+      sd.getSerdeInfo().setName(tbl.getTableName());
+      sd.getSerdeInfo().setParameters(new HashMap<String, String>());
+      sd.getSerdeInfo().getParameters()
+          .put(serdeConstants.SERIALIZATION_FORMAT, "1");
+      sd.getSerdeInfo().setSerializationLib(LazySimpleSerDe.class.getName());
+      sd.setInputFormat(HiveInputFormat.class.getName());
+      sd.setOutputFormat(HiveOutputFormat.class.getName());
+      sd.setSortCols(new ArrayList<Order>());
+
+      client.createTable(tbl);
+      if (isThriftClient) {
+        tbl = client.getTable(dbName, tblName);
+      }
+
+      List<String> expectedCols = Lists.newArrayList();
+      expectedCols.add("name");
+      ObjectStore objStore = new ObjectStore();
+      try {
+        objStore.validateTableCols(tbl, expectedCols);
+      } catch (MetaException ex) {
+        throw new RuntimeException(ex);
+      }
+
+      expectedCols.add("doesntExist");
+      boolean exceptionFound = false;
+      try {
+        objStore.validateTableCols(tbl, expectedCols);
+      } catch (MetaException ex) {
+        assertEquals(ex.getMessage(),
+            "Column doesntExist doesn't exist in table comptbl in database compdb");
+        exceptionFound = true;
+      }
+      assertTrue(exceptionFound);
+
+    } catch (Exception e) {
+      System.err.println(StringUtils.stringifyException(e));
+      System.err.println("testValidateTableCols() failed.");
+      throw e;
+    }
+  }
 }

http://git-wip-us.apache.org/repos/asf/hive/blob/a4849cb2/metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java
----------------------------------------------------------------------
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java b/metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java
index a37fbde..d165fc8 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java
@@ -51,6 +51,7 @@ import javax.jdo.Transaction;
 import javax.jdo.datastore.DataStoreCache;
 import javax.jdo.identity.IntIdentity;
 
+import com.google.common.annotations.VisibleForTesting;
 import org.antlr.runtime.CommonTokenStream;
 import org.antlr.runtime.RecognitionException;
 import org.apache.commons.logging.Log;
@@ -6261,7 +6262,8 @@ public class ObjectStore implements RawStore, Configurable {
     }
   }
 
-  private void validateTableCols(Table table, List<String> colNames) throws MetaException {
+  @VisibleForTesting
+  public void validateTableCols(Table table, List<String> colNames) throws MetaException {
     List<FieldSchema> colList = table.getSd().getCols();
     for (String colName : colNames) {
       boolean foundCol = false;
@@ -6272,7 +6274,8 @@ public class ObjectStore implements RawStore, Configurable {
         }
       }
       if (!foundCol) {
-        throw new MetaException("Column " + colName + " doesn't exist.");
+        throw new MetaException("Column " + colName + " doesn't exist in table "
+            + table.getTableName() + " in database " + table.getDbName());
       }
     }
   }
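
The substance of the fix is the error text: the MetaException now names the table and database, not just the missing column, which is exactly what the new test asserts. A hedged sketch of the validation loop in isolation (simplified types, not the ObjectStore code):

  import java.util.Arrays;
  import java.util.List;

  public class ValidateTableColsSketch {
    static void validateTableCols(String dbName, String tblName,
        List<String> tableCols, List<String> requestedCols) throws Exception {
      for (String colName : requestedCols) {
        boolean foundCol = false;
        for (String tableCol : tableCols) {
          if (tableCol.equalsIgnoreCase(colName)) {
            foundCol = true;
            break;
          }
        }
        if (!foundCol) {
          // The improved message: include table and database, not just the column.
          throw new Exception("Column " + colName + " doesn't exist in table "
              + tblName + " in database " + dbName);
        }
      }
    }

    public static void main(String[] args) throws Exception {
      List<String> cols = Arrays.asList("name", "income");
      validateTableCols("compdb", "comptbl", cols, Arrays.asList("name"));        // passes
      validateTableCols("compdb", "comptbl", cols, Arrays.asList("doesntExist")); // throws
    }
  }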


[42/50] [abbrv] hive git commit: HIVE-11571: Fix Hive PTest2 logging configuration (Gopal V reviewed by Sergey Shelukhin)

Posted by se...@apache.org.
HIVE-11571: Fix Hive PTest2 logging configuration (Gopal V reviewed by Sergey Shelukhin)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/3071ce96
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/3071ce96
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/3071ce96

Branch: refs/heads/hbase-metastore
Commit: 3071ce96b6b8635f668d0698c18a727bea1b1de1
Parents: fe1efe5
Author: Prasanth Jayachandran <j....@gmail.com>
Authored: Fri Aug 14 15:40:48 2015 -0700
Committer: Prasanth Jayachandran <j....@gmail.com>
Committed: Fri Aug 14 15:40:48 2015 -0700

----------------------------------------------------------------------
 testutils/ptest2/src/main/resources/log4j2.xml | 1 +
 1 file changed, 1 insertion(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/3071ce96/testutils/ptest2/src/main/resources/log4j2.xml
----------------------------------------------------------------------
diff --git a/testutils/ptest2/src/main/resources/log4j2.xml b/testutils/ptest2/src/main/resources/log4j2.xml
index 6502ad1..42141b7 100644
--- a/testutils/ptest2/src/main/resources/log4j2.xml
+++ b/testutils/ptest2/src/main/resources/log4j2.xml
@@ -75,5 +75,6 @@
     <Logger name="org.apache.zookeeper.ClientCnxnSocketNIO" level="WARN">
       <AppenderRef ref="${sys:hive.ptest.root.logger}"/>
     </Logger>
+  </Loggers>
 
 </Configuration>


[28/50] [abbrv] hive git commit: HIVE-11467 : WriteBuffers rounding wbSize to next power of 2 may cause OOM (Wei Zheng, reviewed by Sergey Shelukhin)

Posted by se...@apache.org.
HIVE-11467 : WriteBuffers rounding wbSize to next power of 2 may cause OOM (Wei Zheng, reviewed by Sergey Shelukhin)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/433ea9cd
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/433ea9cd
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/433ea9cd

Branch: refs/heads/hbase-metastore
Commit: 433ea9cda59a986689cf99953826512e4ff07e4d
Parents: 1a75644
Author: Sergey Shelukhin <se...@apache.org>
Authored: Thu Aug 13 14:46:07 2015 -0700
Committer: Sergey Shelukhin <se...@apache.org>
Committed: Thu Aug 13 14:46:07 2015 -0700

----------------------------------------------------------------------
 .../hive/ql/exec/persistence/HybridHashTableContainer.java       | 4 ++++
 serde/src/java/org/apache/hadoop/hive/serde2/WriteBuffers.java   | 2 +-
 2 files changed, 5 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/433ea9cd/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/HybridHashTableContainer.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/HybridHashTableContainer.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/HybridHashTableContainer.java
index ad1246d..ff64f52 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/HybridHashTableContainer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/HybridHashTableContainer.java
@@ -290,6 +290,10 @@ public class HybridHashTableContainer
       }
     }
 
+    // Round to power of 2 here, as is required by WriteBuffers
+    writeBufferSize = Integer.bitCount(writeBufferSize) == 1 ?
+        writeBufferSize : Integer.highestOneBit(writeBufferSize);
+
     // Cap WriteBufferSize to avoid large preallocations
     writeBufferSize = writeBufferSize < minWbSize ? minWbSize : Math.min(maxWbSize, writeBufferSize);
     LOG.info("Write buffer size: " + writeBufferSize);

http://git-wip-us.apache.org/repos/asf/hive/blob/433ea9cd/serde/src/java/org/apache/hadoop/hive/serde2/WriteBuffers.java
----------------------------------------------------------------------
diff --git a/serde/src/java/org/apache/hadoop/hive/serde2/WriteBuffers.java b/serde/src/java/org/apache/hadoop/hive/serde2/WriteBuffers.java
index acb51f9..05d9359 100644
--- a/serde/src/java/org/apache/hadoop/hive/serde2/WriteBuffers.java
+++ b/serde/src/java/org/apache/hadoop/hive/serde2/WriteBuffers.java
@@ -56,7 +56,7 @@ public final class WriteBuffers implements RandomAccessOutput {
 
 
   public WriteBuffers(int wbSize, long maxSize) {
-    this.wbSize = Integer.bitCount(wbSize) == 1 ? wbSize : (Integer.highestOneBit(wbSize) << 1);
+    this.wbSize = Integer.bitCount(wbSize) == 1 ? wbSize : Integer.highestOneBit(wbSize);
     this.wbSizeLog2 = 31 - Integer.numberOfLeadingZeros(this.wbSize);
     this.offsetMask = this.wbSize - 1;
     this.maxSize = maxSize;
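
The one-character change in WriteBuffers is the whole fix: rounding a non-power-of-two request up (highestOneBit << 1) can nearly double the allocation, while rounding down keeps it at or below what was asked for; HybridHashTableContainer now also rounds before applying its min/max cap. A small sketch contrasting the two behaviours (illustrative only):

  public class PowerOfTwoRounding {
    // Old behaviour: round up to the next power of two.
    static int roundUp(int wbSize) {
      return Integer.bitCount(wbSize) == 1 ? wbSize : (Integer.highestOneBit(wbSize) << 1);
    }

    // New behaviour: round down, so the buffer never exceeds the requested size.
    static int roundDown(int wbSize) {
      return Integer.bitCount(wbSize) == 1 ? wbSize : Integer.highestOneBit(wbSize);
    }

    public static void main(String[] args) {
      int requested = 100 * 1024 * 1024 + 1;    // just over 100 MB, not a power of two
      System.out.println(roundUp(requested));   // 134217728 (128 MB)
      System.out.println(roundDown(requested)); // 67108864  (64 MB)
    }
  }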


[10/50] [abbrv] hive git commit: HIVE-11398: Parse wide OR and wide AND trees to flat OR/AND trees (Jesus Camacho Rodriguez via Gopal V)

Posted by se...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/7f3e4811/ql/src/test/results/clientpositive/spark/ppd_join2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/ppd_join2.q.out b/ql/src/test/results/clientpositive/spark/ppd_join2.q.out
index 6fed2dc..b4c7f23 100644
--- a/ql/src/test/results/clientpositive/spark/ppd_join2.q.out
+++ b/ql/src/test/results/clientpositive/spark/ppd_join2.q.out
@@ -78,7 +78,7 @@ STAGE PLANS:
                       outputColumnNames: _col0, _col1
                       Statistics: Num rows: 42 Data size: 446 Basic stats: COMPLETE Column stats: NONE
                       Filter Operator
-                        predicate: ((_col0 <> '311') and (((_col1 <> 'val_50') or (_col0 > '1')) and (_col0 < '400'))) (type: boolean)
+                        predicate: ((_col0 <> '311') and ((_col1 <> 'val_50') or (_col0 > '1')) and (_col0 < '400')) (type: boolean)
                         Statistics: Num rows: 14 Data size: 148 Basic stats: COMPLETE Column stats: NONE
                         Filter Operator
                           predicate: ((_col0 <> '305') and (_col0 <> '14')) (type: boolean)
@@ -108,7 +108,7 @@ STAGE PLANS:
                         predicate: (_col0 <> '14') (type: boolean)
                         Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE
                         Filter Operator
-                          predicate: ((_col0 <> '302') and ((_col0 <> '311') and (_col0 < '400'))) (type: boolean)
+                          predicate: ((_col0 <> '302') and (_col0 <> '311') and (_col0 < '400')) (type: boolean)
                           Statistics: Num rows: 27 Data size: 286 Basic stats: COMPLETE Column stats: NONE
                           Filter Operator
                             predicate: _col0 is not null (type: boolean)
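
Every golden-file change in this patch has the same shape: a right-nested chain of binary ANDs/ORs now prints as one flat n-ary conjunction or disjunction, e.g. ((a) and ((b) and (c))) becomes ((a) and (b) and (c)). A toy sketch of the flattening idea (hand-rolled expression type, not Hive's GenericUDF machinery):

  import java.util.ArrayList;
  import java.util.Arrays;
  import java.util.List;

  public class FlattenAnd {
    // Toy expression node: either a leaf predicate or an operator with children.
    static class Expr {
      final String op;           // "and" / "or", or null for a leaf
      final List<Expr> children;
      final String leaf;
      Expr(String op, Expr... children) {
        this.op = op;
        this.children = Arrays.asList(children);
        this.leaf = null;
      }
      Expr(String leaf) {
        this.op = null;
        this.children = null;
        this.leaf = leaf;
      }
    }

    // Collect every operand of a run of the same operator into one flat list.
    static void flatten(Expr e, String op, List<String> out) {
      if (op.equals(e.op)) {
        for (Expr child : e.children) {
          flatten(child, op, out);
        }
      } else {
        out.add(e.leaf);
      }
    }

    public static void main(String[] args) {
      Expr nested = new Expr("and",
          new Expr("_col0 <> '311'"),
          new Expr("and",
              new Expr("(_col1 <> 'val_50') or (_col0 > '1')"),
              new Expr("_col0 < '400'")));
      List<String> operands = new ArrayList<String>();
      flatten(nested, "and", operands);
      System.out.println(operands);
      // [_col0 <> '311', (_col1 <> 'val_50') or (_col0 > '1'), _col0 < '400']
    }
  }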

http://git-wip-us.apache.org/repos/asf/hive/blob/7f3e4811/ql/src/test/results/clientpositive/spark/ppd_join3.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/ppd_join3.q.out b/ql/src/test/results/clientpositive/spark/ppd_join3.q.out
index 661d9d1..c9f6762 100644
--- a/ql/src/test/results/clientpositive/spark/ppd_join3.q.out
+++ b/ql/src/test/results/clientpositive/spark/ppd_join3.q.out
@@ -54,7 +54,7 @@ STAGE PLANS:
                         predicate: (_col0 <> '1') (type: boolean)
                         Statistics: Num rows: 28 Data size: 297 Basic stats: COMPLETE Column stats: NONE
                         Filter Operator
-                          predicate: ((_col0 <> '11') and ((_col0 > '0') and ((_col0 < '400') and ((_col0 <> '12') and (_col0 <> '4'))))) (type: boolean)
+                          predicate: ((_col0 <> '11') and (_col0 > '0') and (_col0 < '400') and (_col0 <> '12') and (_col0 <> '4')) (type: boolean)
                           Statistics: Num rows: 3 Data size: 31 Basic stats: COMPLETE Column stats: NONE
                           Filter Operator
                             predicate: _col0 is not null (type: boolean)
@@ -77,7 +77,7 @@ STAGE PLANS:
                       outputColumnNames: _col0, _col1
                       Statistics: Num rows: 28 Data size: 297 Basic stats: COMPLETE Column stats: NONE
                       Filter Operator
-                        predicate: ((_col0 > '0') and (((_col1 <> 'val_500') or (_col0 > '1')) and (_col0 < '400'))) (type: boolean)
+                        predicate: ((_col0 > '0') and ((_col1 <> 'val_500') or (_col0 > '1')) and (_col0 < '400')) (type: boolean)
                         Statistics: Num rows: 3 Data size: 31 Basic stats: COMPLETE Column stats: NONE
                         Select Operator
                           expressions: _col0 (type: string)
@@ -110,7 +110,7 @@ STAGE PLANS:
                         predicate: (_col0 <> '4') (type: boolean)
                         Statistics: Num rows: 28 Data size: 297 Basic stats: COMPLETE Column stats: NONE
                         Filter Operator
-                          predicate: ((_col0 <> '11') and ((_col0 > '0') and (_col0 < '400'))) (type: boolean)
+                          predicate: ((_col0 <> '11') and (_col0 > '0') and (_col0 < '400')) (type: boolean)
                           Statistics: Num rows: 3 Data size: 31 Basic stats: COMPLETE Column stats: NONE
                           Filter Operator
                             predicate: _col0 is not null (type: boolean)

http://git-wip-us.apache.org/repos/asf/hive/blob/7f3e4811/ql/src/test/results/clientpositive/spark/ppd_outer_join4.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/ppd_outer_join4.q.out b/ql/src/test/results/clientpositive/spark/ppd_outer_join4.q.out
index db5914c..16acc67 100644
--- a/ql/src/test/results/clientpositive/spark/ppd_outer_join4.q.out
+++ b/ql/src/test/results/clientpositive/spark/ppd_outer_join4.q.out
@@ -99,7 +99,7 @@ STAGE PLANS:
                       outputColumnNames: _col0
                       Statistics: Num rows: 3 Data size: 31 Basic stats: COMPLETE Column stats: NONE
                       Filter Operator
-                        predicate: ((_col0 > '10') and ((_col0 < '20') and ((_col0 > '15') and (_col0 < '25')))) (type: boolean)
+                        predicate: ((_col0 > '10') and (_col0 < '20') and (_col0 > '15') and (_col0 < '25')) (type: boolean)
                         Statistics: Num rows: 1 Data size: 10 Basic stats: COMPLETE Column stats: NONE
                         Filter Operator
                           predicate: _col0 is not null (type: boolean)

http://git-wip-us.apache.org/repos/asf/hive/blob/7f3e4811/ql/src/test/results/clientpositive/spark/vectorization_0.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/vectorization_0.q.out b/ql/src/test/results/clientpositive/spark/vectorization_0.q.out
index d1dc486..90bcc1b 100644
--- a/ql/src/test/results/clientpositive/spark/vectorization_0.q.out
+++ b/ql/src/test/results/clientpositive/spark/vectorization_0.q.out
@@ -994,7 +994,7 @@ STAGE PLANS:
                   alias: alltypesorc
                   Statistics: Num rows: 12288 Data size: 377237 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: ((cstring2 like '%b%') or ((79.553 <> UDFToDouble(cint)) or (UDFToDouble(cbigint) < cdouble))) (type: boolean)
+                    predicate: ((cstring2 like '%b%') or (79.553 <> UDFToDouble(cint)) or (UDFToDouble(cbigint) < cdouble)) (type: boolean)
                     Statistics: Num rows: 12288 Data size: 377237 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: cbigint (type: bigint), cfloat (type: float), ctinyint (type: tinyint)

http://git-wip-us.apache.org/repos/asf/hive/blob/7f3e4811/ql/src/test/results/clientpositive/spark/vectorization_13.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/vectorization_13.q.out b/ql/src/test/results/clientpositive/spark/vectorization_13.q.out
index aa10d96..22be1d7 100644
--- a/ql/src/test/results/clientpositive/spark/vectorization_13.q.out
+++ b/ql/src/test/results/clientpositive/spark/vectorization_13.q.out
@@ -86,7 +86,7 @@ STAGE PLANS:
                   alias: alltypesorc
                   Statistics: Num rows: 12288 Data size: 377237 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: (((cfloat < 3569.0) and ((10.175 >= cdouble) and (cboolean1 <> 1))) or ((UDFToDouble(ctimestamp1) > 11.0) and ((UDFToDouble(ctimestamp2) <> 12.0) and (UDFToDouble(ctinyint) < 9763215.5639)))) (type: boolean)
+                    predicate: (((cfloat < 3569.0) and (10.175 >= cdouble) and (cboolean1 <> 1)) or ((UDFToDouble(ctimestamp1) > 11.0) and (UDFToDouble(ctimestamp2) <> 12.0) and (UDFToDouble(ctinyint) < 9763215.5639))) (type: boolean)
                     Statistics: Num rows: 2730 Data size: 83809 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: cboolean1 (type: boolean), ctinyint (type: tinyint), ctimestamp1 (type: timestamp), cfloat (type: float), cstring1 (type: string)
@@ -339,7 +339,7 @@ STAGE PLANS:
                   alias: alltypesorc
                   Statistics: Num rows: 12288 Data size: 377237 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: (((cfloat < 3569.0) and ((10.175 >= cdouble) and (cboolean1 <> 1))) or ((UDFToDouble(ctimestamp1) > -1.388) and ((UDFToDouble(ctimestamp2) <> -1.3359999999999999) and (UDFToDouble(ctinyint) < 9763215.5639)))) (type: boolean)
+                    predicate: (((cfloat < 3569.0) and (10.175 >= cdouble) and (cboolean1 <> 1)) or ((UDFToDouble(ctimestamp1) > -1.388) and (UDFToDouble(ctimestamp2) <> -1.3359999999999999) and (UDFToDouble(ctinyint) < 9763215.5639))) (type: boolean)
                     Statistics: Num rows: 2730 Data size: 83809 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: cboolean1 (type: boolean), ctinyint (type: tinyint), ctimestamp1 (type: timestamp), cfloat (type: float), cstring1 (type: string)

http://git-wip-us.apache.org/repos/asf/hive/blob/7f3e4811/ql/src/test/results/clientpositive/spark/vectorization_15.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/vectorization_15.q.out b/ql/src/test/results/clientpositive/spark/vectorization_15.q.out
index 4bd24c3..8013bfe 100644
--- a/ql/src/test/results/clientpositive/spark/vectorization_15.q.out
+++ b/ql/src/test/results/clientpositive/spark/vectorization_15.q.out
@@ -82,7 +82,7 @@ STAGE PLANS:
                   alias: alltypesorc
                   Statistics: Num rows: 12288 Data size: 377237 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: ((cstring2 like '%ss%') or ((cstring1 like '10%') or ((cint >= -75) and ((UDFToShort(ctinyint) = csmallint) and (cdouble >= -3728.0))))) (type: boolean)
+                    predicate: ((cstring2 like '%ss%') or (cstring1 like '10%') or ((cint >= -75) and (UDFToShort(ctinyint) = csmallint) and (cdouble >= -3728.0))) (type: boolean)
                     Statistics: Num rows: 12288 Data size: 377237 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: cfloat (type: float), cboolean1 (type: boolean), cdouble (type: double), cstring1 (type: string), ctinyint (type: tinyint), cint (type: int), ctimestamp1 (type: timestamp)

http://git-wip-us.apache.org/repos/asf/hive/blob/7f3e4811/ql/src/test/results/clientpositive/spark/vectorization_17.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/vectorization_17.q.out b/ql/src/test/results/clientpositive/spark/vectorization_17.q.out
index ea5b0da..1a6e971 100644
--- a/ql/src/test/results/clientpositive/spark/vectorization_17.q.out
+++ b/ql/src/test/results/clientpositive/spark/vectorization_17.q.out
@@ -67,7 +67,7 @@ STAGE PLANS:
                   alias: alltypesorc
                   Statistics: Num rows: 12288 Data size: 377237 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: (((cbigint > -23) and ((cdouble <> 988888.0) or (UDFToDouble(cint) > -863.257))) and ((ctinyint >= 33) or ((UDFToLong(csmallint) >= cbigint) or (UDFToDouble(cfloat) = cdouble)))) (type: boolean)
+                    predicate: (((cbigint > -23) and ((cdouble <> 988888.0) or (UDFToDouble(cint) > -863.257))) and ((ctinyint >= 33) or (UDFToLong(csmallint) >= cbigint) or (UDFToDouble(cfloat) = cdouble))) (type: boolean)
                     Statistics: Num rows: 4778 Data size: 146682 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: cfloat (type: float), cstring1 (type: string), cint (type: int), ctimestamp1 (type: timestamp), cdouble (type: double), cbigint (type: bigint), (UDFToDouble(cfloat) / UDFToDouble(ctinyint)) (type: double), (UDFToLong(cint) % cbigint) (type: bigint), (- cdouble) (type: double), (cdouble + (UDFToDouble(cfloat) / UDFToDouble(ctinyint))) (type: double), (cdouble / UDFToDouble(cint)) (type: double), (- (- cdouble)) (type: double), (9763215.5639 % UDFToDouble(cbigint)) (type: double), (2563.58 + (- (- cdouble))) (type: double)

http://git-wip-us.apache.org/repos/asf/hive/blob/7f3e4811/ql/src/test/results/clientpositive/spark/vectorization_short_regress.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/vectorization_short_regress.q.out b/ql/src/test/results/clientpositive/spark/vectorization_short_regress.q.out
index ae233f4..6bd1bb2 100644
--- a/ql/src/test/results/clientpositive/spark/vectorization_short_regress.q.out
+++ b/ql/src/test/results/clientpositive/spark/vectorization_short_regress.q.out
@@ -152,7 +152,7 @@ STAGE PLANS:
                   alias: alltypesorc
                   Statistics: Num rows: 12288 Data size: 377237 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: ((762 = cbigint) or (((UDFToFloat(csmallint) < cfloat) and ((UDFToDouble(ctimestamp2) > -5.0) and (cdouble <> UDFToDouble(cint)))) or ((cstring1 = 'a') or ((UDFToDouble(cbigint) <= -1.389) and ((cstring2 <> 'a') and ((79.553 <> UDFToDouble(cint)) and (cboolean2 <> cboolean1))))))) (type: boolean)
+                    predicate: ((762 = cbigint) or ((UDFToFloat(csmallint) < cfloat) and (UDFToDouble(ctimestamp2) > -5.0) and (cdouble <> UDFToDouble(cint))) or (cstring1 = 'a') or ((UDFToDouble(cbigint) <= -1.389) and (cstring2 <> 'a') and (79.553 <> UDFToDouble(cint)) and (cboolean2 <> cboolean1))) (type: boolean)
                     Statistics: Num rows: 12288 Data size: 377237 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: cint (type: int), cdouble (type: double), csmallint (type: smallint), cfloat (type: float), ctinyint (type: tinyint)
@@ -364,7 +364,7 @@ STAGE PLANS:
                   alias: alltypesorc
                   Statistics: Num rows: 12288 Data size: 377237 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: (((cbigint <= 197) and (UDFToLong(cint) < cbigint)) or (((cdouble >= -26.28) and (UDFToDouble(csmallint) > cdouble)) or (((UDFToFloat(ctinyint) > cfloat) and cstring1 regexp '.*ss.*') or ((cfloat > 79.553) and (cstring2 like '10%'))))) (type: boolean)
+                    predicate: (((cbigint <= 197) and (UDFToLong(cint) < cbigint)) or ((cdouble >= -26.28) and (UDFToDouble(csmallint) > cdouble)) or ((UDFToFloat(ctinyint) > cfloat) and cstring1 regexp '.*ss.*') or ((cfloat > 79.553) and (cstring2 like '10%'))) (type: boolean)
                     Statistics: Num rows: 6826 Data size: 209555 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: cint (type: int), cbigint (type: bigint), csmallint (type: smallint), cdouble (type: double), ctinyint (type: tinyint)
@@ -567,7 +567,7 @@ STAGE PLANS:
                   alias: alltypesorc
                   Statistics: Num rows: 12288 Data size: 377237 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: ((ctimestamp1 = ctimestamp2) or ((762.0 = cfloat) or ((cstring1 = 'ss') or (((UDFToLong(csmallint) <= cbigint) and (1 = cboolean2)) or (cboolean1 is not null and (ctimestamp2 is not null and (cstring2 > 'a'))))))) (type: boolean)
+                    predicate: ((ctimestamp1 = ctimestamp2) or (762.0 = cfloat) or (cstring1 = 'ss') or ((UDFToLong(csmallint) <= cbigint) and (1 = cboolean2)) or (cboolean1 is not null and ctimestamp2 is not null and (cstring2 > 'a'))) (type: boolean)
                     Statistics: Num rows: 12288 Data size: 377237 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: cbigint (type: bigint), ctinyint (type: tinyint), csmallint (type: smallint), cint (type: int), cdouble (type: double)
@@ -749,7 +749,7 @@ STAGE PLANS:
                   alias: alltypesorc
                   Statistics: Num rows: 12288 Data size: 377237 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: (((ctimestamp2 <= ctimestamp1) and ((UDFToDouble(cbigint) <> cdouble) and ('ss' <= cstring1))) or (((csmallint < UDFToShort(ctinyint)) and (UDFToDouble(ctimestamp1) >= 0.0)) or (cfloat = 17.0))) (type: boolean)
+                    predicate: (((ctimestamp2 <= ctimestamp1) and (UDFToDouble(cbigint) <> cdouble) and ('ss' <= cstring1)) or ((csmallint < UDFToShort(ctinyint)) and (UDFToDouble(ctimestamp1) >= 0.0)) or (cfloat = 17.0)) (type: boolean)
                     Statistics: Num rows: 8874 Data size: 272428 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: ctinyint (type: tinyint), cbigint (type: bigint), cint (type: int), cfloat (type: float)
@@ -939,7 +939,7 @@ STAGE PLANS:
                   alias: alltypesorc
                   Statistics: Num rows: 12288 Data size: 377237 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: ((cstring1 regexp 'a.*' and (cstring2 like '%ss%')) or (((1 <> cboolean2) and ((UDFToDouble(csmallint) < 79.553) and (-257 <> UDFToInteger(ctinyint)))) or (((cdouble > UDFToDouble(ctinyint)) and (cfloat >= UDFToFloat(cint))) or ((UDFToLong(cint) < cbigint) and (UDFToLong(ctinyint) > cbigint))))) (type: boolean)
+                    predicate: ((cstring1 regexp 'a.*' and (cstring2 like '%ss%')) or ((1 <> cboolean2) and (UDFToDouble(csmallint) < 79.553) and (-257 <> UDFToInteger(ctinyint))) or ((cdouble > UDFToDouble(ctinyint)) and (cfloat >= UDFToFloat(cint))) or ((UDFToLong(cint) < cbigint) and (UDFToLong(ctinyint) > cbigint))) (type: boolean)
                     Statistics: Num rows: 9898 Data size: 303864 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: cint (type: int), cdouble (type: double), ctimestamp2 (type: timestamp), cstring1 (type: string), cboolean2 (type: boolean), ctinyint (type: tinyint), cfloat (type: float), ctimestamp1 (type: timestamp), csmallint (type: smallint), cbigint (type: bigint), (-3728 * cbigint) (type: bigint), (- cint) (type: int), (-863.257 - UDFToDouble(cint)) (type: double), (- csmallint) (type: smallint), (csmallint - (- csmallint)) (type: smallint), ((csmallint - (- csmallint)) + (- csmallint)) (type: smallint), (UDFToDouble(cint) / UDFToDouble(cint)) (type: double), ((-863.257 - UDFToDouble(cint)) - -26.28) (type: double), (- cfloat) (type: float), (cdouble * -89010.0) (type: double), (UDFToDouble(ctinyint) / 988888.0) (type: double), (- ctinyint) (type: tinyint), (79.553 / UDFToDouble(ctinyint)) (type: double)
@@ -1197,7 +1197,7 @@ STAGE PLANS:
                   alias: alltypesorc
                   Statistics: Num rows: 12288 Data size: 377237 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: (((197.0 > UDFToDouble(ctinyint)) and (UDFToLong(cint) = cbigint)) or ((cbigint = 359) or ((cboolean1 < 0) or ((cstring1 like '%ss') and (cfloat <= UDFToFloat(ctinyint)))))) (type: boolean)
+                    predicate: (((197.0 > UDFToDouble(ctinyint)) and (UDFToLong(cint) = cbigint)) or (cbigint = 359) or (cboolean1 < 0) or ((cstring1 like '%ss') and (cfloat <= UDFToFloat(ctinyint)))) (type: boolean)
                     Statistics: Num rows: 12288 Data size: 377237 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: cint (type: int), cbigint (type: bigint), cstring1 (type: string), cboolean1 (type: boolean), cfloat (type: float), cdouble (type: double), ctimestamp2 (type: timestamp), csmallint (type: smallint), cstring2 (type: string), cboolean2 (type: boolean), (UDFToDouble(cint) / UDFToDouble(cbigint)) (type: double), (UDFToDouble(cbigint) % 79.553) (type: double), (- (UDFToDouble(cint) / UDFToDouble(cbigint))) (type: double), (10.175 % UDFToDouble(cfloat)) (type: double), (- cfloat) (type: float), (cfloat - (- cfloat)) (type: float), ((cfloat - (- cfloat)) % -6432.0) (type: float), (cdouble * UDFToDouble(csmallint)) (type: double), (- cdouble) (type: double), (- cbigint) (type: bigint), (UDFToDouble(cfloat) - (UDFToDouble(cint) / UDFToDouble(cbigint))) (type: double), (- csmallint) (type: smallint), (3569 % cbigint) (type: bigint), (359.0 - cdouble) (type: double), (- csmallint) (type: smallint)
@@ -1404,7 +1404,7 @@ STAGE PLANS:
                   alias: alltypesorc
                   Statistics: Num rows: 12288 Data size: 377237 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: (((UDFToDouble(csmallint) > -26.28) and (cstring2 like 'ss')) or (((cdouble <= UDFToDouble(cbigint)) and ((cstring1 >= 'ss') and (UDFToDouble(cint) <> cdouble))) or ((UDFToInteger(ctinyint) = -89010) or ((UDFToFloat(cbigint) <= cfloat) and (-26.28 <= UDFToDouble(csmallint)))))) (type: boolean)
+                    predicate: (((UDFToDouble(csmallint) > -26.28) and (cstring2 like 'ss')) or ((cdouble <= UDFToDouble(cbigint)) and (cstring1 >= 'ss') and (UDFToDouble(cint) <> cdouble)) or (UDFToInteger(ctinyint) = -89010) or ((UDFToFloat(cbigint) <= cfloat) and (-26.28 <= UDFToDouble(csmallint)))) (type: boolean)
                     Statistics: Num rows: 10922 Data size: 335301 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: cint (type: int), cstring1 (type: string), cboolean2 (type: boolean), ctimestamp2 (type: timestamp), cdouble (type: double), cfloat (type: float), cbigint (type: bigint), csmallint (type: smallint), cboolean1 (type: boolean), (cint + UDFToInteger(csmallint)) (type: int), (cbigint - UDFToLong(ctinyint)) (type: bigint), (- cbigint) (type: bigint), (- cfloat) (type: float), ((cbigint - UDFToLong(ctinyint)) + cbigint) (type: bigint), (cdouble / cdouble) (type: double), (- cdouble) (type: double), (UDFToLong((cint + UDFToInteger(csmallint))) * (- cbigint)) (type: bigint), ((- cdouble) + UDFToDouble(cbigint)) (type: double), (-1.389 / UDFToDouble(ctinyint)) (type: double), (UDFToDouble(cbigint) % cdouble) (type: double), (- csmallint) (type: smallint), (UDFToInteger(csmallint) + (cint + UDFToInteger(csmallint))) (type: int)
@@ -1670,7 +1670,7 @@ STAGE PLANS:
                   alias: alltypesorc
                   Statistics: Num rows: 12288 Data size: 377237 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: (((-1.389 >= UDFToDouble(cint)) and ((csmallint < UDFToShort(ctinyint)) and (-6432 > UDFToInteger(csmallint)))) or (((cdouble >= UDFToDouble(cfloat)) and (cstring2 <= 'a')) or ((cstring1 like 'ss%') and (10.175 > UDFToDouble(cbigint))))) (type: boolean)
+                    predicate: (((-1.389 >= UDFToDouble(cint)) and (csmallint < UDFToShort(ctinyint)) and (-6432 > UDFToInteger(csmallint))) or ((cdouble >= UDFToDouble(cfloat)) and (cstring2 <= 'a')) or ((cstring1 like 'ss%') and (10.175 > UDFToDouble(cbigint)))) (type: boolean)
                     Statistics: Num rows: 3868 Data size: 118746 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: ctimestamp1 (type: timestamp), cstring2 (type: string), cdouble (type: double), cfloat (type: float), cbigint (type: bigint), csmallint (type: smallint), (UDFToDouble(cbigint) / 3569.0) (type: double), (-257 - UDFToInteger(csmallint)) (type: int), (-6432.0 * cfloat) (type: float), (- cdouble) (type: double), (cdouble * 10.175) (type: double), (UDFToDouble((-6432.0 * cfloat)) / UDFToDouble(cfloat)) (type: double), (- cfloat) (type: float), (cint % UDFToInteger(csmallint)) (type: int), (- cdouble) (type: double), (cdouble * (- cdouble)) (type: double)
@@ -2085,7 +2085,7 @@ STAGE PLANS:
                   alias: alltypesorc
                   Statistics: Num rows: 12288 Data size: 377237 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: ((cdouble > 2563.58) and (((cbigint >= UDFToLong(cint)) and ((UDFToInteger(csmallint) < cint) and (UDFToDouble(cfloat) < -5638.15))) or ((cdouble <= UDFToDouble(cbigint)) and (-5638.15 > UDFToDouble(cbigint))))) (type: boolean)
+                    predicate: ((cdouble > 2563.58) and (((cbigint >= UDFToLong(cint)) and (UDFToInteger(csmallint) < cint) and (UDFToDouble(cfloat) < -5638.15)) or ((cdouble <= UDFToDouble(cbigint)) and (-5638.15 > UDFToDouble(cbigint))))) (type: boolean)
                     Statistics: Num rows: 606 Data size: 18603 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: cdouble (type: double), cfloat (type: float)
@@ -2340,7 +2340,7 @@ STAGE PLANS:
                   alias: alltypesorc
                   Statistics: Num rows: 12288 Data size: 377237 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: ((UDFToDouble(ctimestamp1) <> 0.0) and (((-257 <> UDFToInteger(ctinyint)) and (cboolean2 is not null and (cstring1 regexp '.*ss' and (-3.0 < UDFToDouble(ctimestamp1))))) or ((UDFToDouble(ctimestamp2) = -5.0) or (((UDFToDouble(ctimestamp1) < 0.0) and (cstring2 like '%b%')) or ((cdouble = UDFToDouble(cint)) or (cboolean1 is null and (cfloat < UDFToFloat(cint)))))))) (type: boolean)
+                    predicate: ((UDFToDouble(ctimestamp1) <> 0.0) and (((-257 <> UDFToInteger(ctinyint)) and cboolean2 is not null and cstring1 regexp '.*ss' and (-3.0 < UDFToDouble(ctimestamp1))) or (UDFToDouble(ctimestamp2) = -5.0) or ((UDFToDouble(ctimestamp1) < 0.0) and (cstring2 like '%b%')) or (cdouble = UDFToDouble(cint)) or (cboolean1 is null and (cfloat < UDFToFloat(cint))))) (type: boolean)
                     Statistics: Num rows: 12288 Data size: 377237 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: ctimestamp1 (type: timestamp), cstring1 (type: string), cint (type: int), csmallint (type: smallint), ctinyint (type: tinyint), cfloat (type: float), cdouble (type: double)
@@ -2672,7 +2672,7 @@ STAGE PLANS:
                   alias: alltypesorc
                   Statistics: Num rows: 12288 Data size: 377237 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: (cboolean1 is not null and (((cdouble < UDFToDouble(csmallint)) and ((cboolean2 = cboolean1) and (UDFToDouble(cbigint) <= -863.257))) or (((cint >= -257) and (cstring1 is not null and (cboolean1 >= 1))) or (cstring2 regexp 'b' or ((csmallint >= UDFToShort(ctinyint)) and ctimestamp2 is null))))) (type: boolean)
+                    predicate: (cboolean1 is not null and (((cdouble < UDFToDouble(csmallint)) and (cboolean2 = cboolean1) and (UDFToDouble(cbigint) <= -863.257)) or ((cint >= -257) and cstring1 is not null and (cboolean1 >= 1)) or cstring2 regexp 'b' or ((csmallint >= UDFToShort(ctinyint)) and ctimestamp2 is null))) (type: boolean)
                     Statistics: Num rows: 4778 Data size: 146682 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: cboolean1 (type: boolean), cfloat (type: float), cbigint (type: bigint), cint (type: int), cdouble (type: double), ctinyint (type: tinyint), csmallint (type: smallint)

http://git-wip-us.apache.org/repos/asf/hive/blob/7f3e4811/ql/src/test/results/clientpositive/spark/vectorized_case.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/vectorized_case.q.out b/ql/src/test/results/clientpositive/spark/vectorized_case.q.out
index 7f824f1..c2250e6 100644
--- a/ql/src/test/results/clientpositive/spark/vectorized_case.q.out
+++ b/ql/src/test/results/clientpositive/spark/vectorized_case.q.out
@@ -45,7 +45,7 @@ STAGE PLANS:
         TableScan
           alias: alltypesorc
           Filter Operator
-            predicate: ((csmallint = 418) or ((csmallint = 12205) or (csmallint = 10583))) (type: boolean)
+            predicate: ((csmallint = 418) or (csmallint = 12205) or (csmallint = 10583)) (type: boolean)
             Select Operator
               expressions: csmallint (type: smallint), CASE WHEN ((csmallint = 418)) THEN ('a') WHEN ((csmallint = 12205)) THEN ('b') ELSE ('c') END (type: string), CASE (csmallint) WHEN (418) THEN ('a') WHEN (12205) THEN ('b') ELSE ('c') END (type: string)
               outputColumnNames: _col0, _col1, _col2

http://git-wip-us.apache.org/repos/asf/hive/blob/7f3e4811/ql/src/test/results/clientpositive/tez/vector_mr_diff_schema_alias.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/vector_mr_diff_schema_alias.q.out b/ql/src/test/results/clientpositive/tez/vector_mr_diff_schema_alias.q.out
index c53f0c4..be58a2b 100644
--- a/ql/src/test/results/clientpositive/tez/vector_mr_diff_schema_alias.q.out
+++ b/ql/src/test/results/clientpositive/tez/vector_mr_diff_schema_alias.q.out
@@ -323,7 +323,7 @@ STAGE PLANS:
                 outputColumnNames: _col0, _col22, _col26, _col50, _col58
                 Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
                 Filter Operator
-                  predicate: (((_col0 = _col58) and (_col22 = _col26)) and (_col50) IN ('KS', 'AL', 'MN', 'AL', 'SC', 'VT')) (type: boolean)
+                  predicate: ((_col0 = _col58) and (_col22 = _col26) and (_col50) IN ('KS', 'AL', 'MN', 'AL', 'SC', 'VT')) (type: boolean)
                   Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
                   Select Operator
                     expressions: _col50 (type: string)

http://git-wip-us.apache.org/repos/asf/hive/blob/7f3e4811/ql/src/test/results/clientpositive/tez/vectorization_0.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/vectorization_0.q.out b/ql/src/test/results/clientpositive/tez/vectorization_0.q.out
index 9bd62ad..cfbe9ce 100644
--- a/ql/src/test/results/clientpositive/tez/vectorization_0.q.out
+++ b/ql/src/test/results/clientpositive/tez/vectorization_0.q.out
@@ -994,7 +994,7 @@ STAGE PLANS:
                   alias: alltypesorc
                   Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: ((cstring2 like '%b%') or ((79.553 <> UDFToDouble(cint)) or (UDFToDouble(cbigint) < cdouble))) (type: boolean)
+                    predicate: ((cstring2 like '%b%') or (79.553 <> UDFToDouble(cint)) or (UDFToDouble(cbigint) < cdouble)) (type: boolean)
                     Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: cbigint (type: bigint), cfloat (type: float), ctinyint (type: tinyint)

http://git-wip-us.apache.org/repos/asf/hive/blob/7f3e4811/ql/src/test/results/clientpositive/tez/vectorization_13.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/vectorization_13.q.out b/ql/src/test/results/clientpositive/tez/vectorization_13.q.out
index 687add6..6214640 100644
--- a/ql/src/test/results/clientpositive/tez/vectorization_13.q.out
+++ b/ql/src/test/results/clientpositive/tez/vectorization_13.q.out
@@ -86,7 +86,7 @@ STAGE PLANS:
                   alias: alltypesorc
                   Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: (((cfloat < 3569.0) and ((10.175 >= cdouble) and (cboolean1 <> 1))) or ((UDFToDouble(ctimestamp1) > 11.0) and ((UDFToDouble(ctimestamp2) <> 12.0) and (UDFToDouble(ctinyint) < 9763215.5639)))) (type: boolean)
+                    predicate: (((cfloat < 3569.0) and (10.175 >= cdouble) and (cboolean1 <> 1)) or ((UDFToDouble(ctimestamp1) > 11.0) and (UDFToDouble(ctimestamp2) <> 12.0) and (UDFToDouble(ctinyint) < 9763215.5639))) (type: boolean)
                     Statistics: Num rows: 2730 Data size: 586959 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: cboolean1 (type: boolean), ctinyint (type: tinyint), ctimestamp1 (type: timestamp), cfloat (type: float), cstring1 (type: string)
@@ -339,7 +339,7 @@ STAGE PLANS:
                   alias: alltypesorc
                   Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: (((cfloat < 3569.0) and ((10.175 >= cdouble) and (cboolean1 <> 1))) or ((UDFToDouble(ctimestamp1) > -1.388) and ((UDFToDouble(ctimestamp2) <> -1.3359999999999999) and (UDFToDouble(ctinyint) < 9763215.5639)))) (type: boolean)
+                    predicate: (((cfloat < 3569.0) and (10.175 >= cdouble) and (cboolean1 <> 1)) or ((UDFToDouble(ctimestamp1) > -1.388) and (UDFToDouble(ctimestamp2) <> -1.3359999999999999) and (UDFToDouble(ctinyint) < 9763215.5639))) (type: boolean)
                     Statistics: Num rows: 2730 Data size: 586959 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: cboolean1 (type: boolean), ctinyint (type: tinyint), ctimestamp1 (type: timestamp), cfloat (type: float), cstring1 (type: string)

http://git-wip-us.apache.org/repos/asf/hive/blob/7f3e4811/ql/src/test/results/clientpositive/tez/vectorization_15.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/vectorization_15.q.out b/ql/src/test/results/clientpositive/tez/vectorization_15.q.out
index 456768e..1858cb0 100644
--- a/ql/src/test/results/clientpositive/tez/vectorization_15.q.out
+++ b/ql/src/test/results/clientpositive/tez/vectorization_15.q.out
@@ -82,7 +82,7 @@ STAGE PLANS:
                   alias: alltypesorc
                   Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: ((cstring2 like '%ss%') or ((cstring1 like '10%') or ((cint >= -75) and ((UDFToShort(ctinyint) = csmallint) and (cdouble >= -3728.0))))) (type: boolean)
+                    predicate: ((cstring2 like '%ss%') or (cstring1 like '10%') or ((cint >= -75) and (UDFToShort(ctinyint) = csmallint) and (cdouble >= -3728.0))) (type: boolean)
                     Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: cfloat (type: float), cboolean1 (type: boolean), cdouble (type: double), cstring1 (type: string), ctinyint (type: tinyint), cint (type: int), ctimestamp1 (type: timestamp)

http://git-wip-us.apache.org/repos/asf/hive/blob/7f3e4811/ql/src/test/results/clientpositive/tez/vectorization_17.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/vectorization_17.q.out b/ql/src/test/results/clientpositive/tez/vectorization_17.q.out
index b5c71a4..1719176 100644
--- a/ql/src/test/results/clientpositive/tez/vectorization_17.q.out
+++ b/ql/src/test/results/clientpositive/tez/vectorization_17.q.out
@@ -67,7 +67,7 @@ STAGE PLANS:
                   alias: alltypesorc
                   Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: (((cbigint > -23) and ((cdouble <> 988888.0) or (UDFToDouble(cint) > -863.257))) and ((ctinyint >= 33) or ((UDFToLong(csmallint) >= cbigint) or (UDFToDouble(cfloat) = cdouble)))) (type: boolean)
+                    predicate: (((cbigint > -23) and ((cdouble <> 988888.0) or (UDFToDouble(cint) > -863.257))) and ((ctinyint >= 33) or (UDFToLong(csmallint) >= cbigint) or (UDFToDouble(cfloat) = cdouble))) (type: boolean)
                     Statistics: Num rows: 4778 Data size: 1027287 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: cfloat (type: float), cstring1 (type: string), cint (type: int), ctimestamp1 (type: timestamp), cdouble (type: double), cbigint (type: bigint), (UDFToDouble(cfloat) / UDFToDouble(ctinyint)) (type: double), (UDFToLong(cint) % cbigint) (type: bigint), (- cdouble) (type: double), (cdouble + (UDFToDouble(cfloat) / UDFToDouble(ctinyint))) (type: double), (cdouble / UDFToDouble(cint)) (type: double), (- (- cdouble)) (type: double), (9763215.5639 % UDFToDouble(cbigint)) (type: double), (2563.58 + (- (- cdouble))) (type: double)

http://git-wip-us.apache.org/repos/asf/hive/blob/7f3e4811/ql/src/test/results/clientpositive/tez/vectorization_7.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/vectorization_7.q.out b/ql/src/test/results/clientpositive/tez/vectorization_7.q.out
index b4003ef..61cd932 100644
--- a/ql/src/test/results/clientpositive/tez/vectorization_7.q.out
+++ b/ql/src/test/results/clientpositive/tez/vectorization_7.q.out
@@ -73,7 +73,7 @@ STAGE PLANS:
                   alias: alltypesorc
                   Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: (((ctinyint <> 0) and ((UDFToDouble(ctimestamp1) <= 0.0) or ((UDFToInteger(ctinyint) = cint) or (cstring2 like 'ss')))) and ((988888.0 < cdouble) or ((UDFToDouble(ctimestamp2) > -15.0) and (3569.0 >= cdouble)))) (type: boolean)
+                    predicate: (((ctinyint <> 0) and ((UDFToDouble(ctimestamp1) <= 0.0) or (UDFToInteger(ctinyint) = cint) or (cstring2 like 'ss'))) and ((988888.0 < cdouble) or ((UDFToDouble(ctimestamp2) > -15.0) and (3569.0 >= cdouble)))) (type: boolean)
                     Statistics: Num rows: 7281 Data size: 1565441 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: cboolean1 (type: boolean), cbigint (type: bigint), csmallint (type: smallint), ctinyint (type: tinyint), ctimestamp1 (type: timestamp), cstring1 (type: string), (cbigint + cbigint) (type: bigint), (UDFToInteger(csmallint) % -257) (type: int), (- csmallint) (type: smallint), (- ctinyint) (type: tinyint), (UDFToInteger((- ctinyint)) + 17) (type: int), (cbigint * UDFToLong((- csmallint))) (type: bigint), (cint % UDFToInteger(csmallint)) (type: int), (- ctinyint) (type: tinyint), ((- ctinyint) % ctinyint) (type: tinyint)
@@ -262,7 +262,7 @@ STAGE PLANS:
                   alias: alltypesorc
                   Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: (((ctinyint <> 0) and ((UDFToDouble(ctimestamp1) <= 0.0) or ((UDFToInteger(ctinyint) = cint) or (cstring2 like 'ss')))) and ((988888.0 < cdouble) or ((UDFToDouble(ctimestamp2) > 7.6850000000000005) and (3569.0 >= cdouble)))) (type: boolean)
+                    predicate: (((ctinyint <> 0) and ((UDFToDouble(ctimestamp1) <= 0.0) or (UDFToInteger(ctinyint) = cint) or (cstring2 like 'ss'))) and ((988888.0 < cdouble) or ((UDFToDouble(ctimestamp2) > 7.6850000000000005) and (3569.0 >= cdouble)))) (type: boolean)
                     Statistics: Num rows: 7281 Data size: 1565441 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: cboolean1 (type: boolean), cbigint (type: bigint), csmallint (type: smallint), ctinyint (type: tinyint), ctimestamp1 (type: timestamp), cstring1 (type: string), (cbigint + cbigint) (type: bigint), (UDFToInteger(csmallint) % -257) (type: int), (- csmallint) (type: smallint), (- ctinyint) (type: tinyint), (UDFToInteger((- ctinyint)) + 17) (type: int), (cbigint * UDFToLong((- csmallint))) (type: bigint), (cint % UDFToInteger(csmallint)) (type: int), (- ctinyint) (type: tinyint), ((- ctinyint) % ctinyint) (type: tinyint)

http://git-wip-us.apache.org/repos/asf/hive/blob/7f3e4811/ql/src/test/results/clientpositive/tez/vectorization_8.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/vectorization_8.q.out b/ql/src/test/results/clientpositive/tez/vectorization_8.q.out
index 096aca9..3267860 100644
--- a/ql/src/test/results/clientpositive/tez/vectorization_8.q.out
+++ b/ql/src/test/results/clientpositive/tez/vectorization_8.q.out
@@ -69,7 +69,7 @@ STAGE PLANS:
                   alias: alltypesorc
                   Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: ((cstring2 is not null and ((UDFToDouble(ctimestamp1) <= 10.0) and (UDFToDouble(ctimestamp2) <> 16.0))) or ((cfloat < -6432.0) or (cboolean1 is not null and (cdouble = 988888.0)))) (type: boolean)
+                    predicate: ((cstring2 is not null and (UDFToDouble(ctimestamp1) <= 10.0) and (UDFToDouble(ctimestamp2) <> 16.0)) or (cfloat < -6432.0) or (cboolean1 is not null and (cdouble = 988888.0))) (type: boolean)
                     Statistics: Num rows: 9216 Data size: 1981473 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: ctimestamp1 (type: timestamp), cdouble (type: double), cboolean1 (type: boolean), cstring1 (type: string), cfloat (type: float), (- cdouble) (type: double), (-5638.15 - cdouble) (type: double), (cdouble * -257.0) (type: double), (UDFToFloat(cint) + cfloat) (type: float), ((- cdouble) + UDFToDouble(cbigint)) (type: double), (- cdouble) (type: double), (-1.389 - UDFToDouble(cfloat)) (type: double), (- cfloat) (type: float), ((-5638.15 - cdouble) + UDFToDouble((UDFToFloat(cint) + cfloat))) (type: double)
@@ -245,7 +245,7 @@ STAGE PLANS:
                   alias: alltypesorc
                   Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: ((cstring2 is not null and ((UDFToDouble(ctimestamp1) <= 12.503) and (UDFToDouble(ctimestamp2) <> 11.998))) or ((cfloat < -6432.0) or (cboolean1 is not null and (cdouble = 988888.0)))) (type: boolean)
+                    predicate: ((cstring2 is not null and (UDFToDouble(ctimestamp1) <= 12.503) and (UDFToDouble(ctimestamp2) <> 11.998)) or (cfloat < -6432.0) or (cboolean1 is not null and (cdouble = 988888.0))) (type: boolean)
                     Statistics: Num rows: 9216 Data size: 1981473 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: ctimestamp1 (type: timestamp), cdouble (type: double), cboolean1 (type: boolean), cstring1 (type: string), cfloat (type: float), (- cdouble) (type: double), (-5638.15 - cdouble) (type: double), (cdouble * -257.0) (type: double), (UDFToFloat(cint) + cfloat) (type: float), ((- cdouble) + UDFToDouble(cbigint)) (type: double), (- cdouble) (type: double), (-1.389 - UDFToDouble(cfloat)) (type: double), (- cfloat) (type: float), ((-5638.15 - cdouble) + UDFToDouble((UDFToFloat(cint) + cfloat))) (type: double)

http://git-wip-us.apache.org/repos/asf/hive/blob/7f3e4811/ql/src/test/results/clientpositive/tez/vectorization_short_regress.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/vectorization_short_regress.q.out b/ql/src/test/results/clientpositive/tez/vectorization_short_regress.q.out
index 9729a8f..5e0d42c 100644
--- a/ql/src/test/results/clientpositive/tez/vectorization_short_regress.q.out
+++ b/ql/src/test/results/clientpositive/tez/vectorization_short_regress.q.out
@@ -152,7 +152,7 @@ STAGE PLANS:
                   alias: alltypesorc
                   Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: ((762 = cbigint) or (((UDFToFloat(csmallint) < cfloat) and ((UDFToDouble(ctimestamp2) > -5.0) and (cdouble <> UDFToDouble(cint)))) or ((cstring1 = 'a') or ((UDFToDouble(cbigint) <= -1.389) and ((cstring2 <> 'a') and ((79.553 <> UDFToDouble(cint)) and (cboolean2 <> cboolean1))))))) (type: boolean)
+                    predicate: ((762 = cbigint) or ((UDFToFloat(csmallint) < cfloat) and (UDFToDouble(ctimestamp2) > -5.0) and (cdouble <> UDFToDouble(cint))) or (cstring1 = 'a') or ((UDFToDouble(cbigint) <= -1.389) and (cstring2 <> 'a') and (79.553 <> UDFToDouble(cint)) and (cboolean2 <> cboolean1))) (type: boolean)
                     Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: cint (type: int), cdouble (type: double), csmallint (type: smallint), cfloat (type: float), ctinyint (type: tinyint)
@@ -364,7 +364,7 @@ STAGE PLANS:
                   alias: alltypesorc
                   Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: (((cbigint <= 197) and (UDFToLong(cint) < cbigint)) or (((cdouble >= -26.28) and (UDFToDouble(csmallint) > cdouble)) or (((UDFToFloat(ctinyint) > cfloat) and cstring1 regexp '.*ss.*') or ((cfloat > 79.553) and (cstring2 like '10%'))))) (type: boolean)
+                    predicate: (((cbigint <= 197) and (UDFToLong(cint) < cbigint)) or ((cdouble >= -26.28) and (UDFToDouble(csmallint) > cdouble)) or ((UDFToFloat(ctinyint) > cfloat) and cstring1 regexp '.*ss.*') or ((cfloat > 79.553) and (cstring2 like '10%'))) (type: boolean)
                     Statistics: Num rows: 6826 Data size: 1467614 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: cint (type: int), cbigint (type: bigint), csmallint (type: smallint), cdouble (type: double), ctinyint (type: tinyint)
@@ -567,7 +567,7 @@ STAGE PLANS:
                   alias: alltypesorc
                   Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: ((ctimestamp1 = ctimestamp2) or ((762.0 = cfloat) or ((cstring1 = 'ss') or (((UDFToLong(csmallint) <= cbigint) and (1 = cboolean2)) or (cboolean1 is not null and (ctimestamp2 is not null and (cstring2 > 'a'))))))) (type: boolean)
+                    predicate: ((ctimestamp1 = ctimestamp2) or (762.0 = cfloat) or (cstring1 = 'ss') or ((UDFToLong(csmallint) <= cbigint) and (1 = cboolean2)) or (cboolean1 is not null and ctimestamp2 is not null and (cstring2 > 'a'))) (type: boolean)
                     Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: cbigint (type: bigint), ctinyint (type: tinyint), csmallint (type: smallint), cint (type: int), cdouble (type: double)
@@ -749,7 +749,7 @@ STAGE PLANS:
                   alias: alltypesorc
                   Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: (((ctimestamp2 <= ctimestamp1) and ((UDFToDouble(cbigint) <> cdouble) and ('ss' <= cstring1))) or (((csmallint < UDFToShort(ctinyint)) and (UDFToDouble(ctimestamp1) >= 0.0)) or (cfloat = 17.0))) (type: boolean)
+                    predicate: (((ctimestamp2 <= ctimestamp1) and (UDFToDouble(cbigint) <> cdouble) and ('ss' <= cstring1)) or ((csmallint < UDFToShort(ctinyint)) and (UDFToDouble(ctimestamp1) >= 0.0)) or (cfloat = 17.0)) (type: boolean)
                     Statistics: Num rows: 8874 Data size: 1907941 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: ctinyint (type: tinyint), cbigint (type: bigint), cint (type: int), cfloat (type: float)
@@ -939,7 +939,7 @@ STAGE PLANS:
                   alias: alltypesorc
                   Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: ((cstring1 regexp 'a.*' and (cstring2 like '%ss%')) or (((1 <> cboolean2) and ((UDFToDouble(csmallint) < 79.553) and (-257 <> UDFToInteger(ctinyint)))) or (((cdouble > UDFToDouble(ctinyint)) and (cfloat >= UDFToFloat(cint))) or ((UDFToLong(cint) < cbigint) and (UDFToLong(ctinyint) > cbigint))))) (type: boolean)
+                    predicate: ((cstring1 regexp 'a.*' and (cstring2 like '%ss%')) or ((1 <> cboolean2) and (UDFToDouble(csmallint) < 79.553) and (-257 <> UDFToInteger(ctinyint))) or ((cdouble > UDFToDouble(ctinyint)) and (cfloat >= UDFToFloat(cint))) or ((UDFToLong(cint) < cbigint) and (UDFToLong(ctinyint) > cbigint))) (type: boolean)
                     Statistics: Num rows: 9898 Data size: 2128105 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: cint (type: int), cdouble (type: double), ctimestamp2 (type: timestamp), cstring1 (type: string), cboolean2 (type: boolean), ctinyint (type: tinyint), cfloat (type: float), ctimestamp1 (type: timestamp), csmallint (type: smallint), cbigint (type: bigint), (-3728 * cbigint) (type: bigint), (- cint) (type: int), (-863.257 - UDFToDouble(cint)) (type: double), (- csmallint) (type: smallint), (csmallint - (- csmallint)) (type: smallint), ((csmallint - (- csmallint)) + (- csmallint)) (type: smallint), (UDFToDouble(cint) / UDFToDouble(cint)) (type: double), ((-863.257 - UDFToDouble(cint)) - -26.28) (type: double), (- cfloat) (type: float), (cdouble * -89010.0) (type: double), (UDFToDouble(ctinyint) / 988888.0) (type: double), (- ctinyint) (type: tinyint), (79.553 / UDFToDouble(ctinyint)) (type: double)
@@ -1197,7 +1197,7 @@ STAGE PLANS:
                   alias: alltypesorc
                   Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: (((197.0 > UDFToDouble(ctinyint)) and (UDFToLong(cint) = cbigint)) or ((cbigint = 359) or ((cboolean1 < 0) or ((cstring1 like '%ss') and (cfloat <= UDFToFloat(ctinyint)))))) (type: boolean)
+                    predicate: (((197.0 > UDFToDouble(ctinyint)) and (UDFToLong(cint) = cbigint)) or (cbigint = 359) or (cboolean1 < 0) or ((cstring1 like '%ss') and (cfloat <= UDFToFloat(ctinyint)))) (type: boolean)
                     Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: cint (type: int), cbigint (type: bigint), cstring1 (type: string), cboolean1 (type: boolean), cfloat (type: float), cdouble (type: double), ctimestamp2 (type: timestamp), csmallint (type: smallint), cstring2 (type: string), cboolean2 (type: boolean), (UDFToDouble(cint) / UDFToDouble(cbigint)) (type: double), (UDFToDouble(cbigint) % 79.553) (type: double), (- (UDFToDouble(cint) / UDFToDouble(cbigint))) (type: double), (10.175 % UDFToDouble(cfloat)) (type: double), (- cfloat) (type: float), (cfloat - (- cfloat)) (type: float), ((cfloat - (- cfloat)) % -6432.0) (type: float), (cdouble * UDFToDouble(csmallint)) (type: double), (- cdouble) (type: double), (- cbigint) (type: bigint), (UDFToDouble(cfloat) - (UDFToDouble(cint) / UDFToDouble(cbigint))) (type: double), (- csmallint) (type: smallint), (3569 % cbigint) (type: bigint), (359.0 - cdouble) (type: double), (- csmallint) (type: smallint)
@@ -1404,7 +1404,7 @@ STAGE PLANS:
                   alias: alltypesorc
                   Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: (((UDFToDouble(csmallint) > -26.28) and (cstring2 like 'ss')) or (((cdouble <= UDFToDouble(cbigint)) and ((cstring1 >= 'ss') and (UDFToDouble(cint) <> cdouble))) or ((UDFToInteger(ctinyint) = -89010) or ((UDFToFloat(cbigint) <= cfloat) and (-26.28 <= UDFToDouble(csmallint)))))) (type: boolean)
+                    predicate: (((UDFToDouble(csmallint) > -26.28) and (cstring2 like 'ss')) or ((cdouble <= UDFToDouble(cbigint)) and (cstring1 >= 'ss') and (UDFToDouble(cint) <> cdouble)) or (UDFToInteger(ctinyint) = -89010) or ((UDFToFloat(cbigint) <= cfloat) and (-26.28 <= UDFToDouble(csmallint)))) (type: boolean)
                     Statistics: Num rows: 10922 Data size: 2348269 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: cint (type: int), cstring1 (type: string), cboolean2 (type: boolean), ctimestamp2 (type: timestamp), cdouble (type: double), cfloat (type: float), cbigint (type: bigint), csmallint (type: smallint), cboolean1 (type: boolean), (cint + UDFToInteger(csmallint)) (type: int), (cbigint - UDFToLong(ctinyint)) (type: bigint), (- cbigint) (type: bigint), (- cfloat) (type: float), ((cbigint - UDFToLong(ctinyint)) + cbigint) (type: bigint), (cdouble / cdouble) (type: double), (- cdouble) (type: double), (UDFToLong((cint + UDFToInteger(csmallint))) * (- cbigint)) (type: bigint), ((- cdouble) + UDFToDouble(cbigint)) (type: double), (-1.389 / UDFToDouble(ctinyint)) (type: double), (UDFToDouble(cbigint) % cdouble) (type: double), (- csmallint) (type: smallint), (UDFToInteger(csmallint) + (cint + UDFToInteger(csmallint))) (type: int)
@@ -1670,7 +1670,7 @@ STAGE PLANS:
                   alias: alltypesorc
                   Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: (((-1.389 >= UDFToDouble(cint)) and ((csmallint < UDFToShort(ctinyint)) and (-6432 > UDFToInteger(csmallint)))) or (((cdouble >= UDFToDouble(cfloat)) and (cstring2 <= 'a')) or ((cstring1 like 'ss%') and (10.175 > UDFToDouble(cbigint))))) (type: boolean)
+                    predicate: (((-1.389 >= UDFToDouble(cint)) and (csmallint < UDFToShort(ctinyint)) and (-6432 > UDFToInteger(csmallint))) or ((cdouble >= UDFToDouble(cfloat)) and (cstring2 <= 'a')) or ((cstring1 like 'ss%') and (10.175 > UDFToDouble(cbigint)))) (type: boolean)
                     Statistics: Num rows: 3868 Data size: 831633 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: ctimestamp1 (type: timestamp), cstring2 (type: string), cdouble (type: double), cfloat (type: float), cbigint (type: bigint), csmallint (type: smallint), (UDFToDouble(cbigint) / 3569.0) (type: double), (-257 - UDFToInteger(csmallint)) (type: int), (-6432.0 * cfloat) (type: float), (- cdouble) (type: double), (cdouble * 10.175) (type: double), (UDFToDouble((-6432.0 * cfloat)) / UDFToDouble(cfloat)) (type: double), (- cfloat) (type: float), (cint % UDFToInteger(csmallint)) (type: int), (- cdouble) (type: double), (cdouble * (- cdouble)) (type: double)
@@ -2085,7 +2085,7 @@ STAGE PLANS:
                   alias: alltypesorc
                   Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: ((cdouble > 2563.58) and (((cbigint >= UDFToLong(cint)) and ((UDFToInteger(csmallint) < cint) and (UDFToDouble(cfloat) < -5638.15))) or ((cdouble <= UDFToDouble(cbigint)) and (-5638.15 > UDFToDouble(cbigint))))) (type: boolean)
+                    predicate: ((cdouble > 2563.58) and (((cbigint >= UDFToLong(cint)) and (UDFToInteger(csmallint) < cint) and (UDFToDouble(cfloat) < -5638.15)) or ((cdouble <= UDFToDouble(cbigint)) and (-5638.15 > UDFToDouble(cbigint))))) (type: boolean)
                     Statistics: Num rows: 606 Data size: 130292 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: cdouble (type: double), cfloat (type: float)
@@ -2340,7 +2340,7 @@ STAGE PLANS:
                   alias: alltypesorc
                   Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: ((UDFToDouble(ctimestamp1) <> 0.0) and (((-257 <> UDFToInteger(ctinyint)) and (cboolean2 is not null and (cstring1 regexp '.*ss' and (-3.0 < UDFToDouble(ctimestamp1))))) or ((UDFToDouble(ctimestamp2) = -5.0) or (((UDFToDouble(ctimestamp1) < 0.0) and (cstring2 like '%b%')) or ((cdouble = UDFToDouble(cint)) or (cboolean1 is null and (cfloat < UDFToFloat(cint)))))))) (type: boolean)
+                    predicate: ((UDFToDouble(ctimestamp1) <> 0.0) and (((-257 <> UDFToInteger(ctinyint)) and cboolean2 is not null and cstring1 regexp '.*ss' and (-3.0 < UDFToDouble(ctimestamp1))) or (UDFToDouble(ctimestamp2) = -5.0) or ((UDFToDouble(ctimestamp1) < 0.0) and (cstring2 like '%b%')) or (cdouble = UDFToDouble(cint)) or (cboolean1 is null and (cfloat < UDFToFloat(cint))))) (type: boolean)
                     Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: ctimestamp1 (type: timestamp), cstring1 (type: string), cint (type: int), csmallint (type: smallint), ctinyint (type: tinyint), cfloat (type: float), cdouble (type: double)
@@ -2672,7 +2672,7 @@ STAGE PLANS:
                   alias: alltypesorc
                   Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: (cboolean1 is not null and (((cdouble < UDFToDouble(csmallint)) and ((cboolean2 = cboolean1) and (UDFToDouble(cbigint) <= -863.257))) or (((cint >= -257) and (cstring1 is not null and (cboolean1 >= 1))) or (cstring2 regexp 'b' or ((csmallint >= UDFToShort(ctinyint)) and ctimestamp2 is null))))) (type: boolean)
+                    predicate: (cboolean1 is not null and (((cdouble < UDFToDouble(csmallint)) and (cboolean2 = cboolean1) and (UDFToDouble(cbigint) <= -863.257)) or ((cint >= -257) and cstring1 is not null and (cboolean1 >= 1)) or cstring2 regexp 'b' or ((csmallint >= UDFToShort(ctinyint)) and ctimestamp2 is null))) (type: boolean)
                     Statistics: Num rows: 4778 Data size: 1027287 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: cboolean1 (type: boolean), cfloat (type: float), cbigint (type: bigint), cint (type: int), cdouble (type: double), ctinyint (type: tinyint), csmallint (type: smallint)

http://git-wip-us.apache.org/repos/asf/hive/blob/7f3e4811/ql/src/test/results/clientpositive/tez/vectorized_case.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/vectorized_case.q.out b/ql/src/test/results/clientpositive/tez/vectorized_case.q.out
index 7f824f1..c2250e6 100644
--- a/ql/src/test/results/clientpositive/tez/vectorized_case.q.out
+++ b/ql/src/test/results/clientpositive/tez/vectorized_case.q.out
@@ -45,7 +45,7 @@ STAGE PLANS:
         TableScan
           alias: alltypesorc
           Filter Operator
-            predicate: ((csmallint = 418) or ((csmallint = 12205) or (csmallint = 10583))) (type: boolean)
+            predicate: ((csmallint = 418) or (csmallint = 12205) or (csmallint = 10583)) (type: boolean)
             Select Operator
               expressions: csmallint (type: smallint), CASE WHEN ((csmallint = 418)) THEN ('a') WHEN ((csmallint = 12205)) THEN ('b') ELSE ('c') END (type: string), CASE (csmallint) WHEN (418) THEN ('a') WHEN (12205) THEN ('b') ELSE ('c') END (type: string)
               outputColumnNames: _col0, _col1, _col2

http://git-wip-us.apache.org/repos/asf/hive/blob/7f3e4811/ql/src/test/results/clientpositive/udf_or.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/udf_or.q.out b/ql/src/test/results/clientpositive/udf_or.q.out
index e03cbcd..cd3e2da 100644
--- a/ql/src/test/results/clientpositive/udf_or.q.out
+++ b/ql/src/test/results/clientpositive/udf_or.q.out
@@ -2,9 +2,9 @@ PREHOOK: query: DESCRIBE FUNCTION or
 PREHOOK: type: DESCFUNCTION
 POSTHOOK: query: DESCRIBE FUNCTION or
 POSTHOOK: type: DESCFUNCTION
-a or b - Logical or
+a1 or a2 or ... or an - Logical or
 PREHOOK: query: DESCRIBE FUNCTION EXTENDED or
 PREHOOK: type: DESCFUNCTION
 POSTHOOK: query: DESCRIBE FUNCTION EXTENDED or
 POSTHOOK: type: DESCFUNCTION
-a or b - Logical or
+a1 or a2 or ... or an - Logical or

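The DESCRIBE FUNCTION change above documents OR as an n-ary operator (a1 or a2 or ... or an), which matches the flattened predicates shown throughout the plan diffs in this patch: chains of AND/OR are now printed as a single flat list instead of nested pairs. A minimal illustration, assuming the standard alltypesorc test table that these q.out files already use (the query below is a simplified sketch, not one of the actual test queries):

    EXPLAIN
    SELECT csmallint
    FROM alltypesorc
    WHERE csmallint = 418 OR csmallint = 12205 OR csmallint = 10583;

    -- Before this patch the Filter Operator printed the predicate nested:
    --   ((csmallint = 418) or ((csmallint = 12205) or (csmallint = 10583)))
    -- With n-ary OR it is printed flat, as in the vectorized_case.q.out diffs:
    --   ((csmallint = 418) or (csmallint = 12205) or (csmallint = 10583))
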
http://git-wip-us.apache.org/repos/asf/hive/blob/7f3e4811/ql/src/test/results/clientpositive/vector_mr_diff_schema_alias.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_mr_diff_schema_alias.q.out b/ql/src/test/results/clientpositive/vector_mr_diff_schema_alias.q.out
index 4619403..288025d 100644
--- a/ql/src/test/results/clientpositive/vector_mr_diff_schema_alias.q.out
+++ b/ql/src/test/results/clientpositive/vector_mr_diff_schema_alias.q.out
@@ -320,7 +320,7 @@ STAGE PLANS:
           outputColumnNames: _col0, _col22, _col26, _col50, _col58
           Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
           Filter Operator
-            predicate: (((_col0 = _col58) and (_col22 = _col26)) and (_col50) IN ('KS', 'AL', 'MN', 'AL', 'SC', 'VT')) (type: boolean)
+            predicate: ((_col0 = _col58) and (_col22 = _col26) and (_col50) IN ('KS', 'AL', 'MN', 'AL', 'SC', 'VT')) (type: boolean)
             Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
             Select Operator
               expressions: _col50 (type: string)

http://git-wip-us.apache.org/repos/asf/hive/blob/7f3e4811/ql/src/test/results/clientpositive/vectorization_0.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vectorization_0.q.out b/ql/src/test/results/clientpositive/vectorization_0.q.out
index 531bc84..3fab2ff 100644
--- a/ql/src/test/results/clientpositive/vectorization_0.q.out
+++ b/ql/src/test/results/clientpositive/vectorization_0.q.out
@@ -1010,7 +1010,7 @@ STAGE PLANS:
             alias: alltypesorc
             Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: ((cstring2 like '%b%') or ((79.553 <> UDFToDouble(cint)) or (UDFToDouble(cbigint) < cdouble))) (type: boolean)
+              predicate: ((cstring2 like '%b%') or (79.553 <> UDFToDouble(cint)) or (UDFToDouble(cbigint) < cdouble)) (type: boolean)
               Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: cbigint (type: bigint), cfloat (type: float), ctinyint (type: tinyint)

http://git-wip-us.apache.org/repos/asf/hive/blob/7f3e4811/ql/src/test/results/clientpositive/vectorization_13.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vectorization_13.q.out b/ql/src/test/results/clientpositive/vectorization_13.q.out
index bfc8ad2..95cb09a 100644
--- a/ql/src/test/results/clientpositive/vectorization_13.q.out
+++ b/ql/src/test/results/clientpositive/vectorization_13.q.out
@@ -81,7 +81,7 @@ STAGE PLANS:
             alias: alltypesorc
             Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: (((cfloat < 3569.0) and ((10.175 >= cdouble) and (cboolean1 <> 1))) or ((UDFToDouble(ctimestamp1) > 11.0) and ((UDFToDouble(ctimestamp2) <> 12.0) and (UDFToDouble(ctinyint) < 9763215.5639)))) (type: boolean)
+              predicate: (((cfloat < 3569.0) and (10.175 >= cdouble) and (cboolean1 <> 1)) or ((UDFToDouble(ctimestamp1) > 11.0) and (UDFToDouble(ctimestamp2) <> 12.0) and (UDFToDouble(ctinyint) < 9763215.5639))) (type: boolean)
               Statistics: Num rows: 2730 Data size: 586959 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: cboolean1 (type: boolean), ctinyint (type: tinyint), ctimestamp1 (type: timestamp), cfloat (type: float), cstring1 (type: string)
@@ -337,7 +337,7 @@ STAGE PLANS:
             alias: alltypesorc
             Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: (((cfloat < 3569.0) and ((10.175 >= cdouble) and (cboolean1 <> 1))) or ((UDFToDouble(ctimestamp1) > -1.388) and ((UDFToDouble(ctimestamp2) <> -1.3359999999999999) and (UDFToDouble(ctinyint) < 9763215.5639)))) (type: boolean)
+              predicate: (((cfloat < 3569.0) and (10.175 >= cdouble) and (cboolean1 <> 1)) or ((UDFToDouble(ctimestamp1) > -1.388) and (UDFToDouble(ctimestamp2) <> -1.3359999999999999) and (UDFToDouble(ctinyint) < 9763215.5639))) (type: boolean)
               Statistics: Num rows: 2730 Data size: 586959 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: cboolean1 (type: boolean), ctinyint (type: tinyint), ctimestamp1 (type: timestamp), cfloat (type: float), cstring1 (type: string)

http://git-wip-us.apache.org/repos/asf/hive/blob/7f3e4811/ql/src/test/results/clientpositive/vectorization_15.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vectorization_15.q.out b/ql/src/test/results/clientpositive/vectorization_15.q.out
index 0031e94..da0e8e0 100644
--- a/ql/src/test/results/clientpositive/vectorization_15.q.out
+++ b/ql/src/test/results/clientpositive/vectorization_15.q.out
@@ -77,7 +77,7 @@ STAGE PLANS:
             alias: alltypesorc
             Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: ((cstring2 like '%ss%') or ((cstring1 like '10%') or ((cint >= -75) and ((UDFToShort(ctinyint) = csmallint) and (cdouble >= -3728.0))))) (type: boolean)
+              predicate: ((cstring2 like '%ss%') or (cstring1 like '10%') or ((cint >= -75) and (UDFToShort(ctinyint) = csmallint) and (cdouble >= -3728.0))) (type: boolean)
               Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: cfloat (type: float), cboolean1 (type: boolean), cdouble (type: double), cstring1 (type: string), ctinyint (type: tinyint), cint (type: int), ctimestamp1 (type: timestamp)

http://git-wip-us.apache.org/repos/asf/hive/blob/7f3e4811/ql/src/test/results/clientpositive/vectorization_17.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vectorization_17.q.out b/ql/src/test/results/clientpositive/vectorization_17.q.out
index ece918c..3d58e68 100644
--- a/ql/src/test/results/clientpositive/vectorization_17.q.out
+++ b/ql/src/test/results/clientpositive/vectorization_17.q.out
@@ -62,7 +62,7 @@ STAGE PLANS:
             alias: alltypesorc
             Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: (((cbigint > -23) and ((cdouble <> 988888.0) or (UDFToDouble(cint) > -863.257))) and ((ctinyint >= 33) or ((UDFToLong(csmallint) >= cbigint) or (UDFToDouble(cfloat) = cdouble)))) (type: boolean)
+              predicate: (((cbigint > -23) and ((cdouble <> 988888.0) or (UDFToDouble(cint) > -863.257))) and ((ctinyint >= 33) or (UDFToLong(csmallint) >= cbigint) or (UDFToDouble(cfloat) = cdouble))) (type: boolean)
               Statistics: Num rows: 4778 Data size: 1027287 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: cfloat (type: float), cstring1 (type: string), cint (type: int), ctimestamp1 (type: timestamp), cdouble (type: double), cbigint (type: bigint), (UDFToDouble(cfloat) / UDFToDouble(ctinyint)) (type: double), (UDFToLong(cint) % cbigint) (type: bigint), (- cdouble) (type: double), (cdouble + (UDFToDouble(cfloat) / UDFToDouble(ctinyint))) (type: double), (cdouble / UDFToDouble(cint)) (type: double), (- (- cdouble)) (type: double), (9763215.5639 % UDFToDouble(cbigint)) (type: double), (2563.58 + (- (- cdouble))) (type: double)

http://git-wip-us.apache.org/repos/asf/hive/blob/7f3e4811/ql/src/test/results/clientpositive/vectorization_7.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vectorization_7.q.out b/ql/src/test/results/clientpositive/vectorization_7.q.out
index d4252a7..6e2a0ea 100644
--- a/ql/src/test/results/clientpositive/vectorization_7.q.out
+++ b/ql/src/test/results/clientpositive/vectorization_7.q.out
@@ -68,7 +68,7 @@ STAGE PLANS:
             alias: alltypesorc
             Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: (((ctinyint <> 0) and ((UDFToDouble(ctimestamp1) <= 0.0) or ((UDFToInteger(ctinyint) = cint) or (cstring2 like 'ss')))) and ((988888.0 < cdouble) or ((UDFToDouble(ctimestamp2) > -15.0) and (3569.0 >= cdouble)))) (type: boolean)
+              predicate: (((ctinyint <> 0) and ((UDFToDouble(ctimestamp1) <= 0.0) or (UDFToInteger(ctinyint) = cint) or (cstring2 like 'ss'))) and ((988888.0 < cdouble) or ((UDFToDouble(ctimestamp2) > -15.0) and (3569.0 >= cdouble)))) (type: boolean)
               Statistics: Num rows: 7281 Data size: 1565441 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: cboolean1 (type: boolean), cbigint (type: bigint), csmallint (type: smallint), ctinyint (type: tinyint), ctimestamp1 (type: timestamp), cstring1 (type: string), (cbigint + cbigint) (type: bigint), (UDFToInteger(csmallint) % -257) (type: int), (- csmallint) (type: smallint), (- ctinyint) (type: tinyint), (UDFToInteger((- ctinyint)) + 17) (type: int), (cbigint * UDFToLong((- csmallint))) (type: bigint), (cint % UDFToInteger(csmallint)) (type: int), (- ctinyint) (type: tinyint), ((- ctinyint) % ctinyint) (type: tinyint)
@@ -250,7 +250,7 @@ STAGE PLANS:
             alias: alltypesorc
             Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: (((ctinyint <> 0) and ((UDFToDouble(ctimestamp1) <= 0.0) or ((UDFToInteger(ctinyint) = cint) or (cstring2 like 'ss')))) and ((988888.0 < cdouble) or ((UDFToDouble(ctimestamp2) > 7.6850000000000005) and (3569.0 >= cdouble)))) (type: boolean)
+              predicate: (((ctinyint <> 0) and ((UDFToDouble(ctimestamp1) <= 0.0) or (UDFToInteger(ctinyint) = cint) or (cstring2 like 'ss'))) and ((988888.0 < cdouble) or ((UDFToDouble(ctimestamp2) > 7.6850000000000005) and (3569.0 >= cdouble)))) (type: boolean)
               Statistics: Num rows: 7281 Data size: 1565441 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: cboolean1 (type: boolean), cbigint (type: bigint), csmallint (type: smallint), ctinyint (type: tinyint), ctimestamp1 (type: timestamp), cstring1 (type: string), (cbigint + cbigint) (type: bigint), (UDFToInteger(csmallint) % -257) (type: int), (- csmallint) (type: smallint), (- ctinyint) (type: tinyint), (UDFToInteger((- ctinyint)) + 17) (type: int), (cbigint * UDFToLong((- csmallint))) (type: bigint), (cint % UDFToInteger(csmallint)) (type: int), (- ctinyint) (type: tinyint), ((- ctinyint) % ctinyint) (type: tinyint)

http://git-wip-us.apache.org/repos/asf/hive/blob/7f3e4811/ql/src/test/results/clientpositive/vectorization_8.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vectorization_8.q.out b/ql/src/test/results/clientpositive/vectorization_8.q.out
index c4ff1bc..c38fad1 100644
--- a/ql/src/test/results/clientpositive/vectorization_8.q.out
+++ b/ql/src/test/results/clientpositive/vectorization_8.q.out
@@ -64,7 +64,7 @@ STAGE PLANS:
             alias: alltypesorc
             Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: ((cstring2 is not null and ((UDFToDouble(ctimestamp1) <= 10.0) and (UDFToDouble(ctimestamp2) <> 16.0))) or ((cfloat < -6432.0) or (cboolean1 is not null and (cdouble = 988888.0)))) (type: boolean)
+              predicate: ((cstring2 is not null and (UDFToDouble(ctimestamp1) <= 10.0) and (UDFToDouble(ctimestamp2) <> 16.0)) or (cfloat < -6432.0) or (cboolean1 is not null and (cdouble = 988888.0))) (type: boolean)
               Statistics: Num rows: 9216 Data size: 1981473 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: ctimestamp1 (type: timestamp), cdouble (type: double), cboolean1 (type: boolean), cstring1 (type: string), cfloat (type: float), (- cdouble) (type: double), (-5638.15 - cdouble) (type: double), (cdouble * -257.0) (type: double), (UDFToFloat(cint) + cfloat) (type: float), ((- cdouble) + UDFToDouble(cbigint)) (type: double), (- cdouble) (type: double), (-1.389 - UDFToDouble(cfloat)) (type: double), (- cfloat) (type: float), ((-5638.15 - cdouble) + UDFToDouble((UDFToFloat(cint) + cfloat))) (type: double)
@@ -233,7 +233,7 @@ STAGE PLANS:
             alias: alltypesorc
             Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: ((cstring2 is not null and ((UDFToDouble(ctimestamp1) <= 12.503) and (UDFToDouble(ctimestamp2) <> 11.998))) or ((cfloat < -6432.0) or (cboolean1 is not null and (cdouble = 988888.0)))) (type: boolean)
+              predicate: ((cstring2 is not null and (UDFToDouble(ctimestamp1) <= 12.503) and (UDFToDouble(ctimestamp2) <> 11.998)) or (cfloat < -6432.0) or (cboolean1 is not null and (cdouble = 988888.0))) (type: boolean)
               Statistics: Num rows: 9216 Data size: 1981473 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: ctimestamp1 (type: timestamp), cdouble (type: double), cboolean1 (type: boolean), cstring1 (type: string), cfloat (type: float), (- cdouble) (type: double), (-5638.15 - cdouble) (type: double), (cdouble * -257.0) (type: double), (UDFToFloat(cint) + cfloat) (type: float), ((- cdouble) + UDFToDouble(cbigint)) (type: double), (- cdouble) (type: double), (-1.389 - UDFToDouble(cfloat)) (type: double), (- cfloat) (type: float), ((-5638.15 - cdouble) + UDFToDouble((UDFToFloat(cint) + cfloat))) (type: double)


[30/50] [abbrv] hive git commit: HIVE-11493: Predicate with integer column equals double evaluates to false (Pengcheng Xiong, reviewed by Hari Sankar Sivarama Subramaniyan)

Posted by se...@apache.org.
HIVE-11493: Predicate with integer column equals double evaluates to false (Pengcheng Xiong, reviewed by Hari Sankar Sivarama Subramaniyan)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/b8f1ae11
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/b8f1ae11
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/b8f1ae11

Branch: refs/heads/hbase-metastore
Commit: b8f1ae110616a8fe162f79140c785f76be76fc67
Parents: 6e76291
Author: Pengcheng Xiong <px...@apache.org>
Authored: Thu Aug 13 21:01:54 2015 -0700
Committer: Pengcheng Xiong <px...@apache.org>
Committed: Thu Aug 13 21:02:40 2015 -0700

----------------------------------------------------------------------
 .../hive/ql/parse/TypeCheckProcFactory.java     |  2 +-
 .../clientpositive/cast_tinyint_to_double.q     |  7 ++++
 .../clientpositive/cast_tinyint_to_double.q.out | 38 ++++++++++++++++++++
 .../clientpositive/infer_const_type.q.out       |  7 ++--
 .../clientpositive/spark/vectorization_0.q.out  |  2 +-
 .../spark/vectorization_short_regress.q.out     | 20 +++++------
 .../clientpositive/tez/vectorization_0.q.out    |  2 +-
 .../tez/vectorization_short_regress.q.out       | 20 +++++------
 .../clientpositive/vectorization_0.q.out        |  2 +-
 .../vectorization_short_regress.q.out           | 20 +++++------
 10 files changed, 84 insertions(+), 36 deletions(-)
----------------------------------------------------------------------
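
The effect of this fix is easiest to see at the predicate level: an equality between a narrow integer column and a double literal, such as c = 10.0 on the tinyint column of the new cast_tinyint_to_double.q test, used to be constant-folded to false, whereas with the change both sides are widened to double and the comparison is evaluated per row (see the updated infer_const_type.q.out plans below). A minimal, stand-alone Java sketch of that widening semantics; this is plain Java, not Hive code, and the variable name only mimics the test column:

public class WidenToDouble {
  public static void main(String[] args) {
    byte c = 10;  // stands in for the tinyint column value inserted by the test
    // With the fix, c = 10.0 is planned as UDFToDouble(c) = 10.0 and evaluated
    // row by row rather than being replaced by the constant false:
    System.out.println((double) c == 10.0);   // true  -> the row is returned
    System.out.println((double) c == -10.0);  // false -> no row for c = -10.0
  }
}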


http://git-wip-us.apache.org/repos/asf/hive/blob/b8f1ae11/ql/src/java/org/apache/hadoop/hive/ql/parse/TypeCheckProcFactory.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/TypeCheckProcFactory.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/TypeCheckProcFactory.java
index cd68f4e..ab5d006 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/TypeCheckProcFactory.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/TypeCheckProcFactory.java
@@ -1034,7 +1034,7 @@ public class TypeCheckProcFactory {
               // we'll try again to convert it to double
               // however, if we already tried this, or the column is NUMBER type and
               // the operator is EQUAL, return false due to the type mismatch
-              if (triedDouble ||
+              if (triedDouble &&
                   (genericUDF instanceof GenericUDFOPEqual
                   && !columnType.equals(serdeConstants.STRING_TYPE_NAME))) {
                 return new ExprNodeConstantDesc(false);

http://git-wip-us.apache.org/repos/asf/hive/blob/b8f1ae11/ql/src/test/queries/clientpositive/cast_tinyint_to_double.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/cast_tinyint_to_double.q b/ql/src/test/queries/clientpositive/cast_tinyint_to_double.q
new file mode 100644
index 0000000..59c5e89
--- /dev/null
+++ b/ql/src/test/queries/clientpositive/cast_tinyint_to_double.q
@@ -0,0 +1,7 @@
+drop table t;
+CREATE TABLE t(c tinyint);
+insert overwrite table t select 10 from src limit 1;
+
+select * from t where c = 10.0;
+
+select * from t where c = -10.0;
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hive/blob/b8f1ae11/ql/src/test/results/clientpositive/cast_tinyint_to_double.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/cast_tinyint_to_double.q.out b/ql/src/test/results/clientpositive/cast_tinyint_to_double.q.out
new file mode 100644
index 0000000..c29df65
--- /dev/null
+++ b/ql/src/test/results/clientpositive/cast_tinyint_to_double.q.out
@@ -0,0 +1,38 @@
+PREHOOK: query: drop table t
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: drop table t
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: CREATE TABLE t(c tinyint)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@t
+POSTHOOK: query: CREATE TABLE t(c tinyint)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@t
+PREHOOK: query: insert overwrite table t select 10 from src limit 1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@t
+POSTHOOK: query: insert overwrite table t select 10 from src limit 1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@t
+POSTHOOK: Lineage: t.c EXPRESSION []
+PREHOOK: query: select * from t where c = 10.0
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t
+#### A masked pattern was here ####
+POSTHOOK: query: select * from t where c = 10.0
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t
+#### A masked pattern was here ####
+10
+PREHOOK: query: select * from t where c = -10.0
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t
+#### A masked pattern was here ####
+POSTHOOK: query: select * from t where c = -10.0
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t
+#### A masked pattern was here ####

http://git-wip-us.apache.org/repos/asf/hive/blob/b8f1ae11/ql/src/test/results/clientpositive/infer_const_type.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/infer_const_type.q.out b/ql/src/test/results/clientpositive/infer_const_type.q.out
index 6368e4a..05c6a45 100644
--- a/ql/src/test/results/clientpositive/infer_const_type.q.out
+++ b/ql/src/test/results/clientpositive/infer_const_type.q.out
@@ -102,6 +102,7 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@infertypes
 #### A masked pattern was here ####
 127	32767	12345	-12345	906.0	-307.0	1234
+WARNING: Comparing a bigint and a double may result in a loss of precision.
 PREHOOK: query: -- all should return false as all numbers exceeed the largest number 
 -- which could be represented by the corresponding type
 -- and string_col = long_const should return false
@@ -136,7 +137,7 @@ STAGE PLANS:
             alias: infertypes
             Statistics: Num rows: 1 Data size: 117 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: false (type: boolean)
+              predicate: ((UDFToDouble(ti) = 128.0) or (UDFToDouble(si) = 32768.0) or (UDFToDouble(i) = 2.147483648E9) or (UDFToDouble(bi) = 9.223372036854776E18)) (type: boolean)
               Statistics: Num rows: 1 Data size: 117 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: ti (type: tinyint), si (type: smallint), i (type: int), bi (type: bigint), fl (type: float), db (type: double), str (type: string)
@@ -156,6 +157,7 @@ STAGE PLANS:
       Processor Tree:
         ListSink
 
+WARNING: Comparing a bigint and a double may result in a loss of precision.
 PREHOOK: query: SELECT * FROM infertypes WHERE
   ti  = '128' OR
   si  = 32768 OR
@@ -200,7 +202,7 @@ STAGE PLANS:
             alias: infertypes
             Statistics: Num rows: 1 Data size: 117 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: false (type: boolean)
+              predicate: ((UDFToDouble(ti) = 127.0) or (UDFToDouble(si) = 327.0) or (UDFToDouble(i) = -100.0)) (type: boolean)
               Statistics: Num rows: 1 Data size: 117 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: ti (type: tinyint), si (type: smallint), i (type: int), bi (type: bigint), fl (type: float), db (type: double), str (type: string)
@@ -234,6 +236,7 @@ POSTHOOK: query: SELECT * FROM infertypes WHERE
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@infertypes
 #### A masked pattern was here ####
+127	32767	12345	-12345	906.0	-307.0	1234
 PREHOOK: query: EXPLAIN SELECT * FROM infertypes WHERE
   ti < '127.0' AND
   i > '100.0' AND

http://git-wip-us.apache.org/repos/asf/hive/blob/b8f1ae11/ql/src/test/results/clientpositive/spark/vectorization_0.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/vectorization_0.q.out b/ql/src/test/results/clientpositive/spark/vectorization_0.q.out
index 90bcc1b..3ad059c 100644
--- a/ql/src/test/results/clientpositive/spark/vectorization_0.q.out
+++ b/ql/src/test/results/clientpositive/spark/vectorization_0.q.out
@@ -994,7 +994,7 @@ STAGE PLANS:
                   alias: alltypesorc
                   Statistics: Num rows: 12288 Data size: 377237 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: ((cstring2 like '%b%') or (79.553 <> UDFToDouble(cint)) or (UDFToDouble(cbigint) < cdouble)) (type: boolean)
+                    predicate: ((cstring2 like '%b%') or (79.553 <> UDFToDouble(cint)) or (UDFToDouble(cbigint) < cdouble) or ((UDFToShort(ctinyint) >= csmallint) and (cboolean2 = 1) and (3569.0 = UDFToDouble(ctinyint)))) (type: boolean)
                     Statistics: Num rows: 12288 Data size: 377237 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: cbigint (type: bigint), cfloat (type: float), ctinyint (type: tinyint)

http://git-wip-us.apache.org/repos/asf/hive/blob/b8f1ae11/ql/src/test/results/clientpositive/spark/vectorization_short_regress.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/vectorization_short_regress.q.out b/ql/src/test/results/clientpositive/spark/vectorization_short_regress.q.out
index 6bd1bb2..3d17aba 100644
--- a/ql/src/test/results/clientpositive/spark/vectorization_short_regress.q.out
+++ b/ql/src/test/results/clientpositive/spark/vectorization_short_regress.q.out
@@ -2085,23 +2085,23 @@ STAGE PLANS:
                   alias: alltypesorc
                   Statistics: Num rows: 12288 Data size: 377237 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: ((cdouble > 2563.58) and (((cbigint >= UDFToLong(cint)) and (UDFToInteger(csmallint) < cint) and (UDFToDouble(cfloat) < -5638.15)) or ((cdouble <= UDFToDouble(cbigint)) and (-5638.15 > UDFToDouble(cbigint))))) (type: boolean)
-                    Statistics: Num rows: 606 Data size: 18603 Basic stats: COMPLETE Column stats: NONE
+                    predicate: ((cdouble > 2563.58) and (((cbigint >= UDFToLong(cint)) and (UDFToInteger(csmallint) < cint) and (UDFToDouble(cfloat) < -5638.15)) or (2563.58 = UDFToDouble(ctinyint)) or ((cdouble <= UDFToDouble(cbigint)) and (-5638.15 > UDFToDouble(cbigint))))) (type: boolean)
+                    Statistics: Num rows: 2654 Data size: 81476 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: cdouble (type: double), cfloat (type: float)
                       outputColumnNames: _col0, _col1
-                      Statistics: Num rows: 606 Data size: 18603 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 2654 Data size: 81476 Basic stats: COMPLETE Column stats: NONE
                       Group By Operator
                         aggregations: var_samp(_col0), count(_col1), sum(_col1), var_pop(_col0), stddev_pop(_col0), sum(_col0)
                         keys: _col0 (type: double)
                         mode: hash
                         outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6
-                        Statistics: Num rows: 606 Data size: 18603 Basic stats: COMPLETE Column stats: NONE
+                        Statistics: Num rows: 2654 Data size: 81476 Basic stats: COMPLETE Column stats: NONE
                         Reduce Output Operator
                           key expressions: _col0 (type: double)
                           sort order: +
                           Map-reduce partition columns: _col0 (type: double)
-                          Statistics: Num rows: 606 Data size: 18603 Basic stats: COMPLETE Column stats: NONE
+                          Statistics: Num rows: 2654 Data size: 81476 Basic stats: COMPLETE Column stats: NONE
                           value expressions: _col1 (type: struct<count:bigint,sum:double,variance:double>), _col2 (type: bigint), _col3 (type: double), _col4 (type: struct<count:bigint,sum:double,variance:double>), _col5 (type: struct<count:bigint,sum:double,variance:double>), _col6 (type: double)
             Execution mode: vectorized
         Reducer 2 
@@ -2111,25 +2111,25 @@ STAGE PLANS:
                 keys: KEY._col0 (type: double)
                 mode: mergepartial
                 outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6
-                Statistics: Num rows: 303 Data size: 9301 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 1327 Data size: 40738 Basic stats: COMPLETE Column stats: NONE
                 Select Operator
                   expressions: _col0 (type: double), _col1 (type: double), _col5 (type: double), (_col0 + _col1) (type: double), (_col0 * 762.0) (type: double), _col6 (type: double), (-863.257 % (_col0 * 762.0)) (type: double), (2563.58 * _col1) (type: double), (- _col1) (type: double), _col2 (type: bigint), ((2563.58 * _col1) + -5638.15) (type: double), ((- _col1) * ((2563.58 * _col1) + -5638.15)) (type: double), _col3 (type: double), _col4 (type: double), (_col0 - (- _col1)) (type: double)
                   outputColumnNames: _col0, _col1, _col10, _col11, _col12, _col13, _col14, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9
-                  Statistics: Num rows: 303 Data size: 9301 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 1327 Data size: 40738 Basic stats: COMPLETE Column stats: NONE
                   Reduce Output Operator
                     key expressions: _col0 (type: double)
                     sort order: +
-                    Statistics: Num rows: 303 Data size: 9301 Basic stats: COMPLETE Column stats: NONE
+                    Statistics: Num rows: 1327 Data size: 40738 Basic stats: COMPLETE Column stats: NONE
                     value expressions: _col1 (type: double), _col2 (type: double), _col3 (type: double), _col4 (type: bigint), _col5 (type: double), _col6 (type: double), _col7 (type: double), _col8 (type: double), _col9 (type: double), _col10 (type: double), _col11 (type: double), _col12 (type: double), _col13 (type: double), _col14 (type: double)
         Reducer 3 
             Reduce Operator Tree:
               Select Operator
                 expressions: KEY.reducesinkkey0 (type: double), VALUE._col0 (type: double), VALUE._col1 (type: double), VALUE._col2 (type: double), VALUE._col3 (type: bigint), VALUE._col4 (type: double), VALUE._col5 (type: double), VALUE._col6 (type: double), VALUE._col7 (type: double), VALUE._col8 (type: double), VALUE._col9 (type: double), VALUE._col10 (type: double), VALUE._col11 (type: double), VALUE._col12 (type: double), VALUE._col13 (type: double), VALUE._col12 (type: double)
                 outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15
-                Statistics: Num rows: 303 Data size: 9301 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 1327 Data size: 40738 Basic stats: COMPLETE Column stats: NONE
                 File Output Operator
                   compressed: false
-                  Statistics: Num rows: 303 Data size: 9301 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 1327 Data size: 40738 Basic stats: COMPLETE Column stats: NONE
                   table:
                       input format: org.apache.hadoop.mapred.TextInputFormat
                       output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat

http://git-wip-us.apache.org/repos/asf/hive/blob/b8f1ae11/ql/src/test/results/clientpositive/tez/vectorization_0.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/vectorization_0.q.out b/ql/src/test/results/clientpositive/tez/vectorization_0.q.out
index cfbe9ce..18e042d 100644
--- a/ql/src/test/results/clientpositive/tez/vectorization_0.q.out
+++ b/ql/src/test/results/clientpositive/tez/vectorization_0.q.out
@@ -994,7 +994,7 @@ STAGE PLANS:
                   alias: alltypesorc
                   Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: ((cstring2 like '%b%') or (79.553 <> UDFToDouble(cint)) or (UDFToDouble(cbigint) < cdouble)) (type: boolean)
+                    predicate: ((cstring2 like '%b%') or (79.553 <> UDFToDouble(cint)) or (UDFToDouble(cbigint) < cdouble) or ((UDFToShort(ctinyint) >= csmallint) and (cboolean2 = 1) and (3569.0 = UDFToDouble(ctinyint)))) (type: boolean)
                     Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: cbigint (type: bigint), cfloat (type: float), ctinyint (type: tinyint)

http://git-wip-us.apache.org/repos/asf/hive/blob/b8f1ae11/ql/src/test/results/clientpositive/tez/vectorization_short_regress.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/vectorization_short_regress.q.out b/ql/src/test/results/clientpositive/tez/vectorization_short_regress.q.out
index 5e0d42c..59b457a 100644
--- a/ql/src/test/results/clientpositive/tez/vectorization_short_regress.q.out
+++ b/ql/src/test/results/clientpositive/tez/vectorization_short_regress.q.out
@@ -2085,23 +2085,23 @@ STAGE PLANS:
                   alias: alltypesorc
                   Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: ((cdouble > 2563.58) and (((cbigint >= UDFToLong(cint)) and (UDFToInteger(csmallint) < cint) and (UDFToDouble(cfloat) < -5638.15)) or ((cdouble <= UDFToDouble(cbigint)) and (-5638.15 > UDFToDouble(cbigint))))) (type: boolean)
-                    Statistics: Num rows: 606 Data size: 130292 Basic stats: COMPLETE Column stats: NONE
+                    predicate: ((cdouble > 2563.58) and (((cbigint >= UDFToLong(cint)) and (UDFToInteger(csmallint) < cint) and (UDFToDouble(cfloat) < -5638.15)) or (2563.58 = UDFToDouble(ctinyint)) or ((cdouble <= UDFToDouble(cbigint)) and (-5638.15 > UDFToDouble(cbigint))))) (type: boolean)
+                    Statistics: Num rows: 2654 Data size: 570619 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: cdouble (type: double), cfloat (type: float)
                       outputColumnNames: _col0, _col1
-                      Statistics: Num rows: 606 Data size: 130292 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 2654 Data size: 570619 Basic stats: COMPLETE Column stats: NONE
                       Group By Operator
                         aggregations: var_samp(_col0), count(_col1), sum(_col1), var_pop(_col0), stddev_pop(_col0), sum(_col0)
                         keys: _col0 (type: double)
                         mode: hash
                         outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6
-                        Statistics: Num rows: 606 Data size: 130292 Basic stats: COMPLETE Column stats: NONE
+                        Statistics: Num rows: 2654 Data size: 570619 Basic stats: COMPLETE Column stats: NONE
                         Reduce Output Operator
                           key expressions: _col0 (type: double)
                           sort order: +
                           Map-reduce partition columns: _col0 (type: double)
-                          Statistics: Num rows: 606 Data size: 130292 Basic stats: COMPLETE Column stats: NONE
+                          Statistics: Num rows: 2654 Data size: 570619 Basic stats: COMPLETE Column stats: NONE
                           value expressions: _col1 (type: struct<count:bigint,sum:double,variance:double>), _col2 (type: bigint), _col3 (type: double), _col4 (type: struct<count:bigint,sum:double,variance:double>), _col5 (type: struct<count:bigint,sum:double,variance:double>), _col6 (type: double)
             Execution mode: vectorized
         Reducer 2 
@@ -2111,25 +2111,25 @@ STAGE PLANS:
                 keys: KEY._col0 (type: double)
                 mode: mergepartial
                 outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6
-                Statistics: Num rows: 303 Data size: 65146 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 1327 Data size: 285309 Basic stats: COMPLETE Column stats: NONE
                 Select Operator
                   expressions: _col0 (type: double), _col1 (type: double), _col5 (type: double), (_col0 + _col1) (type: double), (_col0 * 762.0) (type: double), _col6 (type: double), (-863.257 % (_col0 * 762.0)) (type: double), (2563.58 * _col1) (type: double), (- _col1) (type: double), _col2 (type: bigint), ((2563.58 * _col1) + -5638.15) (type: double), ((- _col1) * ((2563.58 * _col1) + -5638.15)) (type: double), _col3 (type: double), _col4 (type: double), (_col0 - (- _col1)) (type: double)
                   outputColumnNames: _col0, _col1, _col10, _col11, _col12, _col13, _col14, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9
-                  Statistics: Num rows: 303 Data size: 65146 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 1327 Data size: 285309 Basic stats: COMPLETE Column stats: NONE
                   Reduce Output Operator
                     key expressions: _col0 (type: double)
                     sort order: +
-                    Statistics: Num rows: 303 Data size: 65146 Basic stats: COMPLETE Column stats: NONE
+                    Statistics: Num rows: 1327 Data size: 285309 Basic stats: COMPLETE Column stats: NONE
                     value expressions: _col1 (type: double), _col2 (type: double), _col3 (type: double), _col4 (type: bigint), _col5 (type: double), _col6 (type: double), _col7 (type: double), _col8 (type: double), _col9 (type: double), _col10 (type: double), _col11 (type: double), _col12 (type: double), _col13 (type: double), _col14 (type: double)
         Reducer 3 
             Reduce Operator Tree:
               Select Operator
                 expressions: KEY.reducesinkkey0 (type: double), VALUE._col0 (type: double), VALUE._col1 (type: double), VALUE._col2 (type: double), VALUE._col3 (type: bigint), VALUE._col4 (type: double), VALUE._col5 (type: double), VALUE._col6 (type: double), VALUE._col7 (type: double), VALUE._col8 (type: double), VALUE._col9 (type: double), VALUE._col10 (type: double), VALUE._col11 (type: double), VALUE._col12 (type: double), VALUE._col13 (type: double), VALUE._col12 (type: double)
                 outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15
-                Statistics: Num rows: 303 Data size: 65146 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 1327 Data size: 285309 Basic stats: COMPLETE Column stats: NONE
                 File Output Operator
                   compressed: false
-                  Statistics: Num rows: 303 Data size: 65146 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 1327 Data size: 285309 Basic stats: COMPLETE Column stats: NONE
                   table:
                       input format: org.apache.hadoop.mapred.TextInputFormat
                       output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat

http://git-wip-us.apache.org/repos/asf/hive/blob/b8f1ae11/ql/src/test/results/clientpositive/vectorization_0.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vectorization_0.q.out b/ql/src/test/results/clientpositive/vectorization_0.q.out
index 3fab2ff..89163cd 100644
--- a/ql/src/test/results/clientpositive/vectorization_0.q.out
+++ b/ql/src/test/results/clientpositive/vectorization_0.q.out
@@ -1010,7 +1010,7 @@ STAGE PLANS:
             alias: alltypesorc
             Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: ((cstring2 like '%b%') or (79.553 <> UDFToDouble(cint)) or (UDFToDouble(cbigint) < cdouble)) (type: boolean)
+              predicate: ((cstring2 like '%b%') or (79.553 <> UDFToDouble(cint)) or (UDFToDouble(cbigint) < cdouble) or ((UDFToShort(ctinyint) >= csmallint) and (cboolean2 = 1) and (3569.0 = UDFToDouble(ctinyint)))) (type: boolean)
               Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: cbigint (type: bigint), cfloat (type: float), ctinyint (type: tinyint)

http://git-wip-us.apache.org/repos/asf/hive/blob/b8f1ae11/ql/src/test/results/clientpositive/vectorization_short_regress.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vectorization_short_regress.q.out b/ql/src/test/results/clientpositive/vectorization_short_regress.q.out
index f2cb3ec..728f628 100644
--- a/ql/src/test/results/clientpositive/vectorization_short_regress.q.out
+++ b/ql/src/test/results/clientpositive/vectorization_short_regress.q.out
@@ -2031,23 +2031,23 @@ STAGE PLANS:
             alias: alltypesorc
             Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: ((cdouble > 2563.58) and (((cbigint >= UDFToLong(cint)) and (UDFToInteger(csmallint) < cint) and (UDFToDouble(cfloat) < -5638.15)) or ((cdouble <= UDFToDouble(cbigint)) and (-5638.15 > UDFToDouble(cbigint))))) (type: boolean)
-              Statistics: Num rows: 606 Data size: 130292 Basic stats: COMPLETE Column stats: NONE
+              predicate: ((cdouble > 2563.58) and (((cbigint >= UDFToLong(cint)) and (UDFToInteger(csmallint) < cint) and (UDFToDouble(cfloat) < -5638.15)) or (2563.58 = UDFToDouble(ctinyint)) or ((cdouble <= UDFToDouble(cbigint)) and (-5638.15 > UDFToDouble(cbigint))))) (type: boolean)
+              Statistics: Num rows: 2654 Data size: 570619 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: cdouble (type: double), cfloat (type: float)
                 outputColumnNames: _col0, _col1
-                Statistics: Num rows: 606 Data size: 130292 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 2654 Data size: 570619 Basic stats: COMPLETE Column stats: NONE
                 Group By Operator
                   aggregations: var_samp(_col0), count(_col1), sum(_col1), var_pop(_col0), stddev_pop(_col0), sum(_col0)
                   keys: _col0 (type: double)
                   mode: hash
                   outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6
-                  Statistics: Num rows: 606 Data size: 130292 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 2654 Data size: 570619 Basic stats: COMPLETE Column stats: NONE
                   Reduce Output Operator
                     key expressions: _col0 (type: double)
                     sort order: +
                     Map-reduce partition columns: _col0 (type: double)
-                    Statistics: Num rows: 606 Data size: 130292 Basic stats: COMPLETE Column stats: NONE
+                    Statistics: Num rows: 2654 Data size: 570619 Basic stats: COMPLETE Column stats: NONE
                     value expressions: _col1 (type: struct<count:bigint,sum:double,variance:double>), _col2 (type: bigint), _col3 (type: double), _col4 (type: struct<count:bigint,sum:double,variance:double>), _col5 (type: struct<count:bigint,sum:double,variance:double>), _col6 (type: double)
       Execution mode: vectorized
       Reduce Operator Tree:
@@ -2056,11 +2056,11 @@ STAGE PLANS:
           keys: KEY._col0 (type: double)
           mode: mergepartial
           outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6
-          Statistics: Num rows: 303 Data size: 65146 Basic stats: COMPLETE Column stats: NONE
+          Statistics: Num rows: 1327 Data size: 285309 Basic stats: COMPLETE Column stats: NONE
           Select Operator
             expressions: _col0 (type: double), _col1 (type: double), _col5 (type: double), (_col0 + _col1) (type: double), (_col0 * 762.0) (type: double), _col6 (type: double), (-863.257 % (_col0 * 762.0)) (type: double), (2563.58 * _col1) (type: double), (- _col1) (type: double), _col2 (type: bigint), ((2563.58 * _col1) + -5638.15) (type: double), ((- _col1) * ((2563.58 * _col1) + -5638.15)) (type: double), _col3 (type: double), _col4 (type: double), (_col0 - (- _col1)) (type: double)
             outputColumnNames: _col0, _col1, _col10, _col11, _col12, _col13, _col14, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9
-            Statistics: Num rows: 303 Data size: 65146 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 1327 Data size: 285309 Basic stats: COMPLETE Column stats: NONE
             File Output Operator
               compressed: false
               table:
@@ -2075,16 +2075,16 @@ STAGE PLANS:
             Reduce Output Operator
               key expressions: _col0 (type: double)
               sort order: +
-              Statistics: Num rows: 303 Data size: 65146 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 1327 Data size: 285309 Basic stats: COMPLETE Column stats: NONE
               value expressions: _col1 (type: double), _col2 (type: double), _col3 (type: double), _col4 (type: bigint), _col5 (type: double), _col6 (type: double), _col7 (type: double), _col8 (type: double), _col9 (type: double), _col10 (type: double), _col11 (type: double), _col12 (type: double), _col13 (type: double), _col14 (type: double)
       Reduce Operator Tree:
         Select Operator
           expressions: KEY.reducesinkkey0 (type: double), VALUE._col0 (type: double), VALUE._col1 (type: double), VALUE._col2 (type: double), VALUE._col3 (type: bigint), VALUE._col4 (type: double), VALUE._col5 (type: double), VALUE._col6 (type: double), VALUE._col7 (type: double), VALUE._col8 (type: double), VALUE._col9 (type: double), VALUE._col10 (type: double), VALUE._col11 (type: double), VALUE._col12 (type: double), VALUE._col13 (type: double), VALUE._col12 (type: double)
           outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15
-          Statistics: Num rows: 303 Data size: 65146 Basic stats: COMPLETE Column stats: NONE
+          Statistics: Num rows: 1327 Data size: 285309 Basic stats: COMPLETE Column stats: NONE
           File Output Operator
             compressed: false
-            Statistics: Num rows: 303 Data size: 65146 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 1327 Data size: 285309 Basic stats: COMPLETE Column stats: NONE
             table:
                 input format: org.apache.hadoop.mapred.TextInputFormat
                 output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat


[23/50] [abbrv] hive git commit: HIVE-11461 : Transform flat AND/OR into IN struct clause (Jesus Camacho Rodriguez, Ashutosh Chauhan via Gopal V)

Posted by se...@apache.org.
HIVE-11461 : Transform flat AND/OR into IN struct clause (Jesus Camacho Rodriguez, Ashutosh Chauhan via Gopal V)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/139101d6
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/139101d6
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/139101d6

Branch: refs/heads/hbase-metastore
Commit: 139101d6cf3be23d9ec8a88a9a75b5969434607b
Parents: 5b67f35
Author: Ashutosh Chauhan <ha...@apache.org>
Authored: Thu Aug 13 09:16:28 2015 -0700
Committer: Ashutosh Chauhan <ha...@apache.org>
Committed: Thu Aug 13 09:16:28 2015 -0700

----------------------------------------------------------------------
 .../org/apache/hadoop/hive/conf/HiveConf.java   |   2 +
 .../hadoop/hive/ql/lib/PreOrderOnceWalker.java  |  44 +++
 .../hadoop/hive/ql/optimizer/Optimizer.java     |   6 +
 .../hive/ql/optimizer/PointLookupOptimizer.java | 280 +++++++++++++++++++
 .../ql/optimizer/pcr/PcrExprProcFactory.java    |   3 +-
 .../hive/ql/optimizer/ppr/OpProcFactory.java    |   3 +-
 .../apache/hadoop/hive/ql/plan/FilterDesc.java  |  14 +-
 .../annotate_stats_deep_filters.q               |   3 +-
 .../alter_partition_coltype.q.out               |  12 +-
 .../clientpositive/annotate_stats_filter.q.out  |   8 +-
 .../results/clientpositive/flatten_and_or.q.out |   8 +-
 ql/src/test/results/clientpositive/pcr.q.out    |  12 +-
 .../results/clientpositive/ppd_transform.q.out  |  12 +-
 .../test/results/clientpositive/spark/pcr.q.out |  12 +-
 .../clientpositive/spark/ppd_transform.q.out    |  12 +-
 .../clientpositive/spark/vectorized_case.q.out  |   2 +-
 .../clientpositive/tez/explainuser_1.q.out      |   2 +-
 .../clientpositive/tez/vectorized_case.q.out    |   2 +-
 .../clientpositive/vectorized_case.q.out        |   9 +-
 19 files changed, 397 insertions(+), 49 deletions(-)
----------------------------------------------------------------------
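
At the query level the new optimizer rewrites a disjunction of conjunctive equality predicates, for example (key = 1 AND value = 'a') OR (key = 2 AND value = 'b'), into a single IN clause over a struct of the referenced columns, which is what the updated flatten_and_or.q.out and vectorized_case.q.out plans in this commit reflect. The toy Java sketch below only illustrates the grouping and the bail-out condition using plain collections; the column names, constants, and textual output are illustrative and not Hive's internal ExprNodeDesc representation:

import java.util.Arrays;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import java.util.StringJoiner;

public class PointLookupSketch {
  public static void main(String[] args) {
    // Each map models one OR branch: column name -> equality constant, i.e.
    // (key = 1 AND value = 'a') OR (key = 2 AND value = 'b').
    List<Map<String, Object>> branches = Arrays.asList(
        branch("key", 1, "value", "a"),
        branch("key", 2, "value", "b"));

    // The rewrite is only legal when every branch constrains the same columns;
    // otherwise the original OR must be kept.
    Map<String, Object> first = branches.get(0);
    for (Map<String, Object> b : branches) {
      if (!b.keySet().equals(first.keySet())) {
        throw new IllegalStateException("branches differ, keep the original OR");
      }
    }

    // Emit the equivalent IN-over-struct predicate.
    StringJoiner cols = new StringJoiner(", ", "struct(", ")");
    first.keySet().forEach(cols::add);
    StringJoiner tuples = new StringJoiner(", ");
    for (Map<String, Object> b : branches) {
      StringJoiner t = new StringJoiner(", ", "struct(", ")");
      b.values().forEach(v -> t.add(v instanceof String ? "'" + v + "'" : v.toString()));
      tuples.add(t.toString());
    }
    System.out.println(cols + " IN (" + tuples + ")");
    // Prints: struct(key, value) IN (struct(1, 'a'), struct(2, 'b'))
  }

  private static Map<String, Object> branch(String c1, Object v1, String c2, Object v2) {
    Map<String, Object> m = new LinkedHashMap<>();
    m.put(c1, v1);
    m.put(c2, v2);
    return m;
  }
}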


http://git-wip-us.apache.org/repos/asf/hive/blob/139101d6/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
----------------------------------------------------------------------
diff --git a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
index d1cb5fb..11b9f78 100644
--- a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
+++ b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
@@ -1186,6 +1186,8 @@ public class HiveConf extends Configuration {
         "Whether to transitively replicate predicate filters over equijoin conditions."),
     HIVEPPDREMOVEDUPLICATEFILTERS("hive.ppd.remove.duplicatefilters", true,
         "Whether to push predicates down into storage handlers.  Ignored when hive.optimize.ppd is false."),
+    HIVEPOINTLOOKUPOPTIMIZER("hive.optimize.point.lookup", true,
+         "Whether to transform OR clauses in Filter operators into IN clauses"),
     // Constant propagation optimizer
     HIVEOPTCONSTANTPROPAGATION("hive.optimize.constant.propagation", true, "Whether to enable constant propagation optimizer"),
     HIVEIDENTITYPROJECTREMOVER("hive.optimize.remove.identity.project", true, "Removes identity project from operator tree"),
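
The new property defaults to true, so the rewrite is active unless it is disabled, for example with SET hive.optimize.point.lookup=false in a session or programmatically through HiveConf. A short sketch of the programmatic form; only the ConfVars entry comes from this patch, the rest is generic HiveConf usage:

import org.apache.hadoop.hive.conf.HiveConf;

public class TogglePointLookup {
  public static void main(String[] args) {
    HiveConf conf = new HiveConf();
    // HIVEPOINTLOOKUPOPTIMIZER is the ConfVars entry added above for
    // hive.optimize.point.lookup; Optimizer.java checks it before adding
    // PointLookupOptimizer to the list of transformations.
    conf.setBoolVar(HiveConf.ConfVars.HIVEPOINTLOOKUPOPTIMIZER, false);
    System.out.println(
        HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVEPOINTLOOKUPOPTIMIZER));  // false
  }
}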

http://git-wip-us.apache.org/repos/asf/hive/blob/139101d6/ql/src/java/org/apache/hadoop/hive/ql/lib/PreOrderOnceWalker.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/lib/PreOrderOnceWalker.java b/ql/src/java/org/apache/hadoop/hive/ql/lib/PreOrderOnceWalker.java
new file mode 100644
index 0000000..d891fc2
--- /dev/null
+++ b/ql/src/java/org/apache/hadoop/hive/ql/lib/PreOrderOnceWalker.java
@@ -0,0 +1,44 @@
+package org.apache.hadoop.hive.ql.lib;
+
+import org.apache.hadoop.hive.ql.parse.SemanticException;
+
+/**
+ * Graph walker that takes a list of starting nodes and walks them in pre-order.
+ * If a rule fires against a given node, we do not try to apply the rule
+ * to its children.
+ */
+public class PreOrderOnceWalker extends PreOrderWalker {
+
+  public PreOrderOnceWalker(Dispatcher disp) {
+    super(disp);
+  }
+
+  /**
+   * Walk the current operator and its descendants.
+   * 
+   * @param nd
+   *          current operator in the graph
+   * @throws SemanticException
+   */
+  @Override
+  public void walk(Node nd) throws SemanticException {
+    opStack.push(nd);
+    dispatch(nd, opStack);
+
+    // The rule has been applied, we bail out
+    if (retMap.get(nd) != null) {
+      opStack.pop();
+      return;
+    }
+
+    // move all the children to the front of queue
+    if (nd.getChildren() != null) {
+      for (Node n : nd.getChildren()) {
+        walk(n);
+      }
+    }
+
+    opStack.pop();
+  }
+
+}
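
The walker is intended to be paired with a rule dispatcher, exactly as PointLookupOptimizer.generateInClause does further down in this commit: register a processor for the node types of interest, hand the dispatcher to the walker, and start from the root expression; once the processor returns a non-null result for a node, that node's children are skipped. A condensed sketch of that wiring, with a placeholder processor body:

import java.util.ArrayList;
import java.util.HashMap;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import java.util.Stack;

import org.apache.hadoop.hive.ql.lib.DefaultRuleDispatcher;
import org.apache.hadoop.hive.ql.lib.Dispatcher;
import org.apache.hadoop.hive.ql.lib.GraphWalker;
import org.apache.hadoop.hive.ql.lib.Node;
import org.apache.hadoop.hive.ql.lib.NodeProcessor;
import org.apache.hadoop.hive.ql.lib.NodeProcessorCtx;
import org.apache.hadoop.hive.ql.lib.PreOrderOnceWalker;
import org.apache.hadoop.hive.ql.lib.Rule;
import org.apache.hadoop.hive.ql.lib.TypeRule;
import org.apache.hadoop.hive.ql.parse.SemanticException;
import org.apache.hadoop.hive.ql.plan.ExprNodeDesc;
import org.apache.hadoop.hive.ql.plan.ExprNodeGenericFuncDesc;

public class PreOrderOnceExample {
  // Walks a predicate tree; a processor returning a non-null result for a node
  // stops the walker from descending into that node's children.
  public static ExprNodeDesc walk(ExprNodeDesc predicate) throws SemanticException {
    Map<Rule, NodeProcessor> rules = new LinkedHashMap<Rule, NodeProcessor>();
    rules.put(new TypeRule(ExprNodeGenericFuncDesc.class), new NodeProcessor() {
      @Override
      public Object process(Node nd, Stack<Node> stack, NodeProcessorCtx ctx,
          Object... nodeOutputs) throws SemanticException {
        // Placeholder: a real processor would return a rewritten ExprNodeDesc
        // (as PointLookupOptimizer's OrExprProcessor does) or null to keep walking.
        return null;
      }
    });

    Dispatcher disp = new DefaultRuleDispatcher(null, rules, null);
    GraphWalker walker = new PreOrderOnceWalker(disp);

    List<Node> start = new ArrayList<Node>();
    start.add(predicate);
    HashMap<Node, Object> out = new HashMap<Node, Object>();
    walker.startWalking(start, out);
    return (ExprNodeDesc) out.get(predicate);
  }
}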

http://git-wip-us.apache.org/repos/asf/hive/blob/139101d6/ql/src/java/org/apache/hadoop/hive/ql/optimizer/Optimizer.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/Optimizer.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/Optimizer.java
index c4e11b9..14f362f 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/Optimizer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/Optimizer.java
@@ -81,6 +81,12 @@ public class Optimizer {
       // are combined and may become eligible for reduction (like is not null filter).
         transformations.add(new ConstantPropagate());
     }
+
+    // Try to transform OR predicates in Filter into IN clauses.
+    if (HiveConf.getBoolVar(hiveConf, HiveConf.ConfVars.HIVEPOINTLOOKUPOPTIMIZER)) {
+      transformations.add(new PointLookupOptimizer());
+    }
+
     if (HiveConf.getBoolVar(hiveConf, HiveConf.ConfVars.HIVEOPTPPD)) {
       transformations.add(new PartitionPruner());
       transformations.add(new PartitionConditionRemover());

http://git-wip-us.apache.org/repos/asf/hive/blob/139101d6/ql/src/java/org/apache/hadoop/hive/ql/optimizer/PointLookupOptimizer.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/PointLookupOptimizer.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/PointLookupOptimizer.java
new file mode 100644
index 0000000..6a8acec
--- /dev/null
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/PointLookupOptimizer.java
@@ -0,0 +1,280 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.ql.optimizer;
+
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.LinkedHashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Stack;
+
+import org.apache.calcite.util.Pair;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hive.ql.exec.Description;
+import org.apache.hadoop.hive.ql.exec.FilterOperator;
+import org.apache.hadoop.hive.ql.exec.FunctionRegistry;
+import org.apache.hadoop.hive.ql.lib.DefaultRuleDispatcher;
+import org.apache.hadoop.hive.ql.lib.Dispatcher;
+import org.apache.hadoop.hive.ql.lib.ForwardWalker;
+import org.apache.hadoop.hive.ql.lib.GraphWalker;
+import org.apache.hadoop.hive.ql.lib.Node;
+import org.apache.hadoop.hive.ql.lib.NodeProcessor;
+import org.apache.hadoop.hive.ql.lib.NodeProcessorCtx;
+import org.apache.hadoop.hive.ql.lib.PreOrderOnceWalker;
+import org.apache.hadoop.hive.ql.lib.Rule;
+import org.apache.hadoop.hive.ql.lib.RuleRegExp;
+import org.apache.hadoop.hive.ql.lib.TypeRule;
+import org.apache.hadoop.hive.ql.parse.ParseContext;
+import org.apache.hadoop.hive.ql.parse.SemanticException;
+import org.apache.hadoop.hive.ql.plan.ExprNodeColumnDesc;
+import org.apache.hadoop.hive.ql.plan.ExprNodeConstantDesc;
+import org.apache.hadoop.hive.ql.plan.ExprNodeDesc;
+import org.apache.hadoop.hive.ql.plan.ExprNodeGenericFuncDesc;
+import org.apache.hadoop.hive.ql.udf.generic.GenericUDF;
+import org.apache.hadoop.hive.ql.udf.generic.GenericUDFIn;
+import org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPEqual;
+import org.apache.hadoop.hive.ql.udf.generic.GenericUDFStruct;
+import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo;
+import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory;
+
+import com.google.common.collect.ArrayListMultimap;
+import com.google.common.collect.ListMultimap;
+
+/**
+ * This optimization will take a Filter expression, and if its predicate contains
+ * an OR operator whose children are constant equality expressions, it will try
+ * to generate an IN clause (which is more efficient). If the OR operator contains
+ * AND operator children, the optimization might generate an IN clause that uses
+ * structs.
+ */
+public class PointLookupOptimizer implements Transform {
+
+  private static final Log LOG = LogFactory.getLog(PointLookupOptimizer.class);
+  private static final String IN_UDF =
+          GenericUDFIn.class.getAnnotation(Description.class).name();
+  private static final String STRUCT_UDF =
+          GenericUDFStruct.class.getAnnotation(Description.class).name();
+
+
+  @Override
+  public ParseContext transform(ParseContext pctx) throws SemanticException {
+    // 1. Trigger transformation
+    Map<Rule, NodeProcessor> opRules = new LinkedHashMap<Rule, NodeProcessor>();
+    opRules.put(new RuleRegExp("R1", FilterOperator.getOperatorName() + "%"), new FilterTransformer());
+
+    Dispatcher disp = new DefaultRuleDispatcher(null, opRules, null);
+    GraphWalker ogw = new ForwardWalker(disp);
+
+    List<Node> topNodes = new ArrayList<Node>();
+    topNodes.addAll(pctx.getTopOps().values());
+    ogw.startWalking(topNodes, null);
+    return pctx;
+  }
+
+  private class FilterTransformer implements NodeProcessor {
+
+    @Override
+    public Object process(Node nd, Stack<Node> stack, NodeProcessorCtx procCtx,
+        Object... nodeOutputs) throws SemanticException {
+      FilterOperator filterOp = (FilterOperator) nd;
+      ExprNodeDesc predicate = filterOp.getConf().getPredicate();
+
+      // Try to rewrite the predicate into an equivalent IN clause
+      ExprNodeDesc newPredicate = generateInClause(predicate);
+      if (newPredicate != null) {
+        // Replace filter in current FIL with new FIL
+        if (LOG.isDebugEnabled()) {
+          LOG.debug("Generated new predicate with IN clause: " + newPredicate);
+        }
+        filterOp.getConf().setOrigPredicate(predicate);
+        filterOp.getConf().setPredicate(newPredicate);
+      }
+
+      return null;
+    }
+
+    private ExprNodeDesc generateInClause(ExprNodeDesc predicate) throws SemanticException {
+      Map<Rule, NodeProcessor> exprRules = new LinkedHashMap<Rule, NodeProcessor>();
+      exprRules.put(new TypeRule(ExprNodeGenericFuncDesc.class), new OrExprProcessor());
+
+      // The dispatcher fires the processor corresponding to the closest matching
+      // rule and passes the context along
+      Dispatcher disp = new DefaultRuleDispatcher(null, exprRules, null);
+      GraphWalker egw = new PreOrderOnceWalker(disp);
+
+      List<Node> startNodes = new ArrayList<Node>();
+      startNodes.add(predicate);
+
+      HashMap<Node, Object> outputMap = new HashMap<Node, Object>();
+      egw.startWalking(startNodes, outputMap);
+      return (ExprNodeDesc) outputMap.get(predicate);
+    }
+  }
+
+  private class OrExprProcessor implements NodeProcessor {
+
+    @Override
+    public Object process(Node nd, Stack<Node> stack, NodeProcessorCtx procCtx,
+        Object... nodeOutputs) throws SemanticException {
+      ExprNodeGenericFuncDesc fd = (ExprNodeGenericFuncDesc) nd;
+
+      // 1. If it is not an OR operator, we bail out.
+      if (!FunctionRegistry.isOpOr(fd)) {
+        return null;
+      }
+
+      // 2. It is an OR operator
+      List<ExprNodeDesc> children = fd.getChildren();
+      ListMultimap<String,Pair<ExprNodeColumnDesc, ExprNodeConstantDesc>> columnConstantsMap =
+              ArrayListMultimap.create();
+      boolean modeAnd = false;
+      for (int i = 0; i < children.size(); i++) {
+        ExprNodeDesc child = children.get(i);
+
+        // - If the child is an AND operator, extract its children
+        // - Otherwise, take the child itself
+        final List<ExprNodeDesc> conjunctions;
+        if (FunctionRegistry.isOpAnd(child)) {
+          // If it is the first child, we set the mode variable value
+          // Otherwise, if the mode we are working on is different, we
+          // bail out
+          if (i == 0) {
+            modeAnd = true;
+          } else {
+            if (!modeAnd) {
+              return null;
+            }
+          }
+
+          // Multiple children
+          conjunctions = child.getChildren();
+        } else {
+          // If it is the first child, we set the mode variable value
+          // Otherwise, if the mode we are working on is different, we
+          // bail out
+          if (i == 0) {
+            modeAnd = false;
+          } else {
+            if (modeAnd) {
+              return null;
+            }
+          }
+
+          // One child
+          conjunctions = new ArrayList<ExprNodeDesc>(1);
+          conjunctions.add(child);
+        }
+
+        // 3. We will extract the literals to introduce in the IN clause.
+        //    If the patterns OR-AND-EqOp or OR-EqOp are not matched, we bail out
+        for (ExprNodeDesc conjunction: conjunctions) {
+          if (!(conjunction instanceof ExprNodeGenericFuncDesc)) {
+            return null;
+          }
+
+          ExprNodeGenericFuncDesc conjCall = (ExprNodeGenericFuncDesc) conjunction;
+          Class<? extends GenericUDF> genericUdfClass = conjCall.getGenericUDF().getClass();
+          if(GenericUDFOPEqual.class == genericUdfClass) {
+            if (conjCall.getChildren().get(0) instanceof ExprNodeColumnDesc &&
+                    conjCall.getChildren().get(1) instanceof ExprNodeConstantDesc) {
+              ExprNodeColumnDesc ref = (ExprNodeColumnDesc) conjCall.getChildren().get(0);
+              String refString = ref.toString();
+              columnConstantsMap.put(refString,
+                      new Pair<ExprNodeColumnDesc,ExprNodeConstantDesc>(
+                              ref, (ExprNodeConstantDesc) conjCall.getChildren().get(1)));
+              if (columnConstantsMap.get(refString).size() != i+1) {
+                // Bail out unless this column has appeared exactly once in every disjunct seen so far
+                return null;
+              }
+            } else if (conjCall.getChildren().get(1) instanceof ExprNodeColumnDesc &&
+                    conjCall.getChildren().get(0) instanceof ExprNodeConstantDesc) {
+              ExprNodeColumnDesc ref = (ExprNodeColumnDesc) conjCall.getChildren().get(1);
+              String refString = ref.toString();
+              columnConstantsMap.put(refString,
+                      new Pair<ExprNodeColumnDesc,ExprNodeConstantDesc>(
+                              ref, (ExprNodeConstantDesc) conjCall.getChildren().get(0)));
+              if (columnConstantsMap.get(refString).size() != i+1) {
+                // Bail out unless this column has appeared exactly once in every disjunct seen so far
+                return null;
+              }
+            } else {
+              // We bail out
+              return null;
+            }
+          } else {
+            // We bail out
+            return null;
+          }
+        }
+      }
+
+      // 4. We build the new predicate and return it
+      ExprNodeDesc newPredicate = null;
+      List<ExprNodeDesc> newChildren = new ArrayList<ExprNodeDesc>(children.size());
+      // 4.1 Create structs
+      List<ExprNodeDesc> columns = new ArrayList<ExprNodeDesc>();
+      List<String> names = new ArrayList<String>();
+      List<TypeInfo> typeInfos = new ArrayList<TypeInfo>();
+      for (int i = 0; i < children.size(); i++) {
+        List<ExprNodeDesc> constantFields = new ArrayList<ExprNodeDesc>(children.size());
+
+        for (String keyString : columnConstantsMap.keySet()) {
+          Pair<ExprNodeColumnDesc, ExprNodeConstantDesc> columnConstant =
+                  columnConstantsMap.get(keyString).get(i);
+          if (i == 0) {
+            columns.add(columnConstant.left);
+            names.add(columnConstant.left.getColumn());
+            typeInfos.add(columnConstant.left.getTypeInfo());
+          }
+          constantFields.add(columnConstant.right);
+        }
+
+        if (i == 0) {
+          ExprNodeDesc columnsRefs;
+          if (columns.size() == 1) {
+            columnsRefs = columns.get(0);
+          } else {
+            columnsRefs = new ExprNodeGenericFuncDesc(
+                    TypeInfoFactory.getStructTypeInfo(names, typeInfos),
+                    FunctionRegistry.getFunctionInfo(STRUCT_UDF).getGenericUDF(),
+                    columns);
+          }
+          newChildren.add(columnsRefs);
+        }
+        ExprNodeDesc values;
+        if (constantFields.size() == 1) {
+          values = constantFields.get(0);
+        } else {
+          values = new ExprNodeGenericFuncDesc(
+                  TypeInfoFactory.getStructTypeInfo(names, typeInfos),
+                  FunctionRegistry.getFunctionInfo(STRUCT_UDF).getGenericUDF(),
+                  constantFields);
+        }
+        newChildren.add(values);
+      }
+      newPredicate = new ExprNodeGenericFuncDesc(TypeInfoFactory.booleanTypeInfo,
+              FunctionRegistry.getFunctionInfo(IN_UDF).getGenericUDF(), newChildren);
+
+      return newPredicate;
+    }
+
+  }
+
+}
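
A minimal sketch of the rewrite this processor performs, using the src(key, value) columns that appear in the plan diffs further down; the before/after predicates are the ones shown in flatten_and_or.q.out:

  -- a disjunction of conjunctive equality predicates over the same columns
  SELECT key FROM src
  WHERE (key = '0' AND value = '8')
     OR (key = '1' AND value = '5');

  -- FilterOperator predicate before the rewrite:
  --   ((key = '0') and (value = '8')) or ((key = '1') and (value = '5'))
  -- and after:
  --   (struct(key,value)) IN (const struct('0','8'), const struct('1','5'))
  -- single-column disjunctions collapse to a plain IN, e.g. (state) IN ('OH', 'CA')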

http://git-wip-us.apache.org/repos/asf/hive/blob/139101d6/ql/src/java/org/apache/hadoop/hive/ql/optimizer/pcr/PcrExprProcFactory.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/pcr/PcrExprProcFactory.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/pcr/PcrExprProcFactory.java
index 71a6c73..825938a 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/pcr/PcrExprProcFactory.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/pcr/PcrExprProcFactory.java
@@ -50,6 +50,7 @@ import org.apache.hadoop.hive.ql.plan.ExprNodeFieldDesc;
 import org.apache.hadoop.hive.ql.plan.ExprNodeGenericFuncDesc;
 import org.apache.hadoop.hive.serde2.SerDeException;
 import org.apache.hadoop.hive.serde2.objectinspector.StructObjectInspector;
+import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector.Category;
 import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory;
 
 /**
@@ -379,7 +380,7 @@ public final class PcrExprProcFactory {
           }
         }
 
-        if (has_part_col) {
+        if (has_part_col && fd.getTypeInfo().getCategory() == Category.PRIMITIVE) {
           //  we need to evaluate result for every pruned partition
           if (fd.getTypeInfo().equals(TypeInfoFactory.booleanTypeInfo)) {
             // if the return type of the GenericUDF is boolean and all partitions agree on

http://git-wip-us.apache.org/repos/asf/hive/blob/139101d6/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ppr/OpProcFactory.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ppr/OpProcFactory.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ppr/OpProcFactory.java
index fd51628..7262164 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ppr/OpProcFactory.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ppr/OpProcFactory.java
@@ -55,7 +55,8 @@ public final class OpProcFactory extends PrunerOperatorFactory {
         TableScanOperator top) throws SemanticException, UDFArgumentException {
       OpWalkerCtx owc = (OpWalkerCtx) procCtx;
       // Otherwise this is not a sampling predicate and we need to
-      ExprNodeDesc predicate = fop.getConf().getPredicate();
+      ExprNodeDesc predicate = fop.getConf().getOrigPredicate();
+      predicate = predicate == null ? fop.getConf().getPredicate() : predicate;
       String alias = top.getConf().getAlias();
 
       // Generate the partition pruning predicate
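
The fallback above hands partition pruning the original predicate (when setOrigPredicate has stashed one) rather than the rewritten IN-over-struct form, presumably so that partition-column conditions remain directly extractable by the pruner. A sketch of the two forms, borrowed from the pcr.q.out change below, where ds is the partition column:

  -- executed filter after the rewrite:
  --   (struct(key,ds)) IN (const struct(1,'2000-04-08'), const struct(2,'2000-04-09'))
  -- predicate handed to the pruner (origPredicate):
  --   ((ds = '2000-04-08') and (key = 1)) or ((ds = '2000-04-09') and (key = 2))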

http://git-wip-us.apache.org/repos/asf/hive/blob/139101d6/ql/src/java/org/apache/hadoop/hive/ql/plan/FilterDesc.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/FilterDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/FilterDesc.java
index 5408dc8..6a31689 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/plan/FilterDesc.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/FilterDesc.java
@@ -70,14 +70,16 @@ public class FilterDesc extends AbstractOperatorDesc {
       SampleDesc desc = new SampleDesc(numerator, denominator, null, inputPruning);
       return desc;
     }
-    
+
+    @Override
     public String toString() {
-      return inputPruning ? "BUCKET " + numerator + " OUT OF " + denominator: null;  
+      return inputPruning ? "BUCKET " + numerator + " OUT OF " + denominator: null;
     }
   }
 
   private static final long serialVersionUID = 1L;
   private org.apache.hadoop.hive.ql.plan.ExprNodeDesc predicate;
+  private transient ExprNodeDesc origPredicate;
   private boolean isSamplingPred;
   private transient SampleDesc sampleDescr;
   //Is this a filter that should perform a comparison for sorted searches
@@ -149,6 +151,14 @@ public class FilterDesc extends AbstractOperatorDesc {
     this.isSortedFilter = isSortedFilter;
   }
 
+  public void setOrigPredicate(ExprNodeDesc origPredicate) {
+    this.origPredicate = origPredicate;
+  }
+
+  public ExprNodeDesc getOrigPredicate() {
+    return origPredicate;
+  }
+
   /**
    * Some filters are generated or implied, which means it is not in the query.
    * It is added by the analyzer. For example, when we do an inner join, we add

http://git-wip-us.apache.org/repos/asf/hive/blob/139101d6/ql/src/test/queries/clientpositive/annotate_stats_deep_filters.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/annotate_stats_deep_filters.q b/ql/src/test/queries/clientpositive/annotate_stats_deep_filters.q
index c027532..e01a654 100644
--- a/ql/src/test/queries/clientpositive/annotate_stats_deep_filters.q
+++ b/ql/src/test/queries/clientpositive/annotate_stats_deep_filters.q
@@ -20,6 +20,7 @@ analyze table over1k compute statistics;
 analyze table over1k compute statistics for columns;
 
 set hive.stats.fetch.column.stats=true;
+set hive.optimize.point.lookup=false;
 explain select count(*) from over1k where (
 (t=1 and si=2)
 or (t=2 and si=3)
@@ -63,4 +64,4 @@ or (t=17 and si=18)
 or (t=27 and si=28)
 or (t=37 and si=38)
 or (t=47 and si=48)
-or (t=52 and si=53));
\ No newline at end of file
+or (t=52 and si=53));
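
The set statement added above suggests the new rewrite is controlled by hive.optimize.point.lookup, and that disabling it keeps the expanded OR form this deep-filter statistics test is meant to exercise. A hedged example (flag name taken from the hunk above; its default is not shown here):

  set hive.optimize.point.lookup=false;
  -- keeps predicates such as (t=1 and si=2) or (t=2 and si=3) ... as a flat OR
  -- instead of (struct(t,si)) IN (const struct(1,2), const struct(2,3), ...)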

http://git-wip-us.apache.org/repos/asf/hive/blob/139101d6/ql/src/test/results/clientpositive/alter_partition_coltype.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/alter_partition_coltype.q.out b/ql/src/test/results/clientpositive/alter_partition_coltype.q.out
index 9fc3c8d..06515da 100644
--- a/ql/src/test/results/clientpositive/alter_partition_coltype.q.out
+++ b/ql/src/test/results/clientpositive/alter_partition_coltype.q.out
@@ -1134,11 +1134,15 @@ STAGE PLANS:
           alias: alterdynamic_part_table
           Statistics: Num rows: 1 Data size: 1 Basic stats: COMPLETE Column stats: NONE
           GatherStats: false
-          Select Operator
-            expressions: intcol (type: string)
-            outputColumnNames: _col0
+          Filter Operator
+            isSamplingPred: false
+            predicate: (struct(partcol1,partcol2)) IN (const struct(2,'1'), const struct(1,'__HIVE_DEFAULT_PARTITION__')) (type: boolean)
             Statistics: Num rows: 1 Data size: 1 Basic stats: COMPLETE Column stats: NONE
-            ListSink
+            Select Operator
+              expressions: intcol (type: string)
+              outputColumnNames: _col0
+              Statistics: Num rows: 1 Data size: 1 Basic stats: COMPLETE Column stats: NONE
+              ListSink
 
 PREHOOK: query: select intcol from pt.alterdynamic_part_table where (partcol1='2' and partcol2='1')or (partcol1='1' and partcol2='__HIVE_DEFAULT_PARTITION__')
 PREHOOK: type: QUERY

http://git-wip-us.apache.org/repos/asf/hive/blob/139101d6/ql/src/test/results/clientpositive/annotate_stats_filter.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/annotate_stats_filter.q.out b/ql/src/test/results/clientpositive/annotate_stats_filter.q.out
index 492e302..af1e1c3 100644
--- a/ql/src/test/results/clientpositive/annotate_stats_filter.q.out
+++ b/ql/src/test/results/clientpositive/annotate_stats_filter.q.out
@@ -678,15 +678,15 @@ STAGE PLANS:
             alias: loc_orc
             Statistics: Num rows: 8 Data size: 796 Basic stats: COMPLETE Column stats: COMPLETE
             Filter Operator
-              predicate: ((state = 'OH') or (state = 'CA')) (type: boolean)
-              Statistics: Num rows: 2 Data size: 204 Basic stats: COMPLETE Column stats: COMPLETE
+              predicate: (state) IN ('OH', 'CA') (type: boolean)
+              Statistics: Num rows: 4 Data size: 396 Basic stats: COMPLETE Column stats: COMPLETE
               Select Operator
                 expressions: state (type: string), locid (type: int), zip (type: bigint), year (type: int)
                 outputColumnNames: _col0, _col1, _col2, _col3
-                Statistics: Num rows: 2 Data size: 204 Basic stats: COMPLETE Column stats: COMPLETE
+                Statistics: Num rows: 4 Data size: 396 Basic stats: COMPLETE Column stats: COMPLETE
                 File Output Operator
                   compressed: false
-                  Statistics: Num rows: 2 Data size: 204 Basic stats: COMPLETE Column stats: COMPLETE
+                  Statistics: Num rows: 4 Data size: 396 Basic stats: COMPLETE Column stats: COMPLETE
                   table:
                       input format: org.apache.hadoop.mapred.TextInputFormat
                       output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat

http://git-wip-us.apache.org/repos/asf/hive/blob/139101d6/ql/src/test/results/clientpositive/flatten_and_or.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/flatten_and_or.q.out b/ql/src/test/results/clientpositive/flatten_and_or.q.out
index 9c51ff3..5f25daa 100644
--- a/ql/src/test/results/clientpositive/flatten_and_or.q.out
+++ b/ql/src/test/results/clientpositive/flatten_and_or.q.out
@@ -44,15 +44,15 @@ STAGE PLANS:
             alias: src
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: (((key = '0') and (value = '8')) or ((key = '1') and (value = '5')) or ((key = '2') and (value = '6')) or ((key = '3') and (value = '8')) or ((key = '4') and (value = '1')) or ((key = '5') and (value = '6')) or ((key = '6') and (value = '1')) or ((key = '7') and (value = '1')) or ((key = '8') and (value = '1')) or ((key = '9') and (value = '1')) or ((key = '10') and (value = '3'))) (type: boolean)
-              Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+              predicate: (struct(key,value)) IN (const struct('0','8'), const struct('1','5'), const struct('2','6'), const struct('3','8'), const struct('4','1'), const struct('5','6'), const struct('6','1'), const struct('7','1'), const struct('8','1'), const struct('9','1'), const struct('10','3')) (type: boolean)
+              Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: key (type: string)
                 outputColumnNames: _col0
-                Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
                 File Output Operator
                   compressed: false
-                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
                   table:
                       input format: org.apache.hadoop.mapred.TextInputFormat
                       output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat

http://git-wip-us.apache.org/repos/asf/hive/blob/139101d6/ql/src/test/results/clientpositive/pcr.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/pcr.q.out b/ql/src/test/results/clientpositive/pcr.q.out
index d7c40a3..4c9ea77 100644
--- a/ql/src/test/results/clientpositive/pcr.q.out
+++ b/ql/src/test/results/clientpositive/pcr.q.out
@@ -2475,16 +2475,16 @@ STAGE PLANS:
             GatherStats: false
             Filter Operator
               isSamplingPred: false
-              predicate: (((ds = '2000-04-08') and (key = 1)) or ((ds = '2000-04-09') and (key = 2))) (type: boolean)
-              Statistics: Num rows: 40 Data size: 320 Basic stats: COMPLETE Column stats: NONE
+              predicate: (struct(key,ds)) IN (const struct(1,'2000-04-08'), const struct(2,'2000-04-09')) (type: boolean)
+              Statistics: Num rows: 20 Data size: 160 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: key (type: int), value (type: string), ds (type: string)
                 outputColumnNames: _col0, _col1, _col2
-                Statistics: Num rows: 40 Data size: 320 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 20 Data size: 160 Basic stats: COMPLETE Column stats: NONE
                 Reduce Output Operator
                   key expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string)
                   sort order: +++
-                  Statistics: Num rows: 40 Data size: 320 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 20 Data size: 160 Basic stats: COMPLETE Column stats: NONE
                   tag: -1
                   auto parallelism: false
       Path -> Alias:
@@ -2588,13 +2588,13 @@ STAGE PLANS:
         Select Operator
           expressions: KEY.reducesinkkey0 (type: int), KEY.reducesinkkey1 (type: string), KEY.reducesinkkey2 (type: string)
           outputColumnNames: _col0, _col1, _col2
-          Statistics: Num rows: 40 Data size: 320 Basic stats: COMPLETE Column stats: NONE
+          Statistics: Num rows: 20 Data size: 160 Basic stats: COMPLETE Column stats: NONE
           File Output Operator
             compressed: false
             GlobalTableId: 0
 #### A masked pattern was here ####
             NumFilesPerFileSink: 1
-            Statistics: Num rows: 40 Data size: 320 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 20 Data size: 160 Basic stats: COMPLETE Column stats: NONE
 #### A masked pattern was here ####
             table:
                 input format: org.apache.hadoop.mapred.TextInputFormat

http://git-wip-us.apache.org/repos/asf/hive/blob/139101d6/ql/src/test/results/clientpositive/ppd_transform.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/ppd_transform.q.out b/ql/src/test/results/clientpositive/ppd_transform.q.out
index 17248e4..f536767 100644
--- a/ql/src/test/results/clientpositive/ppd_transform.q.out
+++ b/ql/src/test/results/clientpositive/ppd_transform.q.out
@@ -390,21 +390,21 @@ STAGE PLANS:
                     serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                 Filter Operator
-                  predicate: ((_col0 = 'a') or (_col0 = 'b')) (type: boolean)
-                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                  predicate: (_col0) IN ('a', 'b') (type: boolean)
+                  Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
                   File Output Operator
                     compressed: false
-                    Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                    Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
                     table:
                         input format: org.apache.hadoop.mapred.TextInputFormat
                         output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                         serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                 Filter Operator
-                  predicate: ((_col0 = 'c') or (_col0 = 'd')) (type: boolean)
-                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                  predicate: (_col0) IN ('c', 'd') (type: boolean)
+                  Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
                   File Output Operator
                     compressed: false
-                    Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                    Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
                     table:
                         input format: org.apache.hadoop.mapred.TextInputFormat
                         output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat

http://git-wip-us.apache.org/repos/asf/hive/blob/139101d6/ql/src/test/results/clientpositive/spark/pcr.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/pcr.q.out b/ql/src/test/results/clientpositive/spark/pcr.q.out
index fb08f10..5aa0df8 100644
--- a/ql/src/test/results/clientpositive/spark/pcr.q.out
+++ b/ql/src/test/results/clientpositive/spark/pcr.q.out
@@ -2534,16 +2534,16 @@ STAGE PLANS:
                   GatherStats: false
                   Filter Operator
                     isSamplingPred: false
-                    predicate: (((ds = '2000-04-08') and (key = 1)) or ((ds = '2000-04-09') and (key = 2))) (type: boolean)
-                    Statistics: Num rows: 40 Data size: 320 Basic stats: COMPLETE Column stats: NONE
+                    predicate: (struct(key,ds)) IN (const struct(1,'2000-04-08'), const struct(2,'2000-04-09')) (type: boolean)
+                    Statistics: Num rows: 20 Data size: 160 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: key (type: int), value (type: string), ds (type: string)
                       outputColumnNames: _col0, _col1, _col2
-                      Statistics: Num rows: 40 Data size: 320 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 20 Data size: 160 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
                         key expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string)
                         sort order: +++
-                        Statistics: Num rows: 40 Data size: 320 Basic stats: COMPLETE Column stats: NONE
+                        Statistics: Num rows: 20 Data size: 160 Basic stats: COMPLETE Column stats: NONE
                         tag: -1
                         auto parallelism: false
             Path -> Alias:
@@ -2648,13 +2648,13 @@ STAGE PLANS:
               Select Operator
                 expressions: KEY.reducesinkkey0 (type: int), KEY.reducesinkkey1 (type: string), KEY.reducesinkkey2 (type: string)
                 outputColumnNames: _col0, _col1, _col2
-                Statistics: Num rows: 40 Data size: 320 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 20 Data size: 160 Basic stats: COMPLETE Column stats: NONE
                 File Output Operator
                   compressed: false
                   GlobalTableId: 0
 #### A masked pattern was here ####
                   NumFilesPerFileSink: 1
-                  Statistics: Num rows: 40 Data size: 320 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 20 Data size: 160 Basic stats: COMPLETE Column stats: NONE
 #### A masked pattern was here ####
                   table:
                       input format: org.apache.hadoop.mapred.TextInputFormat

http://git-wip-us.apache.org/repos/asf/hive/blob/139101d6/ql/src/test/results/clientpositive/spark/ppd_transform.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/ppd_transform.q.out b/ql/src/test/results/clientpositive/spark/ppd_transform.q.out
index 52a847a..a6e6e38 100644
--- a/ql/src/test/results/clientpositive/spark/ppd_transform.q.out
+++ b/ql/src/test/results/clientpositive/spark/ppd_transform.q.out
@@ -405,21 +405,21 @@ STAGE PLANS:
                           serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                       Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                       Filter Operator
-                        predicate: ((_col0 = 'a') or (_col0 = 'b')) (type: boolean)
-                        Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                        predicate: (_col0) IN ('a', 'b') (type: boolean)
+                        Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
                         File Output Operator
                           compressed: false
-                          Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                          Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
                           table:
                               input format: org.apache.hadoop.mapred.TextInputFormat
                               output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                               serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                       Filter Operator
-                        predicate: ((_col0 = 'c') or (_col0 = 'd')) (type: boolean)
-                        Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                        predicate: (_col0) IN ('c', 'd') (type: boolean)
+                        Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
                         File Output Operator
                           compressed: false
-                          Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                          Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
                           table:
                               input format: org.apache.hadoop.mapred.TextInputFormat
                               output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat

http://git-wip-us.apache.org/repos/asf/hive/blob/139101d6/ql/src/test/results/clientpositive/spark/vectorized_case.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/vectorized_case.q.out b/ql/src/test/results/clientpositive/spark/vectorized_case.q.out
index c2250e6..54003c3 100644
--- a/ql/src/test/results/clientpositive/spark/vectorized_case.q.out
+++ b/ql/src/test/results/clientpositive/spark/vectorized_case.q.out
@@ -45,7 +45,7 @@ STAGE PLANS:
         TableScan
           alias: alltypesorc
           Filter Operator
-            predicate: ((csmallint = 418) or (csmallint = 12205) or (csmallint = 10583)) (type: boolean)
+            predicate: (csmallint) IN (418, 12205, 10583) (type: boolean)
             Select Operator
               expressions: csmallint (type: smallint), CASE WHEN ((csmallint = 418)) THEN ('a') WHEN ((csmallint = 12205)) THEN ('b') ELSE ('c') END (type: string), CASE (csmallint) WHEN (418) THEN ('a') WHEN (12205) THEN ('b') ELSE ('c') END (type: string)
               outputColumnNames: _col0, _col1, _col2

http://git-wip-us.apache.org/repos/asf/hive/blob/139101d6/ql/src/test/results/clientpositive/tez/explainuser_1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/explainuser_1.q.out b/ql/src/test/results/clientpositive/tez/explainuser_1.q.out
index 9756b0c..e8a9786 100644
--- a/ql/src/test/results/clientpositive/tez/explainuser_1.q.out
+++ b/ql/src/test/results/clientpositive/tez/explainuser_1.q.out
@@ -2909,7 +2909,7 @@ Stage-0
       Select Operator [SEL_2]
          outputColumnNames:["_col0"]
          Filter Operator [FIL_4]
-            predicate:((c_int = -6) or (c_int = 6)) (type: boolean)
+            predicate:(c_int) IN (-6, 6) (type: boolean)
             TableScan [TS_0]
                alias:cbo_t1
 

http://git-wip-us.apache.org/repos/asf/hive/blob/139101d6/ql/src/test/results/clientpositive/tez/vectorized_case.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/vectorized_case.q.out b/ql/src/test/results/clientpositive/tez/vectorized_case.q.out
index c2250e6..54003c3 100644
--- a/ql/src/test/results/clientpositive/tez/vectorized_case.q.out
+++ b/ql/src/test/results/clientpositive/tez/vectorized_case.q.out
@@ -45,7 +45,7 @@ STAGE PLANS:
         TableScan
           alias: alltypesorc
           Filter Operator
-            predicate: ((csmallint = 418) or (csmallint = 12205) or (csmallint = 10583)) (type: boolean)
+            predicate: (csmallint) IN (418, 12205, 10583) (type: boolean)
             Select Operator
               expressions: csmallint (type: smallint), CASE WHEN ((csmallint = 418)) THEN ('a') WHEN ((csmallint = 12205)) THEN ('b') ELSE ('c') END (type: string), CASE (csmallint) WHEN (418) THEN ('a') WHEN (12205) THEN ('b') ELSE ('c') END (type: string)
               outputColumnNames: _col0, _col1, _col2

http://git-wip-us.apache.org/repos/asf/hive/blob/139101d6/ql/src/test/results/clientpositive/vectorized_case.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vectorized_case.q.out b/ql/src/test/results/clientpositive/vectorized_case.q.out
index 73bf12d..9e47014 100644
--- a/ql/src/test/results/clientpositive/vectorized_case.q.out
+++ b/ql/src/test/results/clientpositive/vectorized_case.q.out
@@ -46,20 +46,19 @@ STAGE PLANS:
             alias: alltypesorc
             Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: ((csmallint = 418) or (csmallint = 12205) or (csmallint = 10583)) (type: boolean)
-              Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
+              predicate: (csmallint) IN (418, 12205, 10583) (type: boolean)
+              Statistics: Num rows: 6144 Data size: 1320982 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: csmallint (type: smallint), CASE WHEN ((csmallint = 418)) THEN ('a') WHEN ((csmallint = 12205)) THEN ('b') ELSE ('c') END (type: string), CASE (csmallint) WHEN (418) THEN ('a') WHEN (12205) THEN ('b') ELSE ('c') END (type: string)
                 outputColumnNames: _col0, _col1, _col2
-                Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 6144 Data size: 1320982 Basic stats: COMPLETE Column stats: NONE
                 File Output Operator
                   compressed: false
-                  Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 6144 Data size: 1320982 Basic stats: COMPLETE Column stats: NONE
                   table:
                       input format: org.apache.hadoop.mapred.TextInputFormat
                       output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                       serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-      Execution mode: vectorized
 
   Stage: Stage-0
     Fetch Operator


[29/50] [abbrv] hive git commit: HIVE-11538 : Add an option to skip init script while running tests (Ashutosh Chauhan via Sergey Shelukhin)

Posted by se...@apache.org.
HIVE-11538 : Add an option to skip init script while running tests (Ashutosh Chauhan via Sergey Shelukhin)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/6e762919
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/6e762919
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/6e762919

Branch: refs/heads/hbase-metastore
Commit: 6e7629193ed7b8714bd5ae5ab48bd10c2cbd85cf
Parents: 433ea9c
Author: Ashutosh Chauhan <ha...@apache.org>
Authored: Thu Aug 13 20:42:33 2015 -0700
Committer: Ashutosh Chauhan <ha...@apache.org>
Committed: Thu Aug 13 20:43:46 2015 -0700

----------------------------------------------------------------------
 itests/qtest/pom.xml                            | 26 ++++----
 .../org/apache/hadoop/hive/ql/QTestUtil.java    | 62 ++++++++++----------
 2 files changed, 44 insertions(+), 44 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/6e762919/itests/qtest/pom.xml
----------------------------------------------------------------------
diff --git a/itests/qtest/pom.xml b/itests/qtest/pom.xml
index 44d30da..0588233 100644
--- a/itests/qtest/pom.xml
+++ b/itests/qtest/pom.xml
@@ -30,7 +30,7 @@
 
   <properties>
     <hive.path.to.root>../..</hive.path.to.root>
-
+    <initScript>q_test_init.sql</initScript>
     <qfile></qfile>
     <qfile_regex></qfile_regex>
     <run_disabled>false</run_disabled>
@@ -420,7 +420,7 @@
                   logFile="${project.build.directory}/testparseneggen.log"
                   hadoopVersion="${active.hadoop.version}"
                   logDirectory="${project.build.directory}/qfile-results/negative/"
-                  initScript="q_test_init.sql"
+                  initScript="${initScript}"
                   cleanupScript="q_test_cleanup.sql"/>
 
                 <!-- Cli -->
@@ -437,7 +437,7 @@
                   logFile="${project.build.directory}/testclidrivergen.log"
                   logDirectory="${project.build.directory}/qfile-results/clientpositive/"
                   hadoopVersion="${active.hadoop.version}"
-                  initScript="q_test_init.sql"
+                  initScript="${initScript}"
                   cleanupScript="q_test_cleanup.sql"/>
 
                 <!-- Negative Cli -->
@@ -454,7 +454,7 @@
                   logFile="${project.build.directory}/testnegativeclidrivergen.log"
                   logDirectory="${project.build.directory}/qfile-results/clientnegative/"
                   hadoopVersion="${active.hadoop.version}"
-                  initScript="q_test_init.sql"
+                  initScript="${initScript}"
                   cleanupScript="q_test_cleanup.sql"/>
 
                 <!-- Compare Cli -->
@@ -470,7 +470,7 @@
                   logFile="${project.build.directory}/testcompareclidrivergen.log"
                   logDirectory="${project.build.directory}/qfile-results/clientcompare/"
                   hadoopVersion="${active.hadoop.version}"
-                  initScript="q_test_init.sql"
+                  initScript="${initScript}"
                   cleanupScript="q_test_cleanup.sql"/>
 
                 <!-- Minimr -->
@@ -487,7 +487,7 @@
                   logFile="${project.build.directory}/testminimrclidrivergen.log"
                   logDirectory="${project.build.directory}/qfile-results/clientpositive/"
                   hadoopVersion="${active.hadoop.version}"
-                  initScript="q_test_init.sql"
+                  initScript="${initScript}"
                   cleanupScript="q_test_cleanup.sql"/>
 
                 <if>
@@ -508,7 +508,7 @@
                               logFile="${project.build.directory}/testminitezclidrivergen.log"
                               logDirectory="${project.build.directory}/qfile-results/clientpositive/"
                               hadoopVersion="${active.hadoop.version}"
-                              initScript="q_test_init.sql"
+                              initScript="${initScript}"
                               cleanupScript="q_test_cleanup.sql"/>
 
                     <qtestgen hiveRootDirectory="${basedir}/${hive.path.to.root}/"
@@ -547,7 +547,7 @@
                   logFile="${project.build.directory}/testnegativeminimrclidrivergen.log"
                   logDirectory="${project.build.directory}/qfile-results/clientnegative/"
                   hadoopVersion="${hadoopVersion}"
-                  initScript="q_test_init.sql"
+                  initScript="${initScript}"
                   cleanupScript="q_test_cleanup.sql"/>
 
                 <!-- HBase Positive -->
@@ -561,7 +561,7 @@
                   resultsDirectory="${basedir}/${hive.path.to.root}/hbase-handler/src/test/results/positive/" className="TestHBaseCliDriver"
                   logFile="${project.build.directory}/testhbaseclidrivergen.log"
                   logDirectory="${project.build.directory}/qfile-results/hbase-handler/positive/"
-                  initScript="q_test_init.sql"
+                  initScript="${initScript}"
                   cleanupScript="q_test_cleanup.sql"/>
 
                 <!-- HBase Minimr -->
@@ -575,7 +575,7 @@
                   resultsDirectory="${basedir}/${hive.path.to.root}/hbase-handler/src/test/results/positive/" className="TestHBaseMinimrCliDriver"
                   logFile="${project.build.directory}/testhbaseminimrclidrivergen.log"
                   logDirectory="${project.build.directory}/qfile-results/hbase-handler/minimrpositive/"
-                  initScript="q_test_init.sql"
+                  initScript="${initScript}"
                   cleanupScript="q_test_cleanup.sql"/>
 
                 <!-- HBase Negative -->
@@ -589,7 +589,7 @@
                   resultsDirectory="${basedir}/${hive.path.to.root}/hbase-handler/src/test/results/negative/" className="TestHBaseNegativeCliDriver"
                   logFile="${project.build.directory}/testhbasenegativeclidrivergen.log"
                   logDirectory="${project.build.directory}/qfile-results/hbase-handler/negative"
-                  initScript="q_test_init.sql"
+                  initScript="${initScript}"
                   cleanupScript="q_test_cleanup.sql"/>
 
                 <!-- Beeline -->
@@ -626,7 +626,7 @@
                   logFile="${project.build.directory}/testcontribclidrivergen.log"
                   logDirectory="${project.build.directory}/qfile-results/contribclientpositive"
                   hadoopVersion="${hadoopVersion}"
-                  initScript="q_test_init.sql"
+                  initScript="${initScript}"
                   cleanupScript="q_test_cleanup.sql"/>
 
                 <qtestgen hiveRootDirectory="${basedir}/${hive.path.to.root}/"
@@ -639,7 +639,7 @@
                   resultsDirectory="${basedir}/${hive.path.to.root}/contrib/src/test/results/clientnegative/" className="TestContribNegativeCliDriver"
                   logFile="${project.build.directory}/testcontribnegclidrivergen.log"
                   logDirectory="${project.build.directory}/qfile-results/contribclientnegative"
-                  initScript="q_test_init.sql"
+                  initScript="${initScript}"
                   cleanupScript="q_test_cleanup.sql"/>
 
 

http://git-wip-us.apache.org/repos/asf/hive/blob/6e762919/itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestUtil.java
----------------------------------------------------------------------
diff --git a/itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestUtil.java b/itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestUtil.java
index 39d5d9e..3fae0ba 100644
--- a/itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestUtil.java
+++ b/itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestUtil.java
@@ -121,8 +121,8 @@ public class QTestUtil {
 
   private static final Log LOG = LogFactory.getLog("QTestUtil");
   private static final String QTEST_LEAVE_FILES = "QTEST_LEAVE_FILES";
-  private final String defaultInitScript = "q_test_init.sql";
-  private final String defaultCleanupScript = "q_test_cleanup.sql";
+  private final static String defaultInitScript = "q_test_init.sql";
+  private final static String defaultCleanupScript = "q_test_cleanup.sql";
   private final String[] testOnlyCommands = new String[]{"crypto"};
 
   private String testWarehouse;
@@ -149,7 +149,6 @@ public class QTestUtil {
   private HadoopShims.MiniMrShim mr = null;
   private HadoopShims.MiniDFSShim dfs = null;
   private HadoopShims.HdfsEncryptionShim hes = null;
-  private boolean miniMr = false;
   private String hadoopVer = null;
   private QTestSetup setup = null;
   private SparkSession sparkSession = null;
@@ -209,7 +208,7 @@ public class QTestUtil {
           continue;
         }
 
-        if (file.isDir()) {
+        if (file.isDirectory()) {
           if (!destFs.exists(local_path)) {
             destFs.mkdirs(local_path);
           }
@@ -410,14 +409,9 @@ public class QTestUtil {
     if (scriptsDir == null) {
       scriptsDir = new File(".").getAbsolutePath() + "/data/scripts";
     }
-    if (initScript.isEmpty()) {
-      initScript = defaultInitScript;
-    }
-    if (cleanupScript.isEmpty()) {
-      cleanupScript = defaultCleanupScript;
-    }
-    this.initScript = scriptsDir + "/" + initScript;
-    this.cleanupScript = scriptsDir + "/" + cleanupScript;
+
+    this.initScript = scriptsDir + File.separator + initScript;
+    this.cleanupScript = scriptsDir + File.separator + cleanupScript;
 
     overWrite = "true".equalsIgnoreCase(System.getProperty("test.output.overwrite"));
 
@@ -705,7 +699,7 @@ public class QTestUtil {
       FileSystem fileSystem = p.getFileSystem(conf);
       if (fileSystem.exists(p)) {
         for (FileStatus status : fileSystem.listStatus(p)) {
-          if (status.isDir() && !srcTables.contains(status.getPath().getName())) {
+          if (status.isDirectory() && !srcTables.contains(status.getPath().getName())) {
             fileSystem.delete(status.getPath(), true);
           }
         }
@@ -755,16 +749,19 @@ public class QTestUtil {
     clearTablesCreatedDuringTests();
     clearKeysCreatedInTests();
 
-    SessionState.get().getConf().setBoolean("hive.test.shutdown.phase", true);
-
-    String cleanupCommands = readEntireFileIntoString(new File(cleanupScript));
-    LOG.info("Cleanup (" + cleanupScript + "):\n" + cleanupCommands);
-    if(cliDriver == null) {
-      cliDriver = new CliDriver();
+    File cleanupFile = new File(cleanupScript);
+    if (cleanupFile.isFile()) {
+      String cleanupCommands = readEntireFileIntoString(cleanupFile);
+      LOG.info("Cleanup (" + cleanupScript + "):\n" + cleanupCommands);
+      if(cliDriver == null) {
+        cliDriver = new CliDriver();
+      }
+      SessionState.get().getConf().setBoolean("hive.test.shutdown.phase", true);
+      cliDriver.processLine(cleanupCommands);
+      SessionState.get().getConf().setBoolean("hive.test.shutdown.phase", false);
+    } else {
+      LOG.info("No cleanup script detected. Skipping.");
     }
-    cliDriver.processLine(cleanupCommands);
-
-    SessionState.get().getConf().setBoolean("hive.test.shutdown.phase", false);
 
     // delete any contents in the warehouse dir
     Path p = new Path(testWarehouse);
@@ -809,14 +806,21 @@ public class QTestUtil {
     if(!isSessionStateStarted) {
       startSessionState();
     }
-    conf.setBoolean("hive.test.init.phase", true);
 
-    String initCommands = readEntireFileIntoString(new File(this.initScript));
-    LOG.info("Initial setup (" + initScript + "):\n" + initCommands);
     if(cliDriver == null) {
       cliDriver = new CliDriver();
     }
     cliDriver.processLine("set test.data.dir=" + testFiles + ";");
+    File scriptFile = new File(this.initScript);
+    if (!scriptFile.isFile()) {
+      LOG.info("No init script detected. Skipping");
+      return;
+    }
+    conf.setBoolean("hive.test.init.phase", true);
+
+    String initCommands = readEntireFileIntoString(scriptFile);
+    LOG.info("Initial setup (" + initScript + "):\n" + initCommands);
+
     cliDriver.processLine(initCommands);
 
     conf.setBoolean("hive.test.init.phase", false);
@@ -912,6 +916,7 @@ public class QTestUtil {
 
   private CliSessionState createSessionState() {
    return new CliSessionState(conf) {
+      @Override
       public void setSparkSession(SparkSession sparkSession) {
         super.setSparkSession(sparkSession);
         if (sparkSession != null) {
@@ -1136,11 +1141,6 @@ public class QTestUtil {
     return commands;
   }
 
-  private boolean isComment(final String line) {
-    String lineTrimmed = line.trim();
-    return lineTrimmed.startsWith("#") || lineTrimmed.startsWith("--");
-  }
-
   public boolean shouldBeSkipped(String tname) {
     return qSkipSet.contains(tname);
   }
@@ -1816,7 +1816,7 @@ public class QTestUtil {
   {
     QTestUtil[] qt = new QTestUtil[qfiles.length];
     for (int i = 0; i < qfiles.length; i++) {
-      qt[i] = new QTestUtil(resDir, logDir, MiniClusterType.none, null, "0.20", "", "");
+      qt[i] = new QTestUtil(resDir, logDir, MiniClusterType.none, null, "0.20", defaultInitScript, defaultCleanupScript);
       qt[i].addFile(qfiles[i]);
       qt[i].clearTestSideEffects();
     }
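
With initScript exposed as a Maven property (pom.xml above) and QTestUtil now skipping scripts that do not resolve to an existing file, one plausible way to run a q-test without the init script is to override the property on the command line; the test class and q-file names below are purely illustrative:

  cd itests/qtest
  mvn test -Dtest=TestCliDriver -Dqfile=foo.q -DinitScript=no_such_init.sql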


[33/50] [abbrv] hive git commit: HIVE-11304: Migrate to Log4j2 from Log4j 1.x (Prasanth Jayachandran reviewed by Thejas Nair, Sergey Shelukhin)

Posted by se...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/c93d6c77/itests/pom.xml
----------------------------------------------------------------------
diff --git a/itests/pom.xml b/itests/pom.xml
index f156cc4..acce713 100644
--- a/itests/pom.xml
+++ b/itests/pom.xml
@@ -94,7 +94,7 @@
                       }
                       mkdir -p $DOWNLOAD_DIR
                       download "http://d3jw87u4immizc.cloudfront.net/spark-tarball/spark-${spark.version}-bin-hadoop2-without-hive.tgz" "spark"
-                      cp -f $HIVE_ROOT/data/conf/spark/log4j.properties $BASE_DIR/spark/conf/
+                      cp -f $HIVE_ROOT/data/conf/spark/log4j2.xml $BASE_DIR/spark/conf/
                       sed '/package /d' ${basedir}/${hive.path.to.root}/contrib/src/java/org/apache/hadoop/hive/contrib/udf/example/UDFExampleAdd.java > /tmp/UDFExampleAdd.java
                       javac -cp  ${settings.localRepository}/org/apache/hive/hive-exec/${project.version}/hive-exec-${project.version}.jar /tmp/UDFExampleAdd.java -d /tmp
                       jar -cf /tmp/udfexampleadd-1.0.jar -C /tmp UDFExampleAdd.class

http://git-wip-us.apache.org/repos/asf/hive/blob/c93d6c77/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
----------------------------------------------------------------------
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java b/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
index b33cb58..65117c4 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
@@ -5931,7 +5931,7 @@ public class HiveMetaStore extends ThriftHiveMetastore {
 
     // If the log4j.configuration property hasn't already been explicitly set,
     // use Hive's default log4j configuration
-    if (System.getProperty("log4j.configuration") == null) {
+    if (System.getProperty("log4j.configurationFile") == null) {
       // NOTE: It is critical to do this here so that log4j is reinitialized
       // before any of the other core hive classes are loaded
       try {
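
Log4j 2 resolves its configuration through the log4j.configurationFile system property, which is what the updated check above looks for. A purely illustrative way to point a JVM at the new XML template (path hypothetical):

  java -Dlog4j.configurationFile=/etc/hive/conf/hive-log4j2.xml ...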

http://git-wip-us.apache.org/repos/asf/hive/blob/c93d6c77/metastore/src/test/org/apache/hadoop/hive/metastore/txn/TestCompactionTxnHandler.java
----------------------------------------------------------------------
diff --git a/metastore/src/test/org/apache/hadoop/hive/metastore/txn/TestCompactionTxnHandler.java b/metastore/src/test/org/apache/hadoop/hive/metastore/txn/TestCompactionTxnHandler.java
index ad99427..df42f1a 100644
--- a/metastore/src/test/org/apache/hadoop/hive/metastore/txn/TestCompactionTxnHandler.java
+++ b/metastore/src/test/org/apache/hadoop/hive/metastore/txn/TestCompactionTxnHandler.java
@@ -17,15 +17,11 @@
  */
 package org.apache.hadoop.hive.metastore.txn;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.metastore.api.*;
-import org.apache.log4j.Level;
-import org.apache.log4j.LogManager;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
+import static junit.framework.Assert.assertEquals;
+import static junit.framework.Assert.assertNotNull;
+import static junit.framework.Assert.assertNull;
+import static junit.framework.Assert.assertTrue;
+import static junit.framework.Assert.fail;
 
 import java.util.ArrayList;
 import java.util.Arrays;
@@ -34,7 +30,29 @@ import java.util.Set;
 import java.util.SortedSet;
 import java.util.TreeSet;
 
-import static junit.framework.Assert.*;
+import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.metastore.api.AbortTxnRequest;
+import org.apache.hadoop.hive.metastore.api.AddDynamicPartitions;
+import org.apache.hadoop.hive.metastore.api.CommitTxnRequest;
+import org.apache.hadoop.hive.metastore.api.CompactionRequest;
+import org.apache.hadoop.hive.metastore.api.CompactionType;
+import org.apache.hadoop.hive.metastore.api.GetOpenTxnsResponse;
+import org.apache.hadoop.hive.metastore.api.LockComponent;
+import org.apache.hadoop.hive.metastore.api.LockLevel;
+import org.apache.hadoop.hive.metastore.api.LockRequest;
+import org.apache.hadoop.hive.metastore.api.LockResponse;
+import org.apache.hadoop.hive.metastore.api.LockState;
+import org.apache.hadoop.hive.metastore.api.LockType;
+import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.hive.metastore.api.OpenTxnRequest;
+import org.apache.hadoop.hive.metastore.api.OpenTxnsResponse;
+import org.apache.hadoop.hive.metastore.api.ShowCompactRequest;
+import org.apache.hadoop.hive.metastore.api.ShowCompactResponse;
+import org.apache.hadoop.hive.metastore.api.ShowCompactResponseElement;
+import org.apache.hadoop.hive.metastore.api.UnlockRequest;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
 
 /**
  * Tests for TxnHandler.
@@ -43,11 +61,9 @@ public class TestCompactionTxnHandler {
 
   private HiveConf conf = new HiveConf();
   private CompactionTxnHandler txnHandler;
-  static final private Log LOG = LogFactory.getLog(TestCompactionTxnHandler.class);
 
   public TestCompactionTxnHandler() throws Exception {
     TxnDbUtil.setConfValues(conf);
-    LogManager.getLogger(TxnHandler.class.getName()).setLevel(Level.DEBUG);
     tearDown();
   }
 

http://git-wip-us.apache.org/repos/asf/hive/blob/c93d6c77/metastore/src/test/org/apache/hadoop/hive/metastore/txn/TestTxnHandler.java
----------------------------------------------------------------------
diff --git a/metastore/src/test/org/apache/hadoop/hive/metastore/txn/TestTxnHandler.java b/metastore/src/test/org/apache/hadoop/hive/metastore/txn/TestTxnHandler.java
index f478184..6dc0bd3 100644
--- a/metastore/src/test/org/apache/hadoop/hive/metastore/txn/TestTxnHandler.java
+++ b/metastore/src/test/org/apache/hadoop/hive/metastore/txn/TestTxnHandler.java
@@ -17,16 +17,11 @@
  */
 package org.apache.hadoop.hive.metastore.txn;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.metastore.api.*;
-import org.apache.log4j.Level;
-import org.apache.log4j.LogManager;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Ignore;
-import org.junit.Test;
+import static junit.framework.Assert.assertEquals;
+import static junit.framework.Assert.assertFalse;
+import static junit.framework.Assert.assertNull;
+import static junit.framework.Assert.assertTrue;
+import static junit.framework.Assert.fail;
 
 import java.sql.Connection;
 import java.sql.SQLException;
@@ -36,21 +31,66 @@ import java.util.List;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicBoolean;
 
-import static junit.framework.Assert.*;
+import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.metastore.api.AbortTxnRequest;
+import org.apache.hadoop.hive.metastore.api.CheckLockRequest;
+import org.apache.hadoop.hive.metastore.api.CommitTxnRequest;
+import org.apache.hadoop.hive.metastore.api.CompactionRequest;
+import org.apache.hadoop.hive.metastore.api.CompactionType;
+import org.apache.hadoop.hive.metastore.api.GetOpenTxnsInfoResponse;
+import org.apache.hadoop.hive.metastore.api.GetOpenTxnsResponse;
+import org.apache.hadoop.hive.metastore.api.HeartbeatRequest;
+import org.apache.hadoop.hive.metastore.api.HeartbeatTxnRangeRequest;
+import org.apache.hadoop.hive.metastore.api.HeartbeatTxnRangeResponse;
+import org.apache.hadoop.hive.metastore.api.LockComponent;
+import org.apache.hadoop.hive.metastore.api.LockLevel;
+import org.apache.hadoop.hive.metastore.api.LockRequest;
+import org.apache.hadoop.hive.metastore.api.LockResponse;
+import org.apache.hadoop.hive.metastore.api.LockState;
+import org.apache.hadoop.hive.metastore.api.LockType;
+import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.hive.metastore.api.NoSuchLockException;
+import org.apache.hadoop.hive.metastore.api.NoSuchTxnException;
+import org.apache.hadoop.hive.metastore.api.OpenTxnRequest;
+import org.apache.hadoop.hive.metastore.api.OpenTxnsResponse;
+import org.apache.hadoop.hive.metastore.api.ShowCompactRequest;
+import org.apache.hadoop.hive.metastore.api.ShowCompactResponse;
+import org.apache.hadoop.hive.metastore.api.ShowCompactResponseElement;
+import org.apache.hadoop.hive.metastore.api.ShowLocksRequest;
+import org.apache.hadoop.hive.metastore.api.ShowLocksResponse;
+import org.apache.hadoop.hive.metastore.api.ShowLocksResponseElement;
+import org.apache.hadoop.hive.metastore.api.TxnAbortedException;
+import org.apache.hadoop.hive.metastore.api.TxnInfo;
+import org.apache.hadoop.hive.metastore.api.TxnOpenException;
+import org.apache.hadoop.hive.metastore.api.TxnState;
+import org.apache.hadoop.hive.metastore.api.UnlockRequest;
+import org.apache.logging.log4j.Level;
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.core.LoggerContext;
+import org.apache.logging.log4j.core.config.Configuration;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Ignore;
+import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * Tests for TxnHandler.
  */
 public class TestTxnHandler {
   static final private String CLASS_NAME = TxnHandler.class.getName();
-  static final private Log LOG = LogFactory.getLog(CLASS_NAME);
+  private static final Logger LOG = LoggerFactory.getLogger(CLASS_NAME);
 
   private HiveConf conf = new HiveConf();
   private TxnHandler txnHandler;
 
   public TestTxnHandler() throws Exception {
     TxnDbUtil.setConfValues(conf);
-    LogManager.getLogger(TxnHandler.class.getName()).setLevel(Level.DEBUG);
+    LoggerContext ctx = (LoggerContext) LogManager.getContext(false);
+    Configuration conf = ctx.getConfiguration();
+    conf.getLoggerConfig(CLASS_NAME).setLevel(Level.DEBUG);
+    ctx.updateLoggers(conf);
     tearDown();
   }
 

http://git-wip-us.apache.org/repos/asf/hive/blob/c93d6c77/packaging/src/main/assembly/bin.xml
----------------------------------------------------------------------
diff --git a/packaging/src/main/assembly/bin.xml b/packaging/src/main/assembly/bin.xml
index 63253c5..0fa6af8 100644
--- a/packaging/src/main/assembly/bin.xml
+++ b/packaging/src/main/assembly/bin.xml
@@ -275,7 +275,7 @@
       <fileMode>644</fileMode>
       <includes>
         <include>webhcat-default.xml</include>
-        <include>webhcat-log4j.properties</include>
+        <include>webhcat-log4j2.xml</include>
       </includes>
       <filtered>true</filtered>
       <outputDirectory>hcatalog/etc/webhcat</outputDirectory>
@@ -323,19 +323,19 @@
 
   <files>
     <file>
-      <source>${project.parent.basedir}/common/src/main/resources/hive-log4j.properties</source>
+      <source>${project.parent.basedir}/common/src/main/resources/hive-log4j2.xml</source>
       <outputDirectory>conf</outputDirectory>
-      <destName>hive-log4j.properties.template</destName>
+      <destName>hive-log4j2.xml.template</destName>
     </file>
     <file>
-      <source>${project.parent.basedir}/ql/src/main/resources/hive-exec-log4j.properties</source>
+      <source>${project.parent.basedir}/ql/src/main/resources/hive-exec-log4j2.xml</source>
       <outputDirectory>conf</outputDirectory>
-      <destName>hive-exec-log4j.properties.template</destName>
+      <destName>hive-exec-log4j2.xml.template</destName>
     </file>
     <file>
-      <source>${project.parent.basedir}/beeline/src/main/resources/beeline-log4j.properties</source>
+      <source>${project.parent.basedir}/beeline/src/main/resources/beeline-log4j2.xml</source>
       <outputDirectory>conf</outputDirectory>
-      <destName>beeline-log4j.properties.template</destName>
+      <destName>beeline-log4j2.xml.template</destName>
     </file>
     <file>
       <source>${project.parent.basedir}/hcatalog/README.txt</source>

http://git-wip-us.apache.org/repos/asf/hive/blob/c93d6c77/pom.xml
----------------------------------------------------------------------
diff --git a/pom.xml b/pom.xml
index 0383e01..15c2805 100644
--- a/pom.xml
+++ b/pom.xml
@@ -148,8 +148,7 @@
     <kryo.version>2.22</kryo.version>
     <libfb303.version>0.9.2</libfb303.version>
     <libthrift.version>0.9.2</libthrift.version>
-    <log4j.version>1.2.16</log4j.version>
-    <log4j-extras.version>1.2.17</log4j-extras.version>
+    <log4j2.version>2.3</log4j2.version>
     <opencsv.version>2.3</opencsv.version>
     <mockito-all.version>1.9.5</mockito-all.version>
     <mina.version>2.0.0-M5</mina.version>
@@ -366,14 +365,24 @@
         <version>${junit.version}</version>
       </dependency>
       <dependency>
-        <groupId>log4j</groupId>
-        <artifactId>log4j</artifactId>
-        <version>${log4j.version}</version>
+        <groupId>org.apache.logging.log4j</groupId>
+        <artifactId>log4j-1.2-api</artifactId>
+        <version>${log4j2.version}</version>
       </dependency>
       <dependency>
-        <groupId>log4j</groupId>
-        <artifactId>apache-log4j-extras</artifactId>
-        <version>${log4j-extras.version}</version>
+        <groupId>org.apache.logging.log4j</groupId>
+        <artifactId>log4j-web</artifactId>
+        <version>${log4j2.version}</version>
+      </dependency>
+      <dependency>
+        <groupId>org.apache.logging.log4j</groupId>
+        <artifactId>log4j-slf4j-impl</artifactId>
+        <version>${log4j2.version}</version>
+      </dependency>
+      <dependency>
+        <groupId>org.apache.logging.log4j</groupId>
+        <artifactId>log4j-jcl</artifactId>
+        <version>${log4j2.version}</version>
       </dependency>
       <dependency>
         <groupId>org.antlr</groupId>
@@ -584,11 +593,6 @@
         <version>${slf4j.version}</version>
       </dependency>
       <dependency>
-        <groupId>org.slf4j</groupId>
-        <artifactId>slf4j-log4j12</artifactId>
-        <version>${slf4j.version}</version>
-      </dependency>
-      <dependency>
         <groupId>xerces</groupId>
         <artifactId>xercesImpl</artifactId>
         <version>${xerces.version}</version>
@@ -604,11 +608,6 @@
       <artifactId>slf4j-api</artifactId>
       <version>${slf4j.version}</version>
     </dependency>
-    <dependency>
-      <groupId>org.slf4j</groupId>
-      <artifactId>slf4j-log4j12</artifactId>
-      <version>${slf4j.version}</version>
-    </dependency>
   </dependencies>
 
   <build>
@@ -872,7 +871,7 @@
             <!-- required for hive-exec jar path and tests which reference a jar -->
             <maven.local.repository>${maven.repo.local}</maven.local.repository>
             <mapred.job.tracker>local</mapred.job.tracker>
-            <log4j.configuration>${test.log4j.scheme}${test.tmp.dir}/conf/hive-log4j.properties</log4j.configuration>
+            <log4j.configurationFile>${test.log4j.scheme}${test.tmp.dir}/conf/hive-log4j2.xml</log4j.configurationFile>
             <log4j.debug>true</log4j.debug>
             <!-- don't dirty up /tmp -->
             <java.io.tmpdir>${test.tmp.dir}</java.io.tmpdir>

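Since slf4j-log4j12 is dropped here and log4j-slf4j-impl/log4j-jcl are introduced, SLF4J and commons-logging call sites do not change; only the backend they route to does. A small hedged sketch (the class name is illustrative):

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    public class Slf4jBindingSketch {
      // Unchanged SLF4J facade usage; with log4j-slf4j-impl on the classpath these
      // calls are delivered to Log4j 2 instead of the removed log4j 1.x backend.
      private static final Logger LOG = LoggerFactory.getLogger(Slf4jBindingSketch.class);

      public static void main(String[] args) {
        LOG.info("Routed to Log4j 2 through the log4j-slf4j-impl binding");
      }
    }
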
http://git-wip-us.apache.org/repos/asf/hive/blob/c93d6c77/ql/pom.xml
----------------------------------------------------------------------
diff --git a/ql/pom.xml b/ql/pom.xml
index e7a8e7b..36b3433 100644
--- a/ql/pom.xml
+++ b/ql/pom.xml
@@ -112,14 +112,19 @@
       <version>${javolution.version}</version>
     </dependency>
     <dependency>
-      <groupId>log4j</groupId>
-      <artifactId>log4j</artifactId>
-      <version>${log4j.version}</version>
+      <groupId>org.apache.logging.log4j</groupId>
+      <artifactId>log4j-1.2-api</artifactId>
+      <version>${log4j2.version}</version>
     </dependency>
     <dependency>
-      <groupId>log4j</groupId>
-      <artifactId>apache-log4j-extras</artifactId>
-      <version>${log4j-extras.version}</version>
+      <groupId>org.apache.logging.log4j</groupId>
+      <artifactId>log4j-slf4j-impl</artifactId>
+      <version>${log4j2.version}</version>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.logging.log4j</groupId>
+      <artifactId>log4j-jcl</artifactId>
+      <version>${log4j2.version}</version>
     </dependency>
     <dependency>
       <groupId>org.antlr</groupId>

http://git-wip-us.apache.org/repos/asf/hive/blob/c93d6c77/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/ExecDriver.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/ExecDriver.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/ExecDriver.java
index a2cf712..82345ee 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/ExecDriver.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/ExecDriver.java
@@ -27,7 +27,6 @@ import java.lang.management.MemoryMXBean;
 import java.util.ArrayList;
 import java.util.Collection;
 import java.util.Collections;
-import java.util.Enumeration;
 import java.util.List;
 import java.util.Properties;
 
@@ -57,13 +56,14 @@ import org.apache.hadoop.hive.ql.exec.PartitionKeySampler;
 import org.apache.hadoop.hive.ql.exec.TableScanOperator;
 import org.apache.hadoop.hive.ql.exec.Task;
 import org.apache.hadoop.hive.ql.exec.Utilities;
-import org.apache.hadoop.hive.ql.exec.tez.TezSessionState;
 import org.apache.hadoop.hive.ql.exec.tez.TezSessionPoolManager;
+import org.apache.hadoop.hive.ql.exec.tez.TezSessionState;
 import org.apache.hadoop.hive.ql.io.BucketizedHiveInputFormat;
 import org.apache.hadoop.hive.ql.io.HiveFileFormatUtils;
 import org.apache.hadoop.hive.ql.io.HiveKey;
 import org.apache.hadoop.hive.ql.io.HiveOutputFormatImpl;
 import org.apache.hadoop.hive.ql.io.IOPrepareCache;
+import org.apache.hadoop.hive.ql.log.NullAppender;
 import org.apache.hadoop.hive.ql.metadata.HiveException;
 import org.apache.hadoop.hive.ql.plan.FetchWork;
 import org.apache.hadoop.hive.ql.plan.MapWork;
@@ -88,11 +88,12 @@ import org.apache.hadoop.mapred.JobConf;
 import org.apache.hadoop.mapred.Partitioner;
 import org.apache.hadoop.mapred.RunningJob;
 import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.log4j.Appender;
-import org.apache.log4j.BasicConfigurator;
-import org.apache.log4j.FileAppender;
-import org.apache.log4j.LogManager;
-import org.apache.log4j.varia.NullAppender;
+import org.apache.logging.log4j.Level;
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
+import org.apache.logging.log4j.core.Appender;
+import org.apache.logging.log4j.core.appender.FileAppender;
+import org.apache.logging.log4j.core.appender.RollingFileAppender;
 
 /**
  * ExecDriver is the central class in co-ordinating execution of any map-reduce task.
@@ -687,8 +688,10 @@ public class ExecDriver extends Task<MapredWork> implements Serializable, Hadoop
     if (noLog) {
       // If started from main(), and noLog is on, we should not output
       // any logs. To turn the log on, please set -Dtest.silent=false
-      BasicConfigurator.resetConfiguration();
-      BasicConfigurator.configure(new NullAppender());
+      Logger logger = org.apache.logging.log4j.LogManager.getRootLogger();
+      NullAppender appender = NullAppender.createNullAppender();
+      appender.addToLogger(logger.getName(), Level.ERROR);
+      appender.start();
     } else {
       setupChildLog4j(conf);
     }
@@ -703,10 +706,12 @@ public class ExecDriver extends Task<MapredWork> implements Serializable, Hadoop
 
     // print out the location of the log file for the user so
     // that it's easy to find reason for local mode execution failures
-    for (Appender appender : Collections.list((Enumeration<Appender>) LogManager.getRootLogger()
-        .getAllAppenders())) {
+    for (Appender appender : ((org.apache.logging.log4j.core.Logger) LogManager.getRootLogger())
+            .getAppenders().values()) {
       if (appender instanceof FileAppender) {
-        console.printInfo("Execution log at: " + ((FileAppender) appender).getFile());
+        console.printInfo("Execution log at: " + ((FileAppender) appender).getFileName());
+      } else if (appender instanceof RollingFileAppender) {
+        console.printInfo("Execution log at: " + ((RollingFileAppender) appender).getFileName());
       }
     }
 

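The loop above replaces log4j 1.x's Logger.getAllAppenders(); in Log4j 2 the appenders are reached by casting the root logger to the core Logger implementation. A standalone sketch of the same lookup (the class name is illustrative; the cast assumes log4j-core is the active implementation, as it is in this patch):

    import org.apache.logging.log4j.LogManager;
    import org.apache.logging.log4j.core.Appender;
    import org.apache.logging.log4j.core.Logger;
    import org.apache.logging.log4j.core.appender.FileAppender;
    import org.apache.logging.log4j.core.appender.RollingFileAppender;

    public class AppenderLookupSketch {
      public static void main(String[] args) {
        Logger root = (Logger) LogManager.getRootLogger();
        for (Appender appender : root.getAppenders().values()) {
          if (appender instanceof FileAppender) {
            System.out.println("Execution log at: " + ((FileAppender) appender).getFileName());
          } else if (appender instanceof RollingFileAppender) {
            System.out.println("Execution log at: " + ((RollingFileAppender) appender).getFileName());
          }
        }
      }
    }
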
http://git-wip-us.apache.org/repos/asf/hive/blob/c93d6c77/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/HadoopJobExecHelper.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/HadoopJobExecHelper.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/HadoopJobExecHelper.java
index 6a6593c..44dfe3e 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/HadoopJobExecHelper.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/HadoopJobExecHelper.java
@@ -24,7 +24,6 @@ import java.text.SimpleDateFormat;
 import java.util.ArrayList;
 import java.util.Calendar;
 import java.util.Collections;
-import java.util.Enumeration;
 import java.util.HashMap;
 import java.util.LinkedList;
 import java.util.List;
@@ -33,8 +32,6 @@ import java.util.concurrent.TimeUnit;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hive.common.JavaUtils;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
 import org.apache.hadoop.hive.ql.MapRedStats;
@@ -59,9 +56,11 @@ import org.apache.hadoop.mapred.JobStatus;
 import org.apache.hadoop.mapred.RunningJob;
 import org.apache.hadoop.mapred.TaskCompletionEvent;
 import org.apache.hadoop.mapred.TaskReport;
-import org.apache.log4j.Appender;
-import org.apache.log4j.FileAppender;
-import org.apache.log4j.LogManager;
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.core.Appender;
+import org.apache.logging.log4j.core.Logger;
+import org.apache.logging.log4j.core.appender.FileAppender;
+import org.apache.logging.log4j.core.appender.RollingFileAppender;
 
 public class HadoopJobExecHelper {
 
@@ -492,10 +491,11 @@ public class HadoopJobExecHelper {
     sb.append("Logs:\n");
     console.printError(sb.toString());
 
-    for (Appender a : Collections.list((Enumeration<Appender>)
-          LogManager.getRootLogger().getAllAppenders())) {
-      if (a instanceof FileAppender) {
-        console.printError((new Path(((FileAppender)a).getFile())).toUri().getPath());
+    for (Appender appender : ((Logger) LogManager.getRootLogger()).getAppenders().values()) {
+      if (appender instanceof FileAppender) {
+        console.printError(((FileAppender) appender).getFileName());
+      } else if (appender instanceof RollingFileAppender) {
+        console.printError(((RollingFileAppender) appender).getFileName());
       }
     }
   }

http://git-wip-us.apache.org/repos/asf/hive/blob/c93d6c77/ql/src/java/org/apache/hadoop/hive/ql/io/rcfile/stats/PartialScanTask.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/rcfile/stats/PartialScanTask.java b/ql/src/java/org/apache/hadoop/hive/ql/io/rcfile/stats/PartialScanTask.java
index 3cb9e9c..cee0878 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/rcfile/stats/PartialScanTask.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/rcfile/stats/PartialScanTask.java
@@ -21,8 +21,6 @@ package org.apache.hadoop.hive.ql.io.rcfile.stats;
 import java.io.IOException;
 import java.io.Serializable;
 import java.util.ArrayList;
-import java.util.Collections;
-import java.util.Enumeration;
 import java.util.List;
 
 import org.apache.commons.lang.StringUtils;
@@ -59,9 +57,11 @@ import org.apache.hadoop.mapred.InputFormat;
 import org.apache.hadoop.mapred.JobClient;
 import org.apache.hadoop.mapred.JobConf;
 import org.apache.hadoop.mapred.RunningJob;
-import org.apache.log4j.Appender;
-import org.apache.log4j.FileAppender;
-import org.apache.log4j.LogManager;
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.core.Appender;
+import org.apache.logging.log4j.core.Logger;
+import org.apache.logging.log4j.core.appender.FileAppender;
+import org.apache.logging.log4j.core.appender.RollingFileAppender;
 
 /**
  * PartialScanTask.
@@ -335,15 +335,15 @@ public class PartialScanTask extends Task<PartialScanWork> implements
 
     // print out the location of the log file for the user so
     // that it's easy to find reason for local mode execution failures
-    for (Appender appender : Collections
-        .list((Enumeration<Appender>) LogManager.getRootLogger()
-            .getAllAppenders())) {
+    for (Appender appender : ((Logger) LogManager.getRootLogger()).getAppenders().values()) {
       if (appender instanceof FileAppender) {
-        console.printInfo("Execution log at: "
-            + ((FileAppender) appender).getFile());
+        console.printInfo("Execution log at: " + ((FileAppender) appender).getFileName());
+      } else if (appender instanceof RollingFileAppender) {
+        console.printInfo("Execution log at: " + ((RollingFileAppender) appender).getFileName());
       }
     }
 
+
     PartialScanWork mergeWork = new PartialScanWork(inputPaths);
     DriverContext driverCxt = new DriverContext();
     PartialScanTask taskExec = new PartialScanTask();

http://git-wip-us.apache.org/repos/asf/hive/blob/c93d6c77/ql/src/java/org/apache/hadoop/hive/ql/log/HiveEventCounter.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/log/HiveEventCounter.java b/ql/src/java/org/apache/hadoop/hive/ql/log/HiveEventCounter.java
new file mode 100644
index 0000000..46662c4
--- /dev/null
+++ b/ql/src/java/org/apache/hadoop/hive/ql/log/HiveEventCounter.java
@@ -0,0 +1,135 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p/>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p/>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.ql.log;
+
+import java.io.Serializable;
+import java.util.concurrent.atomic.AtomicLongArray;
+
+import org.apache.hadoop.hive.common.classification.InterfaceAudience;
+import org.apache.logging.log4j.Level;
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.core.Filter;
+import org.apache.logging.log4j.core.Layout;
+import org.apache.logging.log4j.core.LogEvent;
+import org.apache.logging.log4j.core.LoggerContext;
+import org.apache.logging.log4j.core.appender.AbstractAppender;
+import org.apache.logging.log4j.core.config.Configuration;
+import org.apache.logging.log4j.core.config.LoggerConfig;
+import org.apache.logging.log4j.core.config.plugins.Plugin;
+import org.apache.logging.log4j.core.config.plugins.PluginAttribute;
+import org.apache.logging.log4j.core.config.plugins.PluginElement;
+import org.apache.logging.log4j.core.config.plugins.PluginFactory;
+import org.apache.logging.log4j.core.layout.PatternLayout;
+
+import com.google.common.annotations.VisibleForTesting;
+
+/**
+ * A Log4j 2 Appender that simply counts logging events in four levels:
+ * fatal, error, warn and info. The plugin name (HiveEventCounter) is what log4j2.xml references.
+ */
+@Plugin(name = "HiveEventCounter", category = "Core", elementType = "appender", printObject = true)
+public class HiveEventCounter extends AbstractAppender {
+  private static LoggerContext context = (LoggerContext) LogManager.getContext(false);
+  private static Configuration configuration = context.getConfiguration();
+  private static final String APPENDER_NAME = "HiveEventCounter";
+  private static final int FATAL = 0;
+  private static final int ERROR = 1;
+  private static final int WARN = 2;
+  private static final int INFO = 3;
+
+  private static class EventCounts {
+    private final AtomicLongArray counts = new AtomicLongArray(4);
+
+    private void incr(int i) {
+      counts.incrementAndGet(i);
+    }
+
+    private long get(int i) {
+      return counts.get(i);
+    }
+  }
+
+  private static EventCounts counts = new EventCounts();
+
+  protected HiveEventCounter(String name, Filter filter,
+      Layout<? extends Serializable> layout, boolean ignoreExceptions) {
+    super(name, filter, layout, ignoreExceptions);
+  }
+
+  @PluginFactory
+  public static HiveEventCounter createInstance(@PluginAttribute("name") String name,
+      @PluginAttribute("ignoreExceptions") boolean ignoreExceptions,
+      @PluginElement("Layout") Layout layout,
+      @PluginElement("Filters") Filter filter) {
+    if (name == null) {
+      name = APPENDER_NAME;
+    }
+
+    if (layout == null) {
+      layout = PatternLayout.createDefaultLayout();
+    }
+    return new HiveEventCounter(name, filter, layout, ignoreExceptions);
+  }
+
+  @InterfaceAudience.Private
+  public static long getFatal() {
+    return counts.get(FATAL);
+  }
+
+  @InterfaceAudience.Private
+  public static long getError() {
+    return counts.get(ERROR);
+  }
+
+  @InterfaceAudience.Private
+  public static long getWarn() {
+    return counts.get(WARN);
+  }
+
+  @InterfaceAudience.Private
+  public static long getInfo() {
+    return counts.get(INFO);
+  }
+
+  @VisibleForTesting
+  public void addToLogger(String loggerName, Level level) {
+    LoggerConfig loggerConfig = configuration.getLoggerConfig(loggerName);
+    loggerConfig.addAppender(this, level, null);
+    context.updateLoggers();
+  }
+
+  @VisibleForTesting
+  public void removeFromLogger(String loggerName) {
+    LoggerConfig loggerConfig = configuration.getLoggerConfig(loggerName);
+    loggerConfig.removeAppender(APPENDER_NAME);
+    context.updateLoggers();
+  }
+
+  public void append(LogEvent event) {
+    Level level = event.getLevel();
+    if (level.equals(Level.INFO)) {
+      counts.incr(INFO);
+    } else if (level.equals(Level.WARN)) {
+      counts.incr(WARN);
+    } else if (level.equals(Level.ERROR)) {
+      counts.incr(ERROR);
+    } else if (level.equals(Level.FATAL)) {
+      counts.incr(FATAL);
+    }
+  }
+}

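A hedged usage sketch of the new plugin outside of XML configuration; it mirrors the flow of the TestLog4j2Appenders test added later in this patch. Only ERROR/FATAL events are logged so the sketch also works with the default root level (ERROR when no log4j2 configuration file is found):

    import org.apache.hadoop.hive.ql.log.HiveEventCounter;
    import org.apache.logging.log4j.Level;
    import org.apache.logging.log4j.LogManager;
    import org.apache.logging.log4j.Logger;

    public class EventCounterSketch {
      public static void main(String[] args) {
        Logger logger = LogManager.getRootLogger();
        // Programmatic attachment; in normal use the appender is declared in hive-log4j2.xml.
        HiveEventCounter counter = HiveEventCounter.createInstance("EventCounter", true, null, null);
        counter.addToLogger(logger.getName(), Level.ERROR);
        counter.start();

        logger.error("one error");
        logger.fatal("one fatal");

        // Counters are static and cumulative across the JVM, which the new unit test relies on.
        System.out.println("errors: " + HiveEventCounter.getError());
        System.out.println("fatals: " + HiveEventCounter.getFatal());
      }
    }
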
http://git-wip-us.apache.org/repos/asf/hive/blob/c93d6c77/ql/src/java/org/apache/hadoop/hive/ql/log/NullAppender.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/log/NullAppender.java b/ql/src/java/org/apache/hadoop/hive/ql/log/NullAppender.java
new file mode 100644
index 0000000..c4cb7dd
--- /dev/null
+++ b/ql/src/java/org/apache/hadoop/hive/ql/log/NullAppender.java
@@ -0,0 +1,63 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p/>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p/>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.ql.log;
+
+import java.io.Serializable;
+
+import org.apache.logging.log4j.Level;
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.core.Filter;
+import org.apache.logging.log4j.core.Layout;
+import org.apache.logging.log4j.core.LogEvent;
+import org.apache.logging.log4j.core.LoggerContext;
+import org.apache.logging.log4j.core.appender.AbstractAppender;
+import org.apache.logging.log4j.core.config.Configuration;
+import org.apache.logging.log4j.core.config.LoggerConfig;
+import org.apache.logging.log4j.core.config.plugins.Plugin;
+import org.apache.logging.log4j.core.config.plugins.PluginFactory;
+import org.apache.logging.log4j.core.layout.PatternLayout;
+
+/**
+ * A NullAppender merely exists; it never outputs a message to any device.
+ */
+@Plugin(name = "NullAppender", category = "Core", elementType = "appender", printObject = false)
+public class NullAppender extends AbstractAppender {
+
+  private static LoggerContext context = (LoggerContext) LogManager.getContext(false);
+  private static Configuration configuration = context.getConfiguration();
+
+  protected NullAppender(String name, Filter filter,
+      Layout<? extends Serializable> layout, boolean ignoreExceptions) {
+    super(name, filter, layout, ignoreExceptions);
+  }
+
+  @PluginFactory
+  public static NullAppender createNullAppender() {
+    return new NullAppender("NullAppender", null, PatternLayout.createDefaultLayout(), true);
+  }
+
+  public void addToLogger(String loggerName, Level level) {
+    LoggerConfig loggerConfig = configuration.getLoggerConfig(loggerName);
+    loggerConfig.addAppender(this, level, null);
+    context.updateLoggers();
+  }
+
+  public void append(LogEvent event) {
+    // no-op
+  }
+}

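A hedged sketch of attaching the new NullAppender programmatically, as ExecDriver does above when logging is silenced. Note that adding this appender only sends a discarded copy of each event to it; it does not detach any other appenders already configured on the logger. The logger name below is purely illustrative:

    import org.apache.hadoop.hive.ql.log.NullAppender;
    import org.apache.logging.log4j.Level;

    public class NullAppenderSketch {
      public static void main(String[] args) {
        NullAppender appender = NullAppender.createNullAppender();
        // Events routed to this appender are silently dropped in append().
        appender.addToLogger("org.example.ChattyComponent", Level.ERROR);
        appender.start();
      }
    }
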
http://git-wip-us.apache.org/repos/asf/hive/blob/c93d6c77/ql/src/java/org/apache/hadoop/hive/ql/log/PidDailyRollingFileAppender.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/log/PidDailyRollingFileAppender.java b/ql/src/java/org/apache/hadoop/hive/ql/log/PidDailyRollingFileAppender.java
deleted file mode 100644
index 6a59d4a..0000000
--- a/ql/src/java/org/apache/hadoop/hive/ql/log/PidDailyRollingFileAppender.java
+++ /dev/null
@@ -1,33 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hive.ql.log;
-
-import java.lang.management.ManagementFactory;
-import java.lang.management.RuntimeMXBean;
-
-import org.apache.log4j.DailyRollingFileAppender;
-
-public class PidDailyRollingFileAppender extends DailyRollingFileAppender {
-
-  @Override
-  public void setFile(String file) {
-    RuntimeMXBean rt = ManagementFactory.getRuntimeMXBean();
-    super.setFile(file + '.' + rt.getName());
-  }
-}

http://git-wip-us.apache.org/repos/asf/hive/blob/c93d6c77/ql/src/java/org/apache/hadoop/hive/ql/log/PidFilePatternConverter.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/log/PidFilePatternConverter.java b/ql/src/java/org/apache/hadoop/hive/ql/log/PidFilePatternConverter.java
new file mode 100644
index 0000000..4db10bb
--- /dev/null
+++ b/ql/src/java/org/apache/hadoop/hive/ql/log/PidFilePatternConverter.java
@@ -0,0 +1,62 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p/>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p/>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.ql.log;
+
+import java.lang.management.ManagementFactory;
+
+import org.apache.logging.log4j.core.config.plugins.Plugin;
+import org.apache.logging.log4j.core.config.plugins.PluginFactory;
+import org.apache.logging.log4j.core.pattern.AbstractPatternConverter;
+import org.apache.logging.log4j.core.pattern.ArrayPatternConverter;
+import org.apache.logging.log4j.core.pattern.ConverterKeys;
+
+/**
+ * FilePattern converter that converts %pid pattern to <process-id>@<hostname> information
+ * obtained at runtime.
+ *
+ * Example usage:
+ * <RollingFile name="Rolling-default" fileName="test.log" filePattern="test.log.%pid.gz">
+ *
+ * This will generate an output file whose name contains <process-id>@<hostname>, for example:
+ * test.log.95232@localhost.gz
+ */
+@Plugin(name = "PidFilePatternConverter", category = "FileConverter")
+@ConverterKeys({ "pid" })
+public class PidFilePatternConverter extends AbstractPatternConverter implements
+    ArrayPatternConverter {
+
+  /**
+   * Private constructor.
+   */
+  private PidFilePatternConverter() {
+    super("pid", "pid");
+  }
+
+  @PluginFactory
+  public static PidFilePatternConverter newInstance() {
+    return new PidFilePatternConverter();
+  }
+
+  public void format(StringBuilder toAppendTo, Object... objects) {
+    toAppendTo.append(ManagementFactory.getRuntimeMXBean().getName());
+  }
+
+  public void format(Object obj, StringBuilder toAppendTo) {
+    toAppendTo.append(ManagementFactory.getRuntimeMXBean().getName());
+  }
+}

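For reference, the string the %pid converter appends is the JVM's runtime name, which on common JVMs has the form <process-id>@<hostname>. A trivial sketch to inspect it locally (the class name is illustrative):

    import java.lang.management.ManagementFactory;

    public class PidNameSketch {
      public static void main(String[] args) {
        // Prints something like "95232@localhost", exactly what %pid expands to in a filePattern.
        System.out.println(ManagementFactory.getRuntimeMXBean().getName());
      }
    }
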
http://git-wip-us.apache.org/repos/asf/hive/blob/c93d6c77/ql/src/main/resources/hive-exec-log4j.properties
----------------------------------------------------------------------
diff --git a/ql/src/main/resources/hive-exec-log4j.properties b/ql/src/main/resources/hive-exec-log4j.properties
deleted file mode 100644
index 9eaa6b6..0000000
--- a/ql/src/main/resources/hive-exec-log4j.properties
+++ /dev/null
@@ -1,77 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# Define some default values that can be overridden by system properties
-hive.log.threshold=ALL
-hive.root.logger=INFO,FA
-hive.log.dir=${java.io.tmpdir}/${user.name}
-hive.query.id=hadoop
-hive.log.file=${hive.query.id}.log
-
-# Define the root logger to the system property "hadoop.root.logger".
-log4j.rootLogger=${hive.root.logger}, EventCounter
-
-# Logging Threshold
-log4j.threshhold=${hive.log.threshold}
-
-#
-# File Appender
-#
-
-log4j.appender.FA=org.apache.log4j.FileAppender
-log4j.appender.FA.File=${hive.log.dir}/${hive.log.file}
-log4j.appender.FA.layout=org.apache.log4j.PatternLayout
-
-# Pattern format: Date LogLevel LoggerName LogMessage
-#log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
-# Debugging Pattern format
-log4j.appender.FA.layout.ConversionPattern=%d{ISO8601} %-5p [%t]: %c{2} (%F:%M(%L)) - %m%n
-
-
-#
-# console
-# Add "console" to rootlogger above if you want to use this
-#
-
-log4j.appender.console=org.apache.log4j.ConsoleAppender
-log4j.appender.console.target=System.err
-log4j.appender.console.layout=org.apache.log4j.PatternLayout
-log4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} [%t] %p %c{2}: %m%n
-
-#custom logging levels
-#log4j.logger.xxx=DEBUG
-
-#
-# Event Counter Appender
-# Sends counts of logging messages at different severity levels to Hadoop Metrics.
-#
-log4j.appender.EventCounter=org.apache.hadoop.hive.shims.HiveEventCounter
-
-
-log4j.category.DataNucleus=ERROR,FA
-log4j.category.Datastore=ERROR,FA
-log4j.category.Datastore.Schema=ERROR,FA
-log4j.category.JPOX.Datastore=ERROR,FA
-log4j.category.JPOX.Plugin=ERROR,FA
-log4j.category.JPOX.MetaData=ERROR,FA
-log4j.category.JPOX.Query=ERROR,FA
-log4j.category.JPOX.General=ERROR,FA
-log4j.category.JPOX.Enhancer=ERROR,FA
-
-
-# Silence useless ZK logs
-log4j.logger.org.apache.zookeeper.server.NIOServerCnxn=WARN,FA
-log4j.logger.org.apache.zookeeper.ClientCnxnSocketNIO=WARN,FA

http://git-wip-us.apache.org/repos/asf/hive/blob/c93d6c77/ql/src/main/resources/hive-exec-log4j2.xml
----------------------------------------------------------------------
diff --git a/ql/src/main/resources/hive-exec-log4j2.xml b/ql/src/main/resources/hive-exec-log4j2.xml
new file mode 100644
index 0000000..c93437c
--- /dev/null
+++ b/ql/src/main/resources/hive-exec-log4j2.xml
@@ -0,0 +1,110 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+
+<Configuration status="info" strict="true" name="HiveExecLog4j2"
+ packages="org.apache.hadoop.hive.ql.log">
+
+  <Properties>
+    <Property name="hive.log.threshold">ALL</Property>
+    <Property name="hive.log.level">INFO</Property>
+    <Property name="hive.root.logger">FA</Property>
+    <Property name="hive.log.dir">${sys:java.io.tmpdir}/${sys:user.name}</Property>
+    <Property name="hive.query.id">hadoop</Property>
+    <Property name="hive.log.file">${sys:hive.query.id}.log</Property>
+  </Properties>
+
+  <Appenders>
+    <Console name="console" target="SYSTEM_ERR">
+      <PatternLayout pattern="%d{yy/MM/dd HH:mm:ss} [%t]: %p %c{2}: %m%n"/>
+    </Console>
+
+    <!-- Regular File Appender -->
+    <!-- NOTE: if enabling multiple file appenders, make sure to use different file names -->
+    <File name="FA" fileName="${sys:hive.log.dir}/${sys:hive.log.file}">
+      <PatternLayout pattern="%d{ISO8601} %-5p [%t]: %c{2} (%F:%M(%L)) - %m%n" />
+    </File>
+
+    <!-- Daily Rolling File Appender -->
+    <!-- NOTE: if enabling multiple file appenders, make sure to use different file names -->
+    <!-- Use %pid in the filePattern to append <process-id>@<host-name> to the filename if you want separate log files for different CLI sessions -->
+    <!-- <RollingFile name="DRFA" fileName="${sys:hive.log.dir}/${sys:hive.log.file}"
+     filePattern="${sys:hive.log.dir}/${sys:hive.log.file}.%d{yyyy-MM-dd}">
+      <PatternLayout pattern="%d{ISO8601} %-5p [%t]: %c{2} (%F:%M(%L)) - %m%n" />
+      <Policies>
+        <TimeBasedTriggeringPolicy interval="1" modulate="true"/>
+      </Policies>
+      <DefaultRolloverStrategy max="30"/>
+    </RollingFile> -->
+
+    <!-- Size based Rolling File Appender -->
+    <!-- NOTE: if enabling multiple file appenders, make sure to use different file names -->
+    <!-- <RollingFile name="RFA" fileName="${sys:hive.log.dir}/${sys:hive.log.file}"
+     filePattern="${sys:hive.log.dir}/${sys:hive.log.file}.%i">
+      <PatternLayout pattern="%d{ISO8601} %-5p [%t]: %c{2} (%F:%M(%L)) - %m%n" />
+      <Policies>
+        <SizeBasedTriggeringPolicy size="256 MB" />
+      </Policies>
+      <DefaultRolloverStrategy max="10"/>
+    </RollingFile> -->
+
+    <!-- HiveEventCounter appender is loaded via the Configuration's packages attribute. It sends counts of logging messages at different severity levels to Hadoop Metrics. -->
+    <HiveEventCounter name="EventCounter"/>
+  </Appenders>
+
+  <Loggers>
+    <Root level="${sys:hive.log.threshold}">
+      <AppenderRef ref="${sys:hive.root.logger}" level="${sys:hive.log.level}"/>
+      <AppenderRef ref="EventCounter" />
+    </Root>
+
+    <Logger name="org.apache.zookeeper.server.NIOServerCnxn" level="WARN">
+      <AppenderRef ref="${sys:hive.root.logger}"/>
+    </Logger>
+    <Logger name="org.apache.zookeeper.ClientCnxnSocketNIO" level="WARN">
+      <AppenderRef ref="${sys:hive.root.logger}"/>
+    </Logger>
+    <Logger name="DataNucleus" level="ERROR">
+      <AppenderRef ref="${sys:hive.root.logger}"/>
+    </Logger>
+    <Logger name="Datastore" level="ERROR">
+      <AppenderRef ref="${sys:hive.root.logger}"/>
+    </Logger>
+    <Logger name="Datastore.Schema" level="ERROR">
+      <AppenderRef ref="${sys:hive.root.logger}"/>
+    </Logger>
+    <Logger name="JPOX.Datastore" level="ERROR">
+      <AppenderRef ref="${sys:hive.root.logger}"/>
+    </Logger>
+    <Logger name="JPOX.Plugin" level="ERROR">
+      <AppenderRef ref="${sys:hive.root.logger}"/>
+    </Logger>
+    <Logger name="JPOX.Metadata" level="ERROR">
+      <AppenderRef ref="${sys:hive.root.logger}"/>
+    </Logger>
+    <Logger name="JPOX.Query" level="ERROR">
+      <AppenderRef ref="${sys:hive.root.logger}"/>
+    </Logger>
+    <Logger name="JPOX.General" level="ERROR">
+      <AppenderRef ref="${sys:hive.root.logger}"/>
+    </Logger>
+    <Logger name="JPOX.Enhancer" level="ERROR">
+      <AppenderRef ref="${sys:hive.root.logger}"/>
+    </Logger>
+  </Loggers>
+
+</Configuration>

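Because the configuration above resolves its paths and appender choice through ${sys:...} lookups, the defaults can be overridden with ordinary JVM system properties (for example -Dhive.root.logger=console -Dhive.log.dir=...). A hedged sketch, assuming hive-exec-log4j2.xml is the configuration Log4j 2 picks up and that the properties are set before Log4j 2 initializes in the same JVM; the class name and values are illustrative:

    public class ExecLogOverrideSketch {
      public static void main(String[] args) {
        // Must run before the first LogManager call so the ${sys:...} lookups see these values.
        System.setProperty("hive.log.dir", "/tmp/hive-logs");      // illustrative directory
        System.setProperty("hive.log.file", "my-query-id.log");    // illustrative file name
        System.setProperty("hive.root.logger", "console");         // route to the console appender

        org.apache.logging.log4j.Logger log =
            org.apache.logging.log4j.LogManager.getLogger(ExecLogOverrideSketch.class);
        log.info("Logged according to hive-exec-log4j2.xml with the overridden properties");
      }
    }
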
http://git-wip-us.apache.org/repos/asf/hive/blob/c93d6c77/ql/src/test/org/apache/hadoop/hive/ql/log/TestLog4j2Appenders.java
----------------------------------------------------------------------
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/log/TestLog4j2Appenders.java b/ql/src/test/org/apache/hadoop/hive/ql/log/TestLog4j2Appenders.java
new file mode 100644
index 0000000..bdd837e
--- /dev/null
+++ b/ql/src/test/org/apache/hadoop/hive/ql/log/TestLog4j2Appenders.java
@@ -0,0 +1,95 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p/>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p/>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.ql.log;
+
+import static org.junit.Assert.assertEquals;
+
+import org.apache.hadoop.hive.ql.metadata.StringAppender;
+import org.apache.logging.log4j.Level;
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
+import org.apache.logging.log4j.core.LoggerContext;
+import org.apache.logging.log4j.core.config.Configuration;
+import org.apache.logging.log4j.core.config.LoggerConfig;
+import org.junit.Before;
+import org.junit.Test;
+
+/**
+ * Tests for the Log4j 2 appenders introduced in this change (StringAppender and HiveEventCounter).
+ */
+public class TestLog4j2Appenders {
+
+  @Before
+  public void setup() {
+    // Programmatically set the root logger level to INFO. By default, if log4j2-test.xml is
+    // not available, the root logger uses the ERROR log level.
+    LoggerContext ctx = (LoggerContext) LogManager.getContext(false);
+    Configuration config = ctx.getConfiguration();
+    LoggerConfig loggerConfig = config.getLoggerConfig(LogManager.ROOT_LOGGER_NAME);
+    loggerConfig.setLevel(Level.INFO);
+    ctx.updateLoggers();
+  }
+
+  @Test
+  public void testStringAppender() throws Exception {
+    // Get the RootLogger which, if you don't have log4j2-test.xml defined, will only log ERRORs
+    Logger logger = LogManager.getRootLogger();
+    // Create a String Appender to capture log output
+    StringAppender appender = StringAppender.createStringAppender("%m");
+    appender.addToLogger(logger.getName(), Level.INFO);
+    appender.start();
+
+    // Log to the string appender
+    logger.info("Hello!");
+    logger.info(" World");
+
+    assertEquals("Hello! World", appender.getOutput());
+    appender.removeFromLogger(LogManager.getRootLogger().getName());
+  }
+
+  @Test
+  public void testHiveEventCounterAppender() throws Exception {
+    Logger logger = LogManager.getRootLogger();
+    HiveEventCounter appender = HiveEventCounter.createInstance("EventCounter", true, null, null);
+    appender.addToLogger(logger.getName(), Level.INFO);
+    appender.start();
+
+    logger.info("Test");
+    logger.info("Test");
+    logger.info("Test");
+    logger.info("Test");
+
+    logger.error("Test");
+    logger.error("Test");
+    logger.error("Test");
+
+    logger.warn("Test");
+    logger.warn("Test");
+
+    logger.fatal("Test");
+
+    // HiveEventCounter will be loaded from hive-log4j2-test.xml before tests are run. The 2 log
+    // info msgs from previous test case will also be counted along with 4 log info msgs in this
+    // test and hence we assert for 6 here
+    assertEquals(6, appender.getInfo());
+    assertEquals(3, appender.getError());
+    assertEquals(2, appender.getWarn());
+    assertEquals(1, appender.getFatal());
+    appender.removeFromLogger(LogManager.getRootLogger().getName());
+  }
+}

http://git-wip-us.apache.org/repos/asf/hive/blob/c93d6c77/ql/src/test/org/apache/hadoop/hive/ql/metadata/StringAppender.java
----------------------------------------------------------------------
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/metadata/StringAppender.java b/ql/src/test/org/apache/hadoop/hive/ql/metadata/StringAppender.java
new file mode 100644
index 0000000..17b64d6
--- /dev/null
+++ b/ql/src/test/org/apache/hadoop/hive/ql/metadata/StringAppender.java
@@ -0,0 +1,128 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p/>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p/>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.ql.metadata;
+
+import java.io.ByteArrayOutputStream;
+import java.io.OutputStream;
+import java.io.OutputStreamWriter;
+import java.io.Serializable;
+
+import org.apache.logging.log4j.Level;
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.core.Filter;
+import org.apache.logging.log4j.core.Layout;
+import org.apache.logging.log4j.core.LoggerContext;
+import org.apache.logging.log4j.core.appender.AbstractOutputStreamAppender;
+import org.apache.logging.log4j.core.appender.OutputStreamManager;
+import org.apache.logging.log4j.core.config.Configuration;
+import org.apache.logging.log4j.core.config.LoggerConfig;
+import org.apache.logging.log4j.core.config.plugins.Plugin;
+import org.apache.logging.log4j.core.config.plugins.PluginAttribute;
+import org.apache.logging.log4j.core.config.plugins.PluginFactory;
+import org.apache.logging.log4j.core.layout.PatternLayout;
+
+import com.google.common.annotations.VisibleForTesting;
+
+/**
+ * Log4j 2 appender that writes to an in-memory string object.
+ */
+@Plugin(name = "StringAppender", category = "Core", elementType = "appender", printObject = true)
+public class StringAppender
+    extends AbstractOutputStreamAppender<StringAppender.StringOutputStreamManager> {
+
+  private static final String APPENDER_NAME = "StringAppender";
+  private static LoggerContext context = (LoggerContext) LogManager.getContext(false);
+  private static Configuration configuration = context.getConfiguration();
+  private StringOutputStreamManager manager;
+
+  /**
+   * Instantiate a StringAppender that buffers formatted log events in the
+   * given {@link StringOutputStreamManager}.
+   *
+   * @param name             The name of the Appender.
+   * @param layout           The layout to format the message.
+   * @param filter           The Filter to apply, or null for none.
+   * @param ignoreExceptions Whether exceptions thrown while appending are ignored.
+   * @param immediateFlush   Whether the underlying stream is flushed on every append.
+   * @param manager          The OutputStreamManager backing this appender.
+   */
+  protected StringAppender(String name,
+      Layout<? extends Serializable> layout, Filter filter,
+      boolean ignoreExceptions, boolean immediateFlush,
+      StringOutputStreamManager manager) {
+    super(name, layout, filter, ignoreExceptions, immediateFlush, manager);
+    this.manager = manager;
+  }
+
+  @PluginFactory
+  public static StringAppender createStringAppender(
+      @PluginAttribute("name") String nullablePatternString) {
+    PatternLayout layout;
+    if (nullablePatternString == null) {
+      layout = PatternLayout.createDefaultLayout();
+    } else {
+      layout = PatternLayout.createLayout(nullablePatternString, configuration,
+          null, null, true, false, null, null);
+    }
+
+    return new StringAppender(APPENDER_NAME, layout, null, false, true,
+        new StringOutputStreamManager(new ByteArrayOutputStream(), "StringStream", layout));
+  }
+
+  @VisibleForTesting
+  public void addToLogger(String loggerName, Level level) {
+    LoggerConfig loggerConfig = configuration.getLoggerConfig(loggerName);
+    loggerConfig.addAppender(this, level, null);
+    context.updateLoggers();
+  }
+
+  @VisibleForTesting
+  public void removeFromLogger(String loggerName) {
+    LoggerConfig loggerConfig = configuration.getLoggerConfig(loggerName);
+    loggerConfig.removeAppender(APPENDER_NAME);
+    context.updateLoggers();
+  }
+
+  public String getOutput() {
+    manager.flush();
+    return new String(manager.getStream().toByteArray());
+  }
+
+  public void reset() {
+    manager.reset();
+  }
+
+  protected static class StringOutputStreamManager extends OutputStreamManager {
+    ByteArrayOutputStream stream;
+
+    protected StringOutputStreamManager(ByteArrayOutputStream os, String streamName,
+        Layout<?> layout) {
+      super(os, streamName, layout);
+      stream = os;
+    }
+
+    public ByteArrayOutputStream getStream() {
+      return stream;
+    }
+
+    public void reset() {
+      stream.reset();
+    }
+  }
+}

http://git-wip-us.apache.org/repos/asf/hive/blob/c93d6c77/ql/src/test/org/apache/hadoop/hive/ql/metadata/TestHive.java
----------------------------------------------------------------------
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/metadata/TestHive.java b/ql/src/test/org/apache/hadoop/hive/ql/metadata/TestHive.java
index 99fbd5d..1e2feaa 100755
--- a/ql/src/test/org/apache/hadoop/hive/ql/metadata/TestHive.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/metadata/TestHive.java
@@ -20,7 +20,6 @@ package org.apache.hadoop.hive.ql.metadata;
 
 import static org.apache.hadoop.hive.metastore.MetaStoreUtils.DEFAULT_DATABASE_NAME;
 
-import java.io.StringWriter;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.HashMap;
@@ -29,8 +28,6 @@ import java.util.List;
 import java.util.Map;
 import java.util.regex.Pattern;
 
-import junit.framework.TestCase;
-
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
@@ -56,15 +53,19 @@ import org.apache.hadoop.mapred.SequenceFileInputFormat;
 import org.apache.hadoop.mapred.SequenceFileOutputFormat;
 import org.apache.hadoop.mapred.TextInputFormat;
 import org.apache.hadoop.util.StringUtils;
-import org.apache.log4j.Level;
-import org.apache.log4j.Logger;
-import org.apache.log4j.PatternLayout;
-import org.apache.log4j.WriterAppender;
+import org.apache.logging.log4j.Level;
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
+import org.apache.logging.log4j.core.LoggerContext;
+import org.apache.logging.log4j.core.config.Configuration;
+import org.apache.logging.log4j.core.config.LoggerConfig;
 import org.apache.thrift.protocol.TBinaryProtocol;
 import org.junit.Assert;
 
 import com.google.common.collect.ImmutableMap;
 
+import junit.framework.TestCase;
+
 /**
  * TestHive.
  *
@@ -248,36 +249,39 @@ public class TestHive extends TestCase {
    * @throws Throwable
    */
   public void testMetaStoreApiTiming() throws Throwable {
-    // set log level to DEBUG, as this is logged at debug level
-    Logger logger = Logger.getLogger("hive.ql.metadata.Hive");
-    Level origLevel = logger.getLevel();
-    logger.setLevel(Level.DEBUG);
-
-    // create an appender to capture the logs in a string
-    StringWriter writer = new StringWriter();
-    WriterAppender appender = new WriterAppender(new PatternLayout(), writer);
+    // Set this logger's level to DEBUG, as the metastore call timing is logged at debug level
+    Logger logger = LogManager.getLogger("hive.ql.metadata.Hive");
+    Level oldLevel = logger.getLevel();
+    LoggerContext ctx = (LoggerContext) LogManager.getContext(false);
+    Configuration config = ctx.getConfiguration();
+    LoggerConfig loggerConfig = config.getLoggerConfig(logger.getName());
+    loggerConfig.setLevel(Level.DEBUG);
+    ctx.updateLoggers();
+
+    // Create a String Appender to capture log output
+    StringAppender appender = StringAppender.createStringAppender("%m");
+    appender.addToLogger(logger.getName(), Level.DEBUG);
+    appender.start();
 
     try {
-      logger.addAppender(appender);
-
       hm.clearMetaCallTiming();
       hm.getAllDatabases();
       hm.dumpAndClearMetaCallTiming("test");
-      String logStr = writer.toString();
+      String logStr = appender.getOutput();
       String expectedString = "getAllDatabases_()=";
       Assert.assertTrue(logStr + " should contain <" + expectedString,
           logStr.contains(expectedString));
 
       // reset the log buffer, verify new dump without any api call does not contain func
-      writer.getBuffer().setLength(0);
+      appender.reset();
       hm.dumpAndClearMetaCallTiming("test");
-      logStr = writer.toString();
+      logStr = appender.getOutput();
       Assert.assertFalse(logStr + " should not contain <" + expectedString,
           logStr.contains(expectedString));
-
     } finally {
-      logger.setLevel(origLevel);
-      logger.removeAppender(appender);
+      loggerConfig.setLevel(oldLevel);
+      ctx.updateLoggers();
+      appender.removeFromLogger(logger.getName());
     }
   }
 

http://git-wip-us.apache.org/repos/asf/hive/blob/c93d6c77/service/src/java/org/apache/hive/service/cli/CLIServiceUtils.java
----------------------------------------------------------------------
diff --git a/service/src/java/org/apache/hive/service/cli/CLIServiceUtils.java b/service/src/java/org/apache/hive/service/cli/CLIServiceUtils.java
index 9d64b10..876ade8 100644
--- a/service/src/java/org/apache/hive/service/cli/CLIServiceUtils.java
+++ b/service/src/java/org/apache/hive/service/cli/CLIServiceUtils.java
@@ -18,9 +18,6 @@
 
 package org.apache.hive.service.cli;
 
-import org.apache.log4j.Layout;
-import org.apache.log4j.PatternLayout;
-
 /**
  * CLIServiceUtils.
  *
@@ -29,10 +26,6 @@ public class CLIServiceUtils {
 
 
   private static final char SEARCH_STRING_ESCAPE = '\\';
-  public static final Layout verboseLayout = new PatternLayout(
-    "%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n");
-  public static final Layout nonVerboseLayout = new PatternLayout(
-    "%-5p : %m%n");
 
   /**
    * Convert a SQL search pattern into an equivalent Java Regex.

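The two Layout constants deleted here are rebuilt in LogDivertAppender below with Log4j 2's PatternLayout factory instead of the log4j 1.x constructors. A minimal standalone sketch of that replacement, reusing the same patterns and the same 8-argument createLayout call this patch uses (the class name is illustrative):

    import org.apache.logging.log4j.LogManager;
    import org.apache.logging.log4j.core.LoggerContext;
    import org.apache.logging.log4j.core.config.Configuration;
    import org.apache.logging.log4j.core.layout.PatternLayout;

    public class LayoutMigrationSketch {
      public static void main(String[] args) {
        Configuration configuration =
            ((LoggerContext) LogManager.getContext(false)).getConfiguration();
        // Arguments: pattern, config, replace, charset, alwaysWriteExceptions,
        // noConsoleNoAnsi, header, footer.
        PatternLayout verboseLayout = PatternLayout.createLayout(
            "%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n", configuration, null, null, true, false, null, null);
        PatternLayout nonVerboseLayout = PatternLayout.createLayout(
            "%-5p : %m%n", configuration, null, null, true, false, null, null);
        System.out.println(verboseLayout.getConversionPattern());
        System.out.println(nonVerboseLayout.getConversionPattern());
      }
    }
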
http://git-wip-us.apache.org/repos/asf/hive/blob/c93d6c77/service/src/java/org/apache/hive/service/cli/operation/LogDivertAppender.java
----------------------------------------------------------------------
diff --git a/service/src/java/org/apache/hive/service/cli/operation/LogDivertAppender.java b/service/src/java/org/apache/hive/service/cli/operation/LogDivertAppender.java
index 70340bd..fb3921f 100644
--- a/service/src/java/org/apache/hive/service/cli/operation/LogDivertAppender.java
+++ b/service/src/java/org/apache/hive/service/cli/operation/LogDivertAppender.java
@@ -6,44 +6,94 @@
  * to you under the Apache License, Version 2.0 (the
  * "License"); you may not use this file except in compliance
  * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
+ * <p/>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p/>
  * Unless required by applicable law or agreed to in writing, software
  * distributed under the License is distributed on an "AS IS" BASIS,
  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-
 package org.apache.hive.service.cli.operation;
-import java.io.CharArrayWriter;
-import java.util.Enumeration;
+
+import java.io.ByteArrayOutputStream;
+import java.io.OutputStream;
+import java.io.OutputStreamWriter;
+import java.io.Serializable;
 import java.util.regex.Pattern;
 
 import org.apache.hadoop.hive.ql.exec.Task;
 import org.apache.hadoop.hive.ql.log.PerfLogger;
 import org.apache.hadoop.hive.ql.session.OperationLog;
-import org.apache.hadoop.hive.ql.session.OperationLog.LoggingLevel;
-import org.apache.hive.service.cli.CLIServiceUtils;
-import org.apache.log4j.Appender;
-import org.apache.log4j.ConsoleAppender;
-import org.apache.log4j.Layout;
-import org.apache.log4j.Logger;
-import org.apache.log4j.WriterAppender;
-import org.apache.log4j.spi.Filter;
-import org.apache.log4j.spi.LoggingEvent;
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
+import org.apache.logging.log4j.core.Appender;
+import org.apache.logging.log4j.core.Filter;
+import org.apache.logging.log4j.core.Layout;
+import org.apache.logging.log4j.core.LogEvent;
+import org.apache.logging.log4j.core.LoggerContext;
+import org.apache.logging.log4j.core.appender.AbstractOutputStreamAppender;
+import org.apache.logging.log4j.core.appender.ConsoleAppender;
+import org.apache.logging.log4j.core.appender.OutputStreamManager;
+import org.apache.logging.log4j.core.config.Configuration;
+import org.apache.logging.log4j.core.filter.AbstractFilter;
+import org.apache.logging.log4j.core.layout.PatternLayout;
 
 import com.google.common.base.Joiner;
 
 /**
- * An Appender to divert logs from individual threads to the LogObject they belong to.
+ * Divert appender to redirect operation logs to separate files.
  */
-public class LogDivertAppender extends WriterAppender {
-  private static final Logger LOG = Logger.getLogger(LogDivertAppender.class.getName());
+public class LogDivertAppender
+    extends AbstractOutputStreamAppender<LogDivertAppender.StringOutputStreamManager> {
+  private static final Logger LOG = LogManager.getLogger(LogDivertAppender.class.getName());
+  private static LoggerContext context = (LoggerContext) LogManager.getContext(false);
+  private static Configuration configuration = context.getConfiguration();
+  public static final Layout<? extends Serializable> verboseLayout = PatternLayout.createLayout(
+      "%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n", configuration, null, null, true, false, null, null);
+  public static final Layout<? extends Serializable> nonVerboseLayout = PatternLayout.createLayout(
+      "%-5p : %m%n", configuration, null, null, true, false, null, null);
+
   private final OperationManager operationManager;
+  private StringOutputStreamManager manager;
   private boolean isVerbose;
-  private Layout verboseLayout;
+  private final Layout<? extends Serializable> layout;
+
+  /**
+   * Instantiate a LogDivertAppender that writes diverted operation logs into the
+   * given {@link StringOutputStreamManager}.
+   *
+   * @param name             The name of the Appender.
+   * @param filter           The Filter to apply, or null for none.
+   * @param manager          The OutputStreamManager backing this appender.
+   * @param operationManager The OperationManager used to look up the current operation log.
+   * @param loggingMode      The operation logging level (verbose, execution, or performance).
+   */
+  protected LogDivertAppender(String name, Filter filter,
+      StringOutputStreamManager manager, OperationManager operationManager,
+      OperationLog.LoggingLevel loggingMode) {
+    super(name, null, filter, false, true, manager);
+    this.operationManager = operationManager;
+    this.manager = manager;
+    this.isVerbose = (loggingMode == OperationLog.LoggingLevel.VERBOSE);
+    this.layout = getDefaultLayout();
+  }
+
+  public Layout<? extends Serializable> getDefaultLayout() {
+    // There should be a ConsoleAppender. Copy its Layout.
+    Logger root = LogManager.getRootLogger();
+    Layout layout = null;
+
+    for (Appender ap : ((org.apache.logging.log4j.core.Logger) root).getAppenders().values()) {
+      if (ap.getClass().equals(ConsoleAppender.class)) {
+        layout = ap.getLayout();
+        break;
+      }
+    }
+
+    return layout;
+  }
 
   /**
    * A log filter that filters messages coming from the logger with the given names.
@@ -52,31 +102,31 @@ public class LogDivertAppender extends WriterAppender {
    * they don't generate more logs for themselves when they process logs.
    * White list filter is used for less verbose log collection
    */
-  private static class NameFilter extends Filter {
+  private static class NameFilter extends AbstractFilter {
     private Pattern namePattern;
-    private LoggingLevel loggingMode;
+    private OperationLog.LoggingLevel loggingMode;
     private OperationManager operationManager;
 
     /* Patterns that are excluded in verbose logging level.
      * Filter out messages coming from log processing classes, or we'll run an infinite loop.
      */
     private static final Pattern verboseExcludeNamePattern = Pattern.compile(Joiner.on("|").
-      join(new String[] {LOG.getName(), OperationLog.class.getName(),
-      OperationManager.class.getName()}));
+        join(new String[]{LOG.getName(), OperationLog.class.getName(),
+            OperationManager.class.getName()}));
 
     /* Patterns that are included in execution logging level.
      * In execution mode, show only select logger messages.
      */
     private static final Pattern executionIncludeNamePattern = Pattern.compile(Joiner.on("|").
-      join(new String[] {"org.apache.hadoop.mapreduce.JobSubmitter",
-      "org.apache.hadoop.mapreduce.Job", "SessionState", Task.class.getName(),
-      "org.apache.hadoop.hive.ql.exec.spark.status.SparkJobMonitor"}));
+        join(new String[]{"org.apache.hadoop.mapreduce.JobSubmitter",
+            "org.apache.hadoop.mapreduce.Job", "SessionState", Task.class.getName(),
+            "org.apache.hadoop.hive.ql.exec.spark.status.SparkJobMonitor"}));
 
     /* Patterns that are included in performance logging level.
      * In performance mode, show execution and performance logger messages.
      */
     private static final Pattern performanceIncludeNamePattern = Pattern.compile(
-      executionIncludeNamePattern.pattern() + "|" + PerfLogger.class.getName());
+        executionIncludeNamePattern.pattern() + "|" + PerfLogger.class.getName());
 
     private void setCurrentNamePattern(OperationLog.LoggingLevel mode) {
       if (mode == OperationLog.LoggingLevel.VERBOSE) {
@@ -88,26 +138,25 @@ public class LogDivertAppender extends WriterAppender {
       }
     }
 
-    public NameFilter(
-      OperationLog.LoggingLevel loggingMode, OperationManager op) {
+    public NameFilter(OperationLog.LoggingLevel loggingMode, OperationManager op) {
       this.operationManager = op;
       this.loggingMode = loggingMode;
       setCurrentNamePattern(loggingMode);
     }
 
     @Override
-    public int decide(LoggingEvent ev) {
+    public Result filter(LogEvent event) {
       OperationLog log = operationManager.getOperationLogByThread();
       boolean excludeMatches = (loggingMode == OperationLog.LoggingLevel.VERBOSE);
 
       if (log == null) {
-        return Filter.DENY;
+        return Result.DENY;
       }
 
       OperationLog.LoggingLevel currentLoggingMode = log.getOpLoggingLevel();
       // If logging is disabled, deny everything.
       if (currentLoggingMode == OperationLog.LoggingLevel.NONE) {
-        return Filter.DENY;
+        return Result.DENY;
       }
       // Look at the current session's setting
       // and set the pattern and excludeMatches accordingly.
@@ -116,88 +165,58 @@ public class LogDivertAppender extends WriterAppender {
         setCurrentNamePattern(loggingMode);
       }
 
-      boolean isMatch = namePattern.matcher(ev.getLoggerName()).matches();
+      boolean isMatch = namePattern.matcher(event.getLoggerName()).matches();
 
       if (excludeMatches == isMatch) {
         // Deny if this is black-list filter (excludeMatches = true) and it
-        // matched
-        // or if this is whitelist filter and it didn't match
-        return Filter.DENY;
+        // matched or if this is whitelist filter and it didn't match
+        return Result.DENY;
       }
-      return Filter.NEUTRAL;
+      return Result.NEUTRAL;
     }
   }
 
-  /** This is where the log message will go to */
-  private final CharArrayWriter writer = new CharArrayWriter();
-
-  private void setLayout (boolean isVerbose, Layout lo) {
-    if (isVerbose) {
-      if (lo == null) {
-        lo = CLIServiceUtils.verboseLayout;
-        LOG.info("Cannot find a Layout from a ConsoleAppender. Using default Layout pattern.");
-      }
-    } else {
-      lo = CLIServiceUtils.nonVerboseLayout;
-    }
-    setLayout(lo);
+  public static LogDivertAppender createInstance(OperationManager operationManager,
+      OperationLog.LoggingLevel loggingMode) {
+    return new LogDivertAppender("LogDivertAppender", new NameFilter(loggingMode, operationManager),
+        new StringOutputStreamManager(new ByteArrayOutputStream(), "StringStream", null),
+        operationManager, loggingMode);
   }
 
-  private void initLayout(boolean isVerbose) {
-    // There should be a ConsoleAppender. Copy its Layout.
-    Logger root = Logger.getRootLogger();
-    Layout layout = null;
-
-    Enumeration<?> appenders = root.getAllAppenders();
-    while (appenders.hasMoreElements()) {
-      Appender ap = (Appender) appenders.nextElement();
-      if (ap.getClass().equals(ConsoleAppender.class)) {
-        layout = ap.getLayout();
-        break;
-      }
-    }
-    setLayout(isVerbose, layout);
+  public String getOutput() {
+    return new String(manager.getStream().toByteArray());
   }
 
-  public LogDivertAppender(OperationManager operationManager,
-    OperationLog.LoggingLevel loggingMode) {
-    isVerbose = (loggingMode == OperationLog.LoggingLevel.VERBOSE);
-    initLayout(isVerbose);
-    setWriter(writer);
-    setName("LogDivertAppender");
-    this.operationManager = operationManager;
-    this.verboseLayout = isVerbose ? layout : CLIServiceUtils.verboseLayout;
-    addFilter(new NameFilter(loggingMode, operationManager));
+  @Override
+  public void start() {
+    super.start();
   }
 
   @Override
-  public void doAppend(LoggingEvent event) {
-    OperationLog log = operationManager.getOperationLogByThread();
+  public Layout<? extends Serializable> getLayout() {
 
-    // Set current layout depending on the verbose/non-verbose mode.
+    // If there is a logging level change from verbose->non-verbose or vice-versa since
+    // the last subAppend call, change the layout to preserve consistency.
+    OperationLog log = operationManager.getOperationLogByThread();
     if (log != null) {
-      boolean isCurrModeVerbose = (log.getOpLoggingLevel() == OperationLog.LoggingLevel.VERBOSE);
+      isVerbose = (log.getOpLoggingLevel() == OperationLog.LoggingLevel.VERBOSE);
+    }
 
-      // If there is a logging level change from verbose->non-verbose or vice-versa since
-      // the last subAppend call, change the layout to preserve consistency.
-      if (isCurrModeVerbose != isVerbose) {
-        isVerbose = isCurrModeVerbose;
-        setLayout(isVerbose, verboseLayout);
-      }
+    // layout is immutable in log4j2, so we cheat here and return a different layout when
+    // verbosity changes
+    if (isVerbose) {
+      return verboseLayout;
+    } else {
+      return layout == null ? nonVerboseLayout : layout;
     }
-    super.doAppend(event);
   }
 
-  /**
-   * Overrides WriterAppender.subAppend(), which does the real logging. No need
-   * to worry about concurrency since log4j calls this synchronously.
-   */
   @Override
-  protected void subAppend(LoggingEvent event) {
-    super.subAppend(event);
-    // That should've gone into our writer. Notify the LogContext.
-    String logOutput = writer.toString();
-    writer.reset();
+  public void append(LogEvent event) {
+    super.append(event);
+
+    String logOutput = getOutput();
+    manager.reset();
 
     OperationLog log = operationManager.getOperationLogByThread();
     if (log == null) {
@@ -206,4 +225,22 @@ public class LogDivertAppender extends WriterAppender {
     }
     log.writeOperationLog(logOutput);
   }
+
+  protected static class StringOutputStreamManager extends OutputStreamManager {
+    ByteArrayOutputStream stream;
+
+    protected StringOutputStreamManager(ByteArrayOutputStream os, String streamName,
+        Layout<?> layout) {
+      super(os, streamName, layout);
+      stream = os;
+    }
+
+    public ByteArrayOutputStream getStream() {
+      return stream;
+    }
+
+    public void reset() {
+      stream.reset();
+    }
+  }
 }
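
[Editor's note] For readers new to the log4j2 Appender API that this patch migrates to, the stand-alone sketch below shows the same capture-and-drain idea in its simplest form. It is illustrative only: the class name InMemoryAppender and its drain() method are hypothetical and not part of the patch, and a plain StringBuilder stands in for the StringOutputStreamManager above.

import org.apache.logging.log4j.core.LogEvent;
import org.apache.logging.log4j.core.appender.AbstractAppender;
import org.apache.logging.log4j.core.layout.PatternLayout;

// Minimal sketch (hypothetical class, not from the patch): buffer each rendered
// event in memory so a caller can periodically drain it, the same idea the
// StringOutputStreamManager above implements over a ByteArrayOutputStream.
public class InMemoryAppender extends AbstractAppender {

  private final StringBuilder buffer = new StringBuilder();

  public InMemoryAppender(String name) {
    // createDefaultLayout() stands in for the ConsoleAppender layout lookup in getDefaultLayout().
    super(name, null, PatternLayout.createDefaultLayout(), true);
  }

  @Override
  public synchronized void append(LogEvent event) {
    // Render with the configured layout and buffer the text instead of writing it to a stream.
    buffer.append(getLayout().toSerializable(event));
  }

  /** Returns everything captured since the last call and clears the buffer. */
  public synchronized String drain() {
    String out = buffer.toString();
    buffer.setLength(0);
    return out;
  }
}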

http://git-wip-us.apache.org/repos/asf/hive/blob/c93d6c77/service/src/java/org/apache/hive/service/cli/operation/OperationManager.java
----------------------------------------------------------------------
diff --git a/service/src/java/org/apache/hive/service/cli/operation/OperationManager.java b/service/src/java/org/apache/hive/service/cli/operation/OperationManager.java
index 9b0a519..304a525 100644
--- a/service/src/java/org/apache/hive/service/cli/operation/OperationManager.java
+++ b/service/src/java/org/apache/hive/service/cli/operation/OperationManager.java
@@ -41,8 +41,11 @@ import org.apache.hive.service.cli.RowSet;
 import org.apache.hive.service.cli.RowSetFactory;
 import org.apache.hive.service.cli.TableSchema;
 import org.apache.hive.service.cli.session.HiveSession;
-import org.apache.log4j.Appender;
-import org.apache.log4j.Logger;
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.core.Appender;
+import org.apache.logging.log4j.core.LoggerContext;
+import org.apache.logging.log4j.core.config.Configuration;
+import org.apache.logging.log4j.core.config.LoggerConfig;
 
 /**
  * OperationManager.
@@ -50,7 +53,6 @@ import org.apache.log4j.Logger;
  */
 public class OperationManager extends AbstractService {
   private final Log LOG = LogFactory.getLog(OperationManager.class.getName());
-
   private final Map<OperationHandle, Operation> handleToOperation =
       new HashMap<OperationHandle, Operation>();
 
@@ -83,8 +85,13 @@ public class OperationManager extends AbstractService {
 
   private void initOperationLogCapture(String loggingMode) {
     // Register another Appender (with the same layout) that talks to us.
-    Appender ap = new LogDivertAppender(this, OperationLog.getLoggingLevel(loggingMode));
-    Logger.getRootLogger().addAppender(ap);
+    Appender ap = LogDivertAppender.createInstance(this, OperationLog.getLoggingLevel(loggingMode));
+    LoggerContext context = (LoggerContext) LogManager.getContext(false);
+    Configuration configuration = context.getConfiguration();
+    LoggerConfig loggerConfig = configuration.getLoggerConfig(LogManager.getLogger().getName());
+    loggerConfig.addAppender(ap, null, null);
+    context.updateLoggers();
+    ap.start();
   }
 
   public ExecuteStatementOperation newExecuteStatementOperation(HiveSession parentSession,
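
[Editor's note] A hedged usage sketch of the registration sequence in initOperationLogCapture() above. It reuses the hypothetical InMemoryAppender from the earlier sketch; the patch looks up the LoggerConfig of the calling logger, while this sketch uses the root logger name for simplicity.

import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.core.LoggerContext;
import org.apache.logging.log4j.core.config.Configuration;
import org.apache.logging.log4j.core.config.LoggerConfig;

public class AppenderRegistrationSketch {
  public static void main(String[] args) {
    InMemoryAppender appender = new InMemoryAppender("InMemoryAppender");
    appender.start();                        // log4j2 appenders must be started before use

    LoggerContext context = (LoggerContext) LogManager.getContext(false);
    Configuration configuration = context.getConfiguration();
    // Attach to the root LoggerConfig so output from all loggers reaches the appender.
    LoggerConfig rootConfig = configuration.getLoggerConfig(LogManager.ROOT_LOGGER_NAME);
    rootConfig.addAppender(appender, null, null);
    context.updateLoggers();                 // push the configuration change to live loggers

    LogManager.getLogger(AppenderRegistrationSketch.class).info("hello");
    System.out.println(appender.drain());    // prints the captured, laid-out event
  }
}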

http://git-wip-us.apache.org/repos/asf/hive/blob/c93d6c77/shims/common/pom.xml
----------------------------------------------------------------------
diff --git a/shims/common/pom.xml b/shims/common/pom.xml
index 9e9a3b7..dfdec2b 100644
--- a/shims/common/pom.xml
+++ b/shims/common/pom.xml
@@ -41,14 +41,19 @@
       <version>${commons-logging.version}</version>
     </dependency>
     <dependency>
-      <groupId>log4j</groupId>
-      <artifactId>log4j</artifactId>
-      <version>${log4j.version}</version>
+      <groupId>org.apache.logging.log4j</groupId>
+      <artifactId>log4j-1.2-api</artifactId>
+      <version>${log4j2.version}</version>
     </dependency>
     <dependency>
-      <groupId>log4j</groupId>
-      <artifactId>apache-log4j-extras</artifactId>
-      <version>${log4j-extras.version}</version>
+      <groupId>org.apache.logging.log4j</groupId>
+      <artifactId>log4j-slf4j-impl</artifactId>
+      <version>${log4j2.version}</version>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.logging.log4j</groupId>
+      <artifactId>log4j-jcl</artifactId>
+      <version>${log4j2.version}</version>
     </dependency>
     <dependency>
       <groupId>com.google.guava</groupId>

http://git-wip-us.apache.org/repos/asf/hive/blob/c93d6c77/shims/common/src/main/java/org/apache/hadoop/hive/shims/HiveEventCounter.java
----------------------------------------------------------------------
diff --git a/shims/common/src/main/java/org/apache/hadoop/hive/shims/HiveEventCounter.java b/shims/common/src/main/java/org/apache/hadoop/hive/shims/HiveEventCounter.java
deleted file mode 100644
index 224b135..0000000
--- a/shims/common/src/main/java/org/apache/hadoop/hive/shims/HiveEventCounter.java
+++ /dev/null
@@ -1,102 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hive.shims;
-
-import org.apache.log4j.Appender;
-import org.apache.log4j.AppenderSkeleton;
-import org.apache.log4j.Layout;
-import org.apache.log4j.spi.ErrorHandler;
-import org.apache.log4j.spi.Filter;
-import org.apache.log4j.spi.LoggingEvent;
-import org.apache.log4j.spi.OptionHandler;
-
-public class HiveEventCounter implements Appender, OptionHandler {
-
-  AppenderSkeleton hadoopEventCounter;
-
-  public HiveEventCounter() {
-    hadoopEventCounter = ShimLoader.getEventCounter();
-  }
-
-  @Override
-  public void close() {
-    hadoopEventCounter.close();
-  }
-
-  @Override
-  public boolean requiresLayout() {
-    return hadoopEventCounter.requiresLayout();
-  }
-
-  @Override
-  public void addFilter(Filter filter) {
-    hadoopEventCounter.addFilter(filter);
-  }
-
-  @Override
-  public void clearFilters() {
-    hadoopEventCounter.clearFilters();
-  }
-
-  @Override
-  public void doAppend(LoggingEvent event) {
-    hadoopEventCounter.doAppend(event);
-  }
-
-  @Override
-  public ErrorHandler getErrorHandler() {
-    return hadoopEventCounter.getErrorHandler();
-  }
-
-  @Override
-  public Filter getFilter() {
-    return hadoopEventCounter.getFilter();
-  }
-
-  @Override
-  public Layout getLayout() {
-    return hadoopEventCounter.getLayout();
-  }
-
-  @Override
-  public String getName() {
-    return hadoopEventCounter.getName();
-  }
-
-  @Override
-  public void setErrorHandler(ErrorHandler handler) {
-    hadoopEventCounter.setErrorHandler(handler);
-  }
-
-  @Override
-  public void setLayout(Layout layout) {
-    hadoopEventCounter.setLayout(layout);
-  }
-
-  @Override
-  public void setName(String name) {
-    hadoopEventCounter.setName(name);
-  }
-
-  @Override
-  public void activateOptions() {
-    hadoopEventCounter.activateOptions();
-  }
-
-}

http://git-wip-us.apache.org/repos/asf/hive/blob/c93d6c77/spark-client/src/test/resources/log4j.properties
----------------------------------------------------------------------
diff --git a/spark-client/src/test/resources/log4j.properties b/spark-client/src/test/resources/log4j.properties
deleted file mode 100644
index 93a60cc..0000000
--- a/spark-client/src/test/resources/log4j.properties
+++ /dev/null
@@ -1,23 +0,0 @@
-#
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-# Set everything to be logged to the file target/unit-tests.log
-log4j.rootCategory=DEBUG, console
-log4j.appender.console=org.apache.log4j.ConsoleAppender
-log4j.appender.console.target=System.err
-log4j.appender.console.layout=org.apache.log4j.PatternLayout
-log4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{1}: %m%n

http://git-wip-us.apache.org/repos/asf/hive/blob/c93d6c77/spark-client/src/test/resources/log4j2.xml
----------------------------------------------------------------------
diff --git a/spark-client/src/test/resources/log4j2.xml b/spark-client/src/test/resources/log4j2.xml
new file mode 100644
index 0000000..a435069
--- /dev/null
+++ b/spark-client/src/test/resources/log4j2.xml
@@ -0,0 +1,39 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+
+<Configuration status="info" strict="true" name="SparkClientLog4j2"
+ packages="org.apache.hadoop.hive.ql.log">
+
+  <Properties>
+    <Property name="spark.log.level">DEBUG</Property>
+    <Property name="spark.root.logger">console</Property>
+  </Properties>
+
+  <Appenders>
+    <Console name="console" target="SYSTEM_ERR">
+      <PatternLayout pattern="%d{yy/MM/dd HH:mm:ss} %p %c{1}: %m%n"/>
+    </Console>
+  </Appenders>
+
+  <Loggers>
+    <Root level="DEBUG">
+      <AppenderRef ref="${sys:spark.root.logger}" level="${sys:spark.log.level}"/>
+    </Root>
+  </Loggers>
+
+</Configuration>

http://git-wip-us.apache.org/repos/asf/hive/blob/c93d6c77/storage-api/pom.xml
----------------------------------------------------------------------
diff --git a/storage-api/pom.xml b/storage-api/pom.xml
index 71b51b8..71b79f1 100644
--- a/storage-api/pom.xml
+++ b/storage-api/pom.xml
@@ -32,13 +32,6 @@
   </properties>
 
   <dependencies>
-    <!-- dependencies are always listed in sorted order by groupId, artifectId -->
-    <!-- inter-project -->
-    <dependency>
-      <groupId>log4j</groupId>
-      <artifactId>log4j</artifactId>
-      <version>${log4j.version}</version>
-    </dependency>
     <!-- test inter-project -->
     <dependency>
       <groupId>junit</groupId>

http://git-wip-us.apache.org/repos/asf/hive/blob/c93d6c77/testutils/ptest2/pom.xml
----------------------------------------------------------------------
diff --git a/testutils/ptest2/pom.xml b/testutils/ptest2/pom.xml
index 211678e..2cf7f45 100644
--- a/testutils/ptest2/pom.xml
+++ b/testutils/ptest2/pom.xml
@@ -64,6 +64,26 @@ limitations under the License.
       <version>15.0</version>
     </dependency>
     <dependency>
+      <groupId>org.apache.logging.log4j</groupId>
+      <artifactId>log4j-1.2-api</artifactId>
+      <version>${log4j2.version}</version>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.logging.log4j</groupId>
+      <artifactId>log4j-web</artifactId>
+      <version>${log4j2.version}</version>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.logging.log4j</groupId>
+      <artifactId>log4j-slf4j-impl</artifactId>
+      <version>${log4j2.version}</version>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.logging.log4j</groupId>
+      <artifactId>log4j-jcl</artifactId>
+      <version>${log4j2.version}</version>
+    </dependency>
+    <dependency>
       <groupId>log4j</groupId>
       <artifactId>log4j</artifactId>
       <version>1.2.17</version>

http://git-wip-us.apache.org/repos/asf/hive/blob/c93d6c77/testutils/ptest2/src/main/resources/log4j.properties
----------------------------------------------------------------------
diff --git a/testutils/ptest2/src/main/resources/log4j.properties b/testutils/ptest2/src/main/resources/log4j.properties
deleted file mode 100644
index edb9696..0000000
--- a/testutils/ptest2/src/main/resources/log4j.properties
+++ /dev/null
@@ -1,37 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-hive.ptest.logdir=target
-
-log4j.rootLogger=DEBUG,FILE
-log4j.threshhold=ALL
-
-log4j.appender.FILE=org.apache.log4j.RollingFileAppender
-log4j.appender.FILE.File=${hive.ptest.logdir}/ptest.log
-log4j.appender.FILE.MaxFileSize=50MB
-log4j.appender.FILE.MaxBackupIndex=1
-log4j.appender.FILE.layout=org.apache.log4j.PatternLayout
-log4j.appender.FILE.layout.ConversionPattern=%d{yyyy-MM-dd HH:mm:ss} %-5p %c{1}:%L - %m%n
-
-log4j.logger.org.apache.http=INFO
-log4j.logger.org.springframework=INFO
-log4j.logger.org.jclouds=INFO
-log4j.logger.jclouds=INFO
-log4j.logger.org.apache.hive=DEBUG
-log4j.logger.org.apache.http=TRACE
-
-# Silence useless ZK logs
-log4j.logger.org.apache.zookeeper.server.NIOServerCnxn=WARN
-log4j.logger.org.apache.zookeeper.ClientCnxnSocketNIO=WARN


[12/50] [abbrv] hive git commit: HIVE-11498: HIVE Authorization v2 should not check permission for dummy entity (Dapeng Sun via Dong Chen)

Posted by se...@apache.org.
HIVE-11498: HIVE Authorization v2 should not check permission for dummy entity (Dapeng Sun via Dong Chen)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/70631bb4
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/70631bb4
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/70631bb4

Branch: refs/heads/hbase-metastore
Commit: 70631bb4cff0c0cbd7055e843e091bfd4fae8e4e
Parents: 7f3e481
Author: Dapeng Sun <sd...@apache.org>
Authored: Tue Aug 11 00:56:13 2015 -0400
Committer: Dong Chen <do...@intel.com>
Committed: Tue Aug 11 01:37:16 2015 -0400

----------------------------------------------------------------------
 ql/src/java/org/apache/hadoop/hive/ql/Driver.java        |  5 ++++-
 .../queries/clientpositive/authorization_1_sql_std.q     |  4 ++++
 .../results/clientpositive/authorization_1_sql_std.q.out | 11 +++++++++++
 3 files changed, 19 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/70631bb4/ql/src/java/org/apache/hadoop/hive/ql/Driver.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/Driver.java b/ql/src/java/org/apache/hadoop/hive/ql/Driver.java
index cc85f31..e7b7b55 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/Driver.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/Driver.java
@@ -787,7 +787,10 @@ public class Driver implements CommandProcessor {
     for(Entity privObject : privObjects){
       HivePrivilegeObjectType privObjType =
           AuthorizationUtils.getHivePrivilegeObjectType(privObject.getType());
-
+      if(privObject.isDummy()) {
+        //do not authorize dummy readEntity or writeEntity
+        continue;
+      }
       if(privObject instanceof ReadEntity && !((ReadEntity)privObject).isDirect()){
         // In case of views, the underlying views or tables are not direct dependencies
         // and are not used for authorization checks.

http://git-wip-us.apache.org/repos/asf/hive/blob/70631bb4/ql/src/test/queries/clientpositive/authorization_1_sql_std.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/authorization_1_sql_std.q b/ql/src/test/queries/clientpositive/authorization_1_sql_std.q
index 82896a4..b7b6710 100644
--- a/ql/src/test/queries/clientpositive/authorization_1_sql_std.q
+++ b/ql/src/test/queries/clientpositive/authorization_1_sql_std.q
@@ -6,6 +6,10 @@ set user.name=hive_admin_user;
 create table src_autho_test (key STRING, value STRING) ;
 
 set hive.security.authorization.enabled=true;
+
+--select dummy table
+select 1;
+
 set  role ADMIN; 
 --table grant to user
 

http://git-wip-us.apache.org/repos/asf/hive/blob/70631bb4/ql/src/test/results/clientpositive/authorization_1_sql_std.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/authorization_1_sql_std.q.out b/ql/src/test/results/clientpositive/authorization_1_sql_std.q.out
index 44c2fbd..2315fd4 100644
--- a/ql/src/test/results/clientpositive/authorization_1_sql_std.q.out
+++ b/ql/src/test/results/clientpositive/authorization_1_sql_std.q.out
@@ -6,6 +6,17 @@ POSTHOOK: query: create table src_autho_test (key STRING, value STRING)
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@src_autho_test
+PREHOOK: query: --select dummy table
+select 1
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+#### A masked pattern was here ####
+POSTHOOK: query: --select dummy table
+select 1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+#### A masked pattern was here ####
+1
 PREHOOK: query: set  role ADMIN
 PREHOOK: type: SHOW_ROLES
 POSTHOOK: query: set  role ADMIN


[25/50] [abbrv] hive git commit: HIVE-11451: SemanticAnalyzer throws IndexOutOfBounds Exception (Aihua Xu, reviewed by Chao Sun)

Posted by se...@apache.org.
HIVE-11451: SemanticAnalyzer throws IndexOutOfBounds Exception (Aihua Xu, reviewed by Chao Sun)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/db46e6e8
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/db46e6e8
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/db46e6e8

Branch: refs/heads/hbase-metastore
Commit: db46e6e8ccc4ee86fbd074dbc3d2e4fa7d88ce25
Parents: bd90fc3
Author: Aihua Xu <ai...@gmail.com>
Authored: Thu Aug 13 10:55:30 2015 -0700
Committer: Chao Sun <su...@apache.org>
Committed: Thu Aug 13 11:24:31 2015 -0700

----------------------------------------------------------------------
 .../hadoop/hive/ql/parse/SemanticAnalyzer.java       | 15 +++++++++------
 .../clientnegative/mismatch_columns_insertion.q      |  4 ++++
 .../clientnegative/mismatch_columns_insertion.q.out  |  9 +++++++++
 3 files changed, 22 insertions(+), 6 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/db46e6e8/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
index 5ea6f3f..b809a23 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
@@ -6724,6 +6724,14 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
       outColumnCnt += dpCtx.getNumDPCols();
     }
 
+    // The numbers of input columns and output columns should match for regular query
+    if (!updating() && !deleting() && inColumnCnt != outColumnCnt) {
+      String reason = "Table " + dest + " has " + outColumnCnt
+          + " columns, but query has " + inColumnCnt + " columns.";
+      throw new SemanticException(ErrorMsg.TARGET_TABLE_COLUMN_MISMATCH.getMsg(
+          qb.getParseInfo().getDestForClause(dest), reason));
+    }
+
     // Check column types
     boolean converted = false;
     int columnNumber = tableFields.size();
@@ -6830,12 +6838,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
         dpCtx.mapInputToDP(rowFields.subList(tableFields.size() + 1, rowFields.size()));
       }
     } else {
-      if (inColumnCnt != outColumnCnt) {
-        String reason = "Table " + dest + " has " + outColumnCnt
-            + " columns, but query has " + inColumnCnt + " columns.";
-        throw new SemanticException(ErrorMsg.TARGET_TABLE_COLUMN_MISMATCH.getMsg(
-            qb.getParseInfo().getDestForClause(dest), reason));
-      } else if (dynPart && dpCtx != null) {
+      if (dynPart && dpCtx != null) {
         // create the mapping from input ExprNode to dest table DP column
         dpCtx.mapInputToDP(rowFields.subList(tableFields.size(), rowFields.size()));
       }

http://git-wip-us.apache.org/repos/asf/hive/blob/db46e6e8/ql/src/test/queries/clientnegative/mismatch_columns_insertion.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientnegative/mismatch_columns_insertion.q b/ql/src/test/queries/clientnegative/mismatch_columns_insertion.q
new file mode 100644
index 0000000..dad1ec1
--- /dev/null
+++ b/ql/src/test/queries/clientnegative/mismatch_columns_insertion.q
@@ -0,0 +1,4 @@
+
+create table mismatch_columns(key string, value string);
+
+insert overwrite table mismatch_columns select key from srcpart where ds is not null;

http://git-wip-us.apache.org/repos/asf/hive/blob/db46e6e8/ql/src/test/results/clientnegative/mismatch_columns_insertion.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientnegative/mismatch_columns_insertion.q.out b/ql/src/test/results/clientnegative/mismatch_columns_insertion.q.out
new file mode 100644
index 0000000..831af12
--- /dev/null
+++ b/ql/src/test/results/clientnegative/mismatch_columns_insertion.q.out
@@ -0,0 +1,9 @@
+PREHOOK: query: create table mismatch_columns(key string, value string)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@mismatch_columns
+POSTHOOK: query: create table mismatch_columns(key string, value string)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@mismatch_columns
+FAILED: SemanticException [Error 10044]: Line 3:23 Cannot insert into target table because column number/types are different 'mismatch_columns': Table insclause-0 has 2 columns, but query has 1 columns.


[03/50] [abbrv] hive git commit: HIVE-11436: CBO: Calcite Operator To Hive Operator (Calcite Return Path) : dealing with empty char (Pengcheng Xiong, reviewed by Jesus Camacho Rodriguez)

Posted by se...@apache.org.
HIVE-11436: CBO: Calcite Operator To Hive Operator (Calcite Return Path) : dealing with empty char (Pengcheng Xiong, reviewed by Jesus Camacho Rodriguez)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/0b38612f
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/0b38612f
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/0b38612f

Branch: refs/heads/hbase-metastore
Commit: 0b38612f6aede1b2e87b4a3f466f27ebf3612d1e
Parents: 5abcc6a
Author: Pengcheng Xiong <px...@hortonworks.com>
Authored: Mon Aug 10 12:42:17 2015 +0300
Committer: Jesus Camacho Rodriguez <jc...@apache.org>
Committed: Mon Aug 10 12:42:17 2015 +0300

----------------------------------------------------------------------
 .../calcite/translator/ExprNodeConverter.java     | 18 ++----------------
 1 file changed, 2 insertions(+), 16 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/0b38612f/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/ExprNodeConverter.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/ExprNodeConverter.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/ExprNodeConverter.java
index b6a79db..00bf009 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/ExprNodeConverter.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/ExprNodeConverter.java
@@ -225,23 +225,9 @@ public class ExprNodeConverter extends RexVisitorImpl<ExprNodeDesc> {
     case DECIMAL:
       return new ExprNodeConstantDesc(TypeInfoFactory.getDecimalTypeInfo(lType.getPrecision(),
           lType.getScale()), HiveDecimal.create((BigDecimal)literal.getValue3()));
-    case VARCHAR: {
-      int varcharLength = lType.getPrecision();
-      // If we cannot use Varchar due to type length restrictions, we use String
-      if (varcharLength < 1 || varcharLength > HiveVarchar.MAX_VARCHAR_LENGTH) {
-        return new ExprNodeConstantDesc(TypeInfoFactory.stringTypeInfo, literal.getValue3());
-      }
-      return new ExprNodeConstantDesc(TypeInfoFactory.getVarcharTypeInfo(varcharLength),
-          new HiveVarchar((String) literal.getValue3(), varcharLength));
-    }
+    case VARCHAR:
     case CHAR: {
-      int charLength = lType.getPrecision();
-      // If we cannot use Char due to type length restrictions, we use String
-      if (charLength < 1 || charLength > HiveChar.MAX_CHAR_LENGTH) {
-        return new ExprNodeConstantDesc(TypeInfoFactory.stringTypeInfo, literal.getValue3());
-      }
-      return new ExprNodeConstantDesc(TypeInfoFactory.getCharTypeInfo(charLength),
-          new HiveChar((String) literal.getValue3(), charLength));
+      return new ExprNodeConstantDesc(TypeInfoFactory.stringTypeInfo, literal.getValue3());
     }
     case INTERVAL_YEAR_MONTH: {
       BigDecimal monthsBd = (BigDecimal) literal.getValue();


[19/50] [abbrv] hive git commit: HIVE-11442: Remove commons-configuration.jar from Hive distribution

Posted by se...@apache.org.
HIVE-11442: Remove commons-configuration.jar from Hive distribution


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/c4ceefb4
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/c4ceefb4
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/c4ceefb4

Branch: refs/heads/hbase-metastore
Commit: c4ceefb4c7a5e17780e43acbeabdcca872bef3ae
Parents: df138f2
Author: Daniel Dai <da...@hortonworks.com>
Authored: Wed Aug 12 10:12:02 2015 -0700
Committer: Daniel Dai <da...@hortonworks.com>
Committed: Wed Aug 12 10:12:59 2015 -0700

----------------------------------------------------------------------
 jdbc/pom.xml                        | 1 +
 packaging/src/main/assembly/bin.xml | 3 ++-
 2 files changed, 3 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/c4ceefb4/jdbc/pom.xml
----------------------------------------------------------------------
diff --git a/jdbc/pom.xml b/jdbc/pom.xml
index 4fee22c..371d709 100644
--- a/jdbc/pom.xml
+++ b/jdbc/pom.xml
@@ -189,6 +189,7 @@
               <artifactSet>
                 <excludes>
                   <exclude>org.apache.commons:commons-compress</exclude>
+                  <exclude>commons-configuration:commons-configuration</exclude>
                   <exclude>org.apache.hadoop:*</exclude>
                   <exclude>org.apache.hive:hive-ant</exclude>
                   <exclude>org.apache.ant:*</exclude>

http://git-wip-us.apache.org/repos/asf/hive/blob/c4ceefb4/packaging/src/main/assembly/bin.xml
----------------------------------------------------------------------
diff --git a/packaging/src/main/assembly/bin.xml b/packaging/src/main/assembly/bin.xml
index a1c176f..63253c5 100644
--- a/packaging/src/main/assembly/bin.xml
+++ b/packaging/src/main/assembly/bin.xml
@@ -41,7 +41,8 @@
       <excludes>
         <exclude>org.apache.hadoop:*</exclude>
         <exclude>org.apache.hive.hcatalog:*</exclude>
-		    <exclude>org.slf4j:*</exclude>
+        <exclude>org.slf4j:*</exclude>
+        <exclude>commons-configuration:commons-configuration</exclude>
       </excludes>
     </dependencySet>
     <dependencySet>


[11/50] [abbrv] hive git commit: HIVE-11398: Parse wide OR and wide AND trees to flat OR/AND trees (Jesus Camacho Rodriguez via Gopal V)

Posted by se...@apache.org.
HIVE-11398: Parse wide OR and wide AND trees to flat OR/AND trees (Jesus Camacho Rodriguez via Gopal V)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/7f3e4811
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/7f3e4811
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/7f3e4811

Branch: refs/heads/hbase-metastore
Commit: 7f3e4811ee0293e4b3889984dc7d790415532307
Parents: 538ae70
Author: Gunther Hagleitner <gu...@apache.org>
Authored: Mon Aug 10 19:57:19 2015 -0700
Committer: Gunther Hagleitner <gu...@apache.org>
Committed: Mon Aug 10 19:57:19 2015 -0700

----------------------------------------------------------------------
 .../test/results/positive/hbase_timestamp.q.out |   8 +-
 .../optimizer/ConstantPropagateProcFactory.java |  83 +++++++---
 .../ql/optimizer/pcr/PcrExprProcFactory.java    | 151 +++++++++++++------
 .../hive/ql/optimizer/ppr/PartitionPruner.java  |  69 ++++++---
 .../hive/ql/parse/TypeCheckProcFactory.java     |  40 ++++-
 .../hive/ql/udf/generic/GenericUDFOPAnd.java    |  59 +++++---
 .../hive/ql/udf/generic/GenericUDFOPOr.java     |  59 +++++---
 .../queries/clientpositive/flatten_and_or.q     |  17 +++
 .../annotate_stats_deep_filters.q.out           |   4 +-
 .../clientpositive/dynamic_rdd_cache.q.out      |   6 +-
 .../results/clientpositive/flatten_and_or.q.out |  66 ++++++++
 .../groupby_multi_single_reducer3.q.out         |   8 +-
 .../clientpositive/input_testxpath4.q.out       |   2 +-
 .../join_cond_pushdown_unqual4.q.out            |   2 +-
 .../test/results/clientpositive/lineage3.q.out  |   2 +-
 .../clientpositive/orc_predicate_pushdown.q.out |  36 ++---
 .../results/clientpositive/ppd_gby_join.q.out   |   4 +-
 .../test/results/clientpositive/ppd_join.q.out  |   4 +-
 .../test/results/clientpositive/ppd_join2.q.out |   4 +-
 .../test/results/clientpositive/ppd_join3.q.out |   6 +-
 .../clientpositive/ppd_outer_join4.q.out        |   2 +-
 .../spark/dynamic_rdd_cache.q.out               |   6 +-
 .../spark/groupby_multi_single_reducer3.q.out   |   8 +-
 .../spark/join_cond_pushdown_unqual4.q.out      |   2 +-
 .../clientpositive/spark/ppd_gby_join.q.out     |   4 +-
 .../results/clientpositive/spark/ppd_join.q.out |   4 +-
 .../clientpositive/spark/ppd_join2.q.out        |   4 +-
 .../clientpositive/spark/ppd_join3.q.out        |   6 +-
 .../clientpositive/spark/ppd_outer_join4.q.out  |   2 +-
 .../clientpositive/spark/vectorization_0.q.out  |   2 +-
 .../clientpositive/spark/vectorization_13.q.out |   4 +-
 .../clientpositive/spark/vectorization_15.q.out |   2 +-
 .../clientpositive/spark/vectorization_17.q.out |   2 +-
 .../spark/vectorization_short_regress.q.out     |  22 +--
 .../clientpositive/spark/vectorized_case.q.out  |   2 +-
 .../tez/vector_mr_diff_schema_alias.q.out       |   2 +-
 .../clientpositive/tez/vectorization_0.q.out    |   2 +-
 .../clientpositive/tez/vectorization_13.q.out   |   4 +-
 .../clientpositive/tez/vectorization_15.q.out   |   2 +-
 .../clientpositive/tez/vectorization_17.q.out   |   2 +-
 .../clientpositive/tez/vectorization_7.q.out    |   4 +-
 .../clientpositive/tez/vectorization_8.q.out    |   4 +-
 .../tez/vectorization_short_regress.q.out       |  22 +--
 .../clientpositive/tez/vectorized_case.q.out    |   2 +-
 ql/src/test/results/clientpositive/udf_or.q.out |   4 +-
 .../vector_mr_diff_schema_alias.q.out           |   2 +-
 .../clientpositive/vectorization_0.q.out        |   2 +-
 .../clientpositive/vectorization_13.q.out       |   4 +-
 .../clientpositive/vectorization_15.q.out       |   2 +-
 .../clientpositive/vectorization_17.q.out       |   2 +-
 .../clientpositive/vectorization_7.q.out        |   4 +-
 .../clientpositive/vectorization_8.q.out        |   4 +-
 .../vectorization_short_regress.q.out           |  22 +--
 .../clientpositive/vectorized_case.q.out        |   2 +-
 54 files changed, 530 insertions(+), 264 deletions(-)
----------------------------------------------------------------------
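
[Editor's note] As a rough illustration of what "flat" means here (a hedged sketch, not the parser change itself, which lives in TypeCheckProcFactory): a left-deep chain such as AND(AND(AND(a, b), c), d) becomes a single n-ary AND(a, b, c, d), so downstream optimizers like constant propagation and partition condition removal can walk one child list instead of recursing through a deep binary tree.

import java.util.ArrayList;
import java.util.List;

// Hypothetical toy expression tree used only to illustrate flattening; the real
// change operates on Hive's ExprNodeGenericFuncDesc nodes.
class Expr {
  final String op;            // "AND", "OR", or a leaf predicate such as "key > 100"
  final List<Expr> children;

  Expr(String op, List<Expr> children) {
    this.op = op;
    this.children = children;
  }

  /** Collect the operands of nested same-operator nodes into one flat child list. */
  static List<Expr> flatten(Expr node, String op) {
    List<Expr> flat = new ArrayList<Expr>();
    if (node.op.equals(op)) {
      for (Expr child : node.children) {
        flat.addAll(flatten(child, op));  // recurse into same-operator children
      }
    } else {
      flat.add(node);                     // different operator or leaf: keep as-is
    }
    return flat;
  }
}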


http://git-wip-us.apache.org/repos/asf/hive/blob/7f3e4811/hbase-handler/src/test/results/positive/hbase_timestamp.q.out
----------------------------------------------------------------------
diff --git a/hbase-handler/src/test/results/positive/hbase_timestamp.q.out b/hbase-handler/src/test/results/positive/hbase_timestamp.q.out
index 7aef504..538e551 100644
--- a/hbase-handler/src/test/results/positive/hbase_timestamp.q.out
+++ b/hbase-handler/src/test/results/positive/hbase_timestamp.q.out
@@ -174,7 +174,7 @@ STAGE PLANS:
             filterExpr: (((key > 100.0) and (key < 400.0)) and (time < 200000000000)) (type: boolean)
             Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
             Filter Operator
-              predicate: ((UDFToDouble(key) > 100.0) and ((UDFToDouble(key) < 400.0) and (time < 200000000000))) (type: boolean)
+              predicate: ((UDFToDouble(key) > 100.0) and (UDFToDouble(key) < 400.0) and (time < 200000000000)) (type: boolean)
               Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
               Select Operator
                 expressions: key (type: string), value (type: string), CAST( time AS TIMESTAMP) (type: timestamp)
@@ -223,7 +223,7 @@ STAGE PLANS:
             filterExpr: (((key > 100.0) and (key < 400.0)) and (time > 100000000000)) (type: boolean)
             Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
             Filter Operator
-              predicate: ((UDFToDouble(key) > 100.0) and ((UDFToDouble(key) < 400.0) and (time > 100000000000))) (type: boolean)
+              predicate: ((UDFToDouble(key) > 100.0) and (UDFToDouble(key) < 400.0) and (time > 100000000000)) (type: boolean)
               Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
               Select Operator
                 expressions: key (type: string), value (type: string), CAST( time AS TIMESTAMP) (type: timestamp)
@@ -274,7 +274,7 @@ STAGE PLANS:
             filterExpr: (((key > 100.0) and (key < 400.0)) and (time <= 100000000000)) (type: boolean)
             Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
             Filter Operator
-              predicate: ((UDFToDouble(key) > 100.0) and ((UDFToDouble(key) < 400.0) and (time <= 100000000000))) (type: boolean)
+              predicate: ((UDFToDouble(key) > 100.0) and (UDFToDouble(key) < 400.0) and (time <= 100000000000)) (type: boolean)
               Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
               Select Operator
                 expressions: key (type: string), value (type: string), CAST( time AS TIMESTAMP) (type: timestamp)
@@ -323,7 +323,7 @@ STAGE PLANS:
             filterExpr: (((key > 100.0) and (key < 400.0)) and (time >= 200000000000)) (type: boolean)
             Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
             Filter Operator
-              predicate: ((UDFToDouble(key) > 100.0) and ((UDFToDouble(key) < 400.0) and (time >= 200000000000))) (type: boolean)
+              predicate: ((UDFToDouble(key) > 100.0) and (UDFToDouble(key) < 400.0) and (time >= 200000000000)) (type: boolean)
               Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
               Select Operator
                 expressions: key (type: string), value (type: string), CAST( time AS TIMESTAMP) (type: timestamp)

http://git-wip-us.apache.org/repos/asf/hive/blob/7f3e4811/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ConstantPropagateProcFactory.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ConstantPropagateProcFactory.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ConstantPropagateProcFactory.java
index 410735c..cf10c52 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ConstantPropagateProcFactory.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ConstantPropagateProcFactory.java
@@ -17,6 +17,7 @@ package org.apache.hadoop.hive.ql.optimizer;
 
 import java.io.Serializable;
 import java.util.ArrayList;
+import java.util.BitSet;
 import java.util.HashMap;
 import java.util.HashSet;
 import java.util.Iterator;
@@ -86,6 +87,7 @@ import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectIn
 import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorUtils;
 import org.apache.hadoop.hive.serde2.typeinfo.PrimitiveTypeInfo;
 import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo;
+import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory;
 import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils;
 
 import com.google.common.collect.ImmutableSet;
@@ -506,53 +508,92 @@ public final class ConstantPropagateProcFactory {
      }
     }
     if (udf instanceof GenericUDFOPAnd) {
-      for (int i = 0; i < 2; i++) {
+      final BitSet positionsToRemove = new BitSet();
+      final List<ExprNodeDesc> notNullExprs = new ArrayList<ExprNodeDesc>();
+      final List<Integer> notNullExprsPositions = new ArrayList<Integer>();
+      final List<ExprNodeDesc> compareExprs = new ArrayList<ExprNodeDesc>();
+      for (int i = 0; i < newExprs.size(); i++) {
         ExprNodeDesc childExpr = newExprs.get(i);
-        ExprNodeDesc other = newExprs.get(Math.abs(i - 1));
         if (childExpr instanceof ExprNodeConstantDesc) {
           ExprNodeConstantDesc c = (ExprNodeConstantDesc) childExpr;
           if (Boolean.TRUE.equals(c.getValue())) {
-
             // if true, prune it
-            return other;
+            positionsToRemove.set(i);
           } else {
-
-            // if false return false
+            // if false, return false
             return childExpr;
           }
-        } else // Try to fold (key = 86) and (key is not null) to (key = 86)
-        if (childExpr instanceof ExprNodeGenericFuncDesc &&
-            ((ExprNodeGenericFuncDesc)childExpr).getGenericUDF() instanceof GenericUDFOPNotNull &&
-            childExpr.getChildren().get(0) instanceof ExprNodeColumnDesc && other instanceof ExprNodeGenericFuncDesc
-            && ((ExprNodeGenericFuncDesc)other).getGenericUDF() instanceof GenericUDFBaseCompare
-            && other.getChildren().size() == 2) {
-          ExprNodeColumnDesc colDesc = getColumnExpr(other.getChildren().get(0));
+        } else if (childExpr instanceof ExprNodeGenericFuncDesc &&
+                ((ExprNodeGenericFuncDesc)childExpr).getGenericUDF() instanceof GenericUDFOPNotNull &&
+                childExpr.getChildren().get(0) instanceof ExprNodeColumnDesc) {
+          notNullExprs.add(childExpr.getChildren().get(0));
+          notNullExprsPositions.add(i);
+        } else if (childExpr instanceof ExprNodeGenericFuncDesc
+                && ((ExprNodeGenericFuncDesc)childExpr).getGenericUDF() instanceof GenericUDFBaseCompare
+                && childExpr.getChildren().size() == 2) {
+          ExprNodeColumnDesc colDesc = getColumnExpr(childExpr.getChildren().get(0));
           if (null == colDesc) {
-            colDesc = getColumnExpr(other.getChildren().get(1));
+            colDesc = getColumnExpr(childExpr.getChildren().get(1));
           }
-          if (null != colDesc && colDesc.isSame(childExpr.getChildren().get(0))) {
-            return other;
+          if (colDesc != null) {
+            compareExprs.add(colDesc);
+          }
+        }
+      }
+      // Try to fold (key = 86) and (key is not null) to (key = 86)
+      for (int i = 0; i < notNullExprs.size(); i++) {
+        for (ExprNodeDesc other : compareExprs) {
+          if (notNullExprs.get(i).isSame(other)) {
+            positionsToRemove.set(notNullExprsPositions.get(i));
+            break;
           }
         }
       }
+      // Remove unnecessary expressions
+      int pos = 0;
+      int removed = 0;
+      while ((pos = positionsToRemove.nextSetBit(pos)) != -1) {
+        newExprs.remove(pos - removed);
+        pos++;
+        removed++;
+      }
+      if (newExprs.size() == 0) {
+        return new ExprNodeConstantDesc(TypeInfoFactory.booleanTypeInfo, Boolean.TRUE);
+      }
+      if (newExprs.size() == 1) {
+        return newExprs.get(0);
+      }
     }
 
     if (udf instanceof GenericUDFOPOr) {
-      for (int i = 0; i < 2; i++) {
+      final BitSet positionsToRemove = new BitSet();
+      for (int i = 0; i < newExprs.size(); i++) {
         ExprNodeDesc childExpr = newExprs.get(i);
         if (childExpr instanceof ExprNodeConstantDesc) {
           ExprNodeConstantDesc c = (ExprNodeConstantDesc) childExpr;
           if (Boolean.FALSE.equals(c.getValue())) {
-
             // if false, prune it
-            return newExprs.get(Math.abs(i - 1));
-          } else {
-
+            positionsToRemove.set(i);
+          } else
+          if (Boolean.TRUE.equals(c.getValue())) {
             // if true return true
             return childExpr;
           }
         }
       }
+      int pos = 0;
+      int removed = 0;
+      while ((pos = positionsToRemove.nextSetBit(pos)) != -1) {
+        newExprs.remove(pos - removed);
+        pos++;
+        removed++;
+      }
+      if (newExprs.size() == 0) {
+        return new ExprNodeConstantDesc(TypeInfoFactory.booleanTypeInfo, Boolean.FALSE);
+      }
+      if (newExprs.size() == 1) {
+        return newExprs.get(0);
+      }
     }
 
     if (udf instanceof GenericUDFWhen) {
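
[Editor's note] The pruning rules the loops above implement for a flat, n-ary AND can be summarized with this stand-alone sketch (names and the use of plain Booleans/Objects in place of ExprNodeDesc are illustrative only; the additional IS NOT NULL folding is omitted): TRUE children are dropped, a FALSE child collapses the conjunction, an empty result folds to TRUE, and a single survivor replaces the AND node.

import java.util.ArrayList;
import java.util.Collections;
import java.util.List;

// Hypothetical, simplified model of the AND-folding above; a child is either a
// Boolean constant or an opaque Object standing in for a non-constant expression.
public final class AndFoldSketch {
  static List<Object> foldAnd(List<Object> children) {
    List<Object> kept = new ArrayList<Object>();
    for (Object child : children) {
      if (Boolean.TRUE.equals(child)) {
        continue;                                                  // TRUE is neutral: prune it
      }
      if (Boolean.FALSE.equals(child)) {
        return Collections.<Object>singletonList(Boolean.FALSE);   // FALSE absorbs the conjunction
      }
      kept.add(child);                                             // keep non-constant children
    }
    if (kept.isEmpty()) {
      return Collections.<Object>singletonList(Boolean.TRUE);      // AND of nothing is TRUE
    }
    return kept;                                                   // one survivor stands for the whole AND
  }
}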

http://git-wip-us.apache.org/repos/asf/hive/blob/7f3e4811/ql/src/java/org/apache/hadoop/hive/ql/optimizer/pcr/PcrExprProcFactory.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/pcr/PcrExprProcFactory.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/pcr/PcrExprProcFactory.java
index d5102bc..71a6c73 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/pcr/PcrExprProcFactory.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/pcr/PcrExprProcFactory.java
@@ -25,6 +25,8 @@ import java.util.List;
 import java.util.Map;
 import java.util.Stack;
 
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hive.ql.exec.FunctionRegistry;
 import org.apache.hadoop.hive.ql.lib.DefaultGraphWalker;
 import org.apache.hadoop.hive.ql.lib.DefaultRuleDispatcher;
@@ -57,6 +59,9 @@ import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory;
  * It also generates node by Modifying expr trees with partition conditions removed
  */
 public final class PcrExprProcFactory {
+
+  public static final Log LOG = LogFactory.getLog(PcrExprProcFactory.class.getName());
+
   static Object evalExprWithPart(ExprNodeDesc expr, Partition p, List<VirtualColumn> vcs)
       throws SemanticException {
     StructObjectInspector rowObjectInspector;
@@ -124,25 +129,39 @@ public final class PcrExprProcFactory {
     // prevent instantiation
   }
 
-  static Boolean opAnd(Boolean op1, Boolean op2) {
+  static Boolean opAnd(Boolean... ops) {
     // When people forget to quote a string, op1/op2 is null.
     // For example, select * from some_table where ds > 2012-12-1 and ds < 2012-12-2 .
-    if (op1 != null && op1.equals(Boolean.FALSE) || op2 != null && op2.equals(Boolean.FALSE)) {
-      return Boolean.FALSE;
+    boolean anyNull = false;
+    for (Boolean op : ops) {
+      if (op == null) {
+        anyNull = true;
+        continue;
+      }
+      if (op.equals(Boolean.FALSE)) {
+        return Boolean.FALSE;
+      }
     }
-    if (op1 == null || op2 == null) {
+    if (anyNull) {
       return null;
     }
     return Boolean.TRUE;
   }
 
-  static Boolean opOr(Boolean op1, Boolean op2) {
+  static Boolean opOr(Boolean... ops) {
     // When people forget to quote a string, op1/op2 is null.
     // For example, select * from some_table where ds > 2012-12-1 or ds < 2012-12-2 .
-    if (op1 != null && op1.equals(Boolean.TRUE) || op2 != null && op2.equals(Boolean.TRUE)) {
-      return Boolean.TRUE;
+    boolean anyNull = false;
+    for (Boolean op : ops) {
+      if (op == null) {
+        anyNull = true;
+        continue;
+      }
+      if (op.equals(Boolean.TRUE)) {
+        return Boolean.TRUE;
+      }
     }
-    if (op1 == null || op2 == null) {
+    if (anyNull) {
       return null;
     }
     return Boolean.FALSE;
@@ -255,51 +274,95 @@ public final class PcrExprProcFactory {
               getOutExpr(fd, nodeOutputs));
         }
       } else if (FunctionRegistry.isOpAnd(fd)) {
-        assert (nodeOutputs.length == 2);
-        NodeInfoWrapper c1 = (NodeInfoWrapper)nodeOutputs[0];
-        NodeInfoWrapper c2 = (NodeInfoWrapper)nodeOutputs[1];
-
-        if (c1.state == WalkState.FALSE) {
-          return c1;
-        } else if (c2.state == WalkState.FALSE) {
-          return c2;
-        } else if (c1.state == WalkState.TRUE) {
-          return c2;
-        } else if (c2.state == WalkState.TRUE) {
-          return c1;
-        } else if (c1.state == WalkState.UNKNOWN || c2.state == WalkState.UNKNOWN) {
-          return new NodeInfoWrapper(WalkState.UNKNOWN, null, getOutExpr(fd, nodeOutputs));
-        } else if (c1.state == WalkState.DIVIDED && c2.state == WalkState.DIVIDED) {
+        boolean anyUnknown = false; // Whether any of the node outputs is unknown
+        boolean allDivided = true; // Whether all of the node outputs are divided
+        List<NodeInfoWrapper> newNodeOutputsList =
+                new ArrayList<NodeInfoWrapper>(nodeOutputs.length);
+        for (int i = 0; i < nodeOutputs.length; i++) {
+          NodeInfoWrapper c = (NodeInfoWrapper)nodeOutputs[i];
+          if (c.state == WalkState.FALSE) {
+            return c;
+          }
+          if (c.state == WalkState.UNKNOWN) {
+            anyUnknown = true;
+          }
+          if (c.state != WalkState.DIVIDED) {
+            allDivided = false;
+          }
+          if (c.state != WalkState.TRUE) {
+            newNodeOutputsList.add(c);
+          }
+        }
+        // If all of them were true, return true
+        if (newNodeOutputsList.size() == 0) {
+          return new NodeInfoWrapper(WalkState.TRUE, null,
+                  new ExprNodeConstantDesc(fd.getTypeInfo(), Boolean.TRUE));
+        }
+        // If we are left with a single child, return the child
+        if (newNodeOutputsList.size() == 1) {
+          return newNodeOutputsList.get(0);
+        }
+        Object[] newNodeOutputs = newNodeOutputsList.toArray();
+        if (anyUnknown) {
+          return new NodeInfoWrapper(WalkState.UNKNOWN, null, getOutExpr(fd, newNodeOutputs));
+        }
+        if (allDivided) {
           Boolean[] results = new Boolean[ctx.getPartList().size()];
           for (int i = 0; i < ctx.getPartList().size(); i++) {
-            results[i] = opAnd(c1.ResultVector[i], c2.ResultVector[i]);
+            Boolean[] andArray = new Boolean[newNodeOutputs.length];
+            for (int j = 0; j < newNodeOutputs.length; j++) {
+              andArray[j] = ((NodeInfoWrapper) newNodeOutputs[j]).ResultVector[i];
+            }
+            results[i] = opAnd(andArray);
           }
-          return getResultWrapFromResults(results, fd, nodeOutputs);
+          return getResultWrapFromResults(results, fd, newNodeOutputs);
         }
-        return new NodeInfoWrapper(WalkState.UNKNOWN, null, getOutExpr(fd, nodeOutputs));
+        return new NodeInfoWrapper(WalkState.UNKNOWN, null, getOutExpr(fd, newNodeOutputs));
       } else if (FunctionRegistry.isOpOr(fd)) {
-        assert (nodeOutputs.length == 2);
-        NodeInfoWrapper c1 = (NodeInfoWrapper)nodeOutputs[0];
-        NodeInfoWrapper c2 = (NodeInfoWrapper)nodeOutputs[1];
-
-        if (c1.state == WalkState.TRUE) {
-          return c1;
-        } else if (c2.state == WalkState.TRUE) {
-          return c2;
-        } else if (c1.state == WalkState.FALSE) {
-          return c2;
-        } else if (c2.state == WalkState.FALSE) {
-          return c1;
-        } else if (c1.state == WalkState.UNKNOWN || c2.state == WalkState.UNKNOWN) {
-          return new NodeInfoWrapper(WalkState.UNKNOWN, null, getOutExpr(fd, nodeOutputs));
-        } else if (c1.state == WalkState.DIVIDED && c2.state == WalkState.DIVIDED) {
+        boolean anyUnknown = false; // Whether any of the node outputs is unknown
+        boolean allDivided = true; // Whether all of the node outputs are divided
+        List<NodeInfoWrapper> newNodeOutputsList =
+                new ArrayList<NodeInfoWrapper>(nodeOutputs.length);
+        for (int i = 0; i < nodeOutputs.length; i++) {
+          NodeInfoWrapper c = (NodeInfoWrapper)nodeOutputs[i];
+          if (c.state == WalkState.TRUE) {
+            return c;
+          }
+          if (c.state == WalkState.UNKNOWN) {
+            anyUnknown = true;
+          }
+          if (c.state != WalkState.DIVIDED) {
+            allDivided = false;
+          }
+          if (c.state != WalkState.FALSE) {
+            newNodeOutputsList.add(c);
+          }
+        }
+        // If all of them were false, return false
+        if (newNodeOutputsList.size() == 0) {
+          return new NodeInfoWrapper(WalkState.FALSE, null,
+                  new ExprNodeConstantDesc(fd.getTypeInfo(), Boolean.FALSE));
+        }
+        // If we are left with a single child, return the child
+        if (newNodeOutputsList.size() == 1) {
+          return newNodeOutputsList.get(0);
+        }
+        Object[] newNodeOutputs = newNodeOutputsList.toArray();
+        if (anyUnknown) {
+          return new NodeInfoWrapper(WalkState.UNKNOWN, null, getOutExpr(fd, newNodeOutputs));
+        }
+        if (allDivided) {
           Boolean[] results = new Boolean[ctx.getPartList().size()];
           for (int i = 0; i < ctx.getPartList().size(); i++) {
-            results[i] = opOr(c1.ResultVector[i], c2.ResultVector[i]);
+            Boolean[] orArray = new Boolean[newNodeOutputs.length];
+            for (int j = 0; j < newNodeOutputs.length; j++) {
+              orArray[j] = ((NodeInfoWrapper) newNodeOutputs[j]).ResultVector[i];
+            }
+            results[i] = opOr(orArray);
           }
-          return getResultWrapFromResults(results, fd, nodeOutputs);
+          return getResultWrapFromResults(results, fd, newNodeOutputs);
         }
-        return new NodeInfoWrapper(WalkState.UNKNOWN, null, getOutExpr(fd, nodeOutputs));
+        return new NodeInfoWrapper(WalkState.UNKNOWN, null, getOutExpr(fd, newNodeOutputs));
       } else if (!FunctionRegistry.isDeterministic(fd.getGenericUDF())) {
         // If it's a non-deterministic UDF, set unknown to true
         return new NodeInfoWrapper(WalkState.UNKNOWN, null,
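
A quick note on the AND/OR branches above: they now fold any number of child outputs instead of exactly two. A definite FALSE (or TRUE, for OR) short-circuits, TRUE (or FALSE) children are dropped, a single surviving child is returned as-is, any UNKNOWN makes the whole node UNKNOWN, and only when every remaining child is DIVIDED are the per-partition result vectors combined element-wise via opAnd/opOr over a Boolean[]. The following is a minimal standalone sketch of that element-wise combination, assuming those helpers implement ordinary SQL three-valued logic (null = unknown); the class below is illustrative, not a Hive API.

import java.util.Arrays;

// Standalone sketch: combine several per-partition result vectors with an
// n-ary, three-valued AND. null means "unknown for this partition".
public class ThreeValuedFoldSketch {

  static Boolean and3v(Boolean... values) {
    boolean sawUnknown = false;
    for (Boolean v : values) {
      if (v != null && !v) {
        return Boolean.FALSE;     // any definite false decides the conjunction
      }
      if (v == null) {
        sawUnknown = true;        // remember an unknown operand
      }
    }
    return sawUnknown ? null : Boolean.TRUE;
  }

  public static void main(String[] args) {
    // Three DIVIDED children, each with one result per partition.
    Boolean[][] vectors = {
        {true, false, null},
        {true, true,  true},
        {true, null,  false}
    };
    Boolean[] combined = new Boolean[vectors[0].length];
    for (int i = 0; i < combined.length; i++) {
      Boolean[] column = new Boolean[vectors.length];
      for (int j = 0; j < vectors.length; j++) {
        column[j] = vectors[j][i];   // gather the child results for partition i
      }
      combined[i] = and3v(column);
    }
    System.out.println(Arrays.toString(combined));  // [true, false, false]
  }
}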

http://git-wip-us.apache.org/repos/asf/hive/blob/7f3e4811/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ppr/PartitionPruner.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ppr/PartitionPruner.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ppr/PartitionPruner.java
index dccb598..d264559 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ppr/PartitionPruner.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ppr/PartitionPruner.java
@@ -277,35 +277,64 @@ public class PartitionPruner implements Transform {
       GenericUDF udf = ((ExprNodeGenericFuncDesc)expr).getGenericUDF();
       boolean isAnd = udf instanceof GenericUDFOPAnd;
       boolean isOr = udf instanceof GenericUDFOPOr;
+      List<ExprNodeDesc> children = expr.getChildren();
 
-      if (isAnd || isOr) {
-        List<ExprNodeDesc> children = expr.getChildren();
-        ExprNodeDesc left = compactExpr(children.get(0));
-        ExprNodeDesc right = compactExpr(children.get(1));
+      if (isAnd) {
         // Non-partition expressions are converted to nulls.
-        if (left == null && right == null) {
+        List<ExprNodeDesc> newChildren = new ArrayList<ExprNodeDesc>();
+        boolean allTrue = true;
+        for (ExprNodeDesc child : children) {
+          ExprNodeDesc compactChild = compactExpr(child);
+          if (compactChild != null) {
+            if (!isTrueExpr(compactChild)) {
+              newChildren.add(compactChild);
+              allTrue = false;
+            }
+            if (isFalseExpr(compactChild)) {
+              return new ExprNodeConstantDesc(Boolean.FALSE);
+            }
+          } else {
+            allTrue = false;
+          }
+        }
+
+        if (newChildren.size() == 0) {
           return null;
-        } else if (left == null) {
-          return isAnd ? right : null;
-        } else if (right == null) {
-          return isAnd ? left : null;
         }
-        // Handle boolean expressions
-        boolean isLeftFalse = isFalseExpr(left), isRightFalse = isFalseExpr(right),
-            isLeftTrue = isTrueExpr(left), isRightTrue = isTrueExpr(right);
-        if ((isRightTrue && isLeftTrue) || (isOr && (isLeftTrue || isRightTrue))) {
+        if (newChildren.size() == 1) {
+          return newChildren.get(0);
+        }
+        if (allTrue) {
           return new ExprNodeConstantDesc(Boolean.TRUE);
-        } else if ((isRightFalse && isLeftFalse) || (isAnd && (isLeftFalse || isRightFalse))) {
+        }
+        // Nothing to compact, update expr with compacted children.
+        ((ExprNodeGenericFuncDesc) expr).setChildren(newChildren);
+      } else if (isOr) {
+        // Non-partition expressions are converted to nulls.
+        List<ExprNodeDesc> newChildren = new ArrayList<ExprNodeDesc>();
+        boolean allFalse = true;
+        for (ExprNodeDesc child : children) {
+          ExprNodeDesc compactChild = compactExpr(child);
+          if (compactChild != null) {
+            if (isTrueExpr(compactChild)) {
+              return new ExprNodeConstantDesc(Boolean.TRUE);
+            }
+            if (!isFalseExpr(compactChild)) {
+              newChildren.add(compactChild);
+              allFalse = false;
+            }
+          } else {
+            return null;
+          }
+        }
+
+        if (allFalse) {
           return new ExprNodeConstantDesc(Boolean.FALSE);
-        } else if ((isAnd && isLeftTrue) || (isOr && isLeftFalse)) {
-          return right;
-        } else if ((isAnd && isRightTrue) || (isOr && isRightFalse)) {
-          return left;
         }
         // Nothing to compact, update expr with compacted children.
-        children.set(0, left);
-        children.set(1, right);
+        ((ExprNodeGenericFuncDesc) expr).setChildren(newChildren);
       }
+
       return expr;
     } else {
       throw new IllegalStateException("Unexpected type of ExprNodeDesc: " + expr.getExprString());
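
The rewritten compactExpr handles the two operators asymmetrically: under AND, a child that compacts to null (a non-partition expression) is simply dropped, while under OR a single such child makes the whole disjunction unusable for pruning and null is returned. Below is a toy model of those rules, standalone and not the Hive implementation, with a child represented as a String predicate, "true"/"false" as the boolean constants, and null standing for a compacted-away non-partition child.

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

// Toy model of the n-ary AND/OR compaction in PartitionPruner.compactExpr().
public class CompactSketch {

  static String compactAnd(List<String> children) {
    List<String> kept = new ArrayList<>();
    for (String c : children) {
      if (c == null) {
        continue;                  // non-partition child: ignored under AND
      }
      if ("false".equals(c)) {
        return "false";            // one false child decides the conjunction
      }
      if (!"true".equals(c)) {
        kept.add(c);               // true children add no constraint
      }
    }
    if (kept.isEmpty()) {
      return null;                 // nothing left that can prune partitions
    }
    if (kept.size() == 1) {
      return kept.get(0);
    }
    return "(" + String.join(" and ", kept) + ")";
  }

  static String compactOr(List<String> children) {
    List<String> kept = new ArrayList<>();
    for (String c : children) {
      if (c == null) {
        return null;               // one unknown child poisons the disjunction
      }
      if ("true".equals(c)) {
        return "true";             // one true child decides the disjunction
      }
      if (!"false".equals(c)) {
        kept.add(c);
      }
    }
    if (kept.isEmpty()) {
      return "false";
    }
    return "(" + String.join(" or ", kept) + ")";
  }

  public static void main(String[] args) {
    // (ds = '2008') and <non-partition> and true  ->  ds = '2008'
    System.out.println(compactAnd(Arrays.asList("ds = '2008'", null, "true")));
    // (ds = '2008') or <non-partition>            ->  null (cannot prune)
    System.out.println(compactOr(Arrays.asList("ds = '2008'", null)));
  }
}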

http://git-wip-us.apache.org/repos/asf/hive/blob/7f3e4811/ql/src/java/org/apache/hadoop/hive/ql/parse/TypeCheckProcFactory.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/TypeCheckProcFactory.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/TypeCheckProcFactory.java
index d823f03..cd68f4e 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/TypeCheckProcFactory.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/TypeCheckProcFactory.java
@@ -62,7 +62,9 @@ import org.apache.hadoop.hive.ql.plan.ExprNodeGenericFuncDesc;
 import org.apache.hadoop.hive.ql.udf.SettableUDF;
 import org.apache.hadoop.hive.ql.udf.generic.GenericUDF;
 import org.apache.hadoop.hive.ql.udf.generic.GenericUDFBaseCompare;
+import org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPAnd;
 import org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPEqual;
+import org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPOr;
 import org.apache.hadoop.hive.serde.serdeConstants;
 import org.apache.hadoop.hive.serde2.objectinspector.ConstantObjectInspector;
 import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
@@ -814,10 +816,12 @@ public class TypeCheckProcFactory {
           ((SettableUDF)genericUDF).setTypeInfo(typeInfo);
         }
       }
-
+      
       List<ExprNodeDesc> childrenList = new ArrayList<ExprNodeDesc>(children.length);
+
       childrenList.addAll(Arrays.asList(children));
-      return ExprNodeGenericFuncDesc.newInstance(genericUDF, childrenList);
+      return ExprNodeGenericFuncDesc.newInstance(genericUDF,
+          childrenList);
     }
 
     public static ExprNodeDesc getFuncExprNodeDesc(String udfName,
@@ -1048,8 +1052,36 @@ public class TypeCheckProcFactory {
             }
           }
         }
-
-        desc = ExprNodeGenericFuncDesc.newInstance(genericUDF, funcText, children);
+        if (genericUDF instanceof GenericUDFOPOr) {
+          // flatten OR
+          List<ExprNodeDesc> childrenList = new ArrayList<ExprNodeDesc>(
+              children.size());
+          for (ExprNodeDesc child : children) {
+            if (FunctionRegistry.isOpOr(child)) {
+              childrenList.addAll(child.getChildren());
+            } else {
+              childrenList.add(child);
+            }
+          }
+          desc = ExprNodeGenericFuncDesc.newInstance(genericUDF, funcText,
+              childrenList);
+        } else if (genericUDF instanceof GenericUDFOPAnd) {
+          // flatten AND
+          List<ExprNodeDesc> childrenList = new ArrayList<ExprNodeDesc>(
+              children.size());
+          for (ExprNodeDesc child : children) {
+            if (FunctionRegistry.isOpAnd(child)) {
+              childrenList.addAll(child.getChildren());
+            } else {
+              childrenList.add(child);
+            }
+          }
+          desc = ExprNodeGenericFuncDesc.newInstance(genericUDF, funcText,
+              childrenList);
+        } else {
+          desc = ExprNodeGenericFuncDesc.newInstance(genericUDF, funcText,
+              children);
+        }
       }
       // UDFOPPositive is a no-op.
       // However, we still create it, and then remove it here, to make sure we
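
Because the expression tree is built bottom-up, a chain like a AND b AND c reaches this point as and(and(a, b), c), so hoisting the children of a same-operator child one level, as the new branches above do, is enough to produce a flat and(a, b, c). A standalone sketch of that one-level flattening over a toy node type (not a Hive class):

import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;

// Toy expression node: a leaf predicate, or an AND/OR over children.
public class FlattenSketch {

  static final class Node {
    final String op;              // "and", "or", or null for a leaf
    final List<Node> children;
    final String leaf;

    Node(String leaf) {
      this.op = null;
      this.children = Collections.emptyList();
      this.leaf = leaf;
    }

    Node(String op, List<Node> children) {
      this.op = op;
      this.children = children;
      this.leaf = null;
    }

    @Override
    public String toString() {
      if (op == null) {
        return leaf;
      }
      StringBuilder sb = new StringBuilder("(");
      for (int i = 0; i < children.size(); i++) {
        if (i > 0) {
          sb.append(" ").append(op).append(" ");
        }
        sb.append(children.get(i));
      }
      return sb.append(")").toString();
    }
  }

  // One-level flattening at construction time: children that use the same
  // operator contribute their own children instead of themselves.
  static Node makeFlat(String op, List<Node> children) {
    List<Node> flat = new ArrayList<>();
    for (Node child : children) {
      if (op.equals(child.op)) {
        flat.addAll(child.children);
      } else {
        flat.add(child);
      }
    }
    return new Node(op, flat);
  }

  public static void main(String[] args) {
    Node a = new Node("key = '0'");
    Node b = new Node("value = '8'");
    Node c = new Node("key = '1'");
    Node inner = makeFlat("and", Arrays.asList(a, b));      // and(a, b)
    Node outer = makeFlat("and", Arrays.asList(inner, c));  // and(a, b, c)
    System.out.println(outer);  // (key = '0' and value = '8' and key = '1')
  }
}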

http://git-wip-us.apache.org/repos/asf/hive/blob/7f3e4811/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFOPAnd.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFOPAnd.java b/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFOPAnd.java
index 47abb20..db7fbac 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFOPAnd.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFOPAnd.java
@@ -35,46 +35,43 @@ import org.apache.hadoop.io.BooleanWritable;
 /**
  * GenericUDF Class for computing and.
  */
-@Description(name = "and", value = "a _FUNC_ b - Logical and")
+@Description(name = "and", value = "a1 _FUNC_ a2 _FUNC_ ... _FUNC_ an - Logical and")
 @VectorizedExpressions({ColAndCol.class, FilterExprAndExpr.class, FilterColAndScalar.class,
     FilterScalarAndColumn.class})
 public class GenericUDFOPAnd extends GenericUDF {
   private final BooleanWritable result = new BooleanWritable();
-  private transient BooleanObjectInspector boi0,boi1;
+  private transient BooleanObjectInspector[] boi;
   @Override
   public ObjectInspector initialize(ObjectInspector[] arguments)
       throws UDFArgumentException {
-    if (arguments.length != 2) {
+    if (arguments.length < 2) {
       throw new UDFArgumentLengthException(
-          "The operator 'AND' only accepts 2 argument.");
+          "The operator 'AND' accepts at least 2 argument.");
+    }
+    boi = new BooleanObjectInspector[arguments.length];
+    for (int i = 0; i < arguments.length; i++) {
+      boi[i] = (BooleanObjectInspector) arguments[i];
     }
-    boi0 = (BooleanObjectInspector) arguments[0];
-    boi1 = (BooleanObjectInspector) arguments[1];
     return PrimitiveObjectInspectorFactory.writableBooleanObjectInspector;
   }
 
   @Override
   public Object evaluate(DeferredObject[] arguments) throws HiveException {
-    boolean bool_a0 = false, bool_a1 = false;
-    Object a0 = arguments[0].get();
-    if (a0 != null) {
-      bool_a0 = boi0.get(a0);
-      if (bool_a0 == false) {
-        result.set(false);
-        return result;
-      }
-    }
-
-    Object a1 = arguments[1].get();
-    if (a1 != null) {
-      bool_a1 = boi1.get(a1);
-      if (bool_a1 == false) {
-        result.set(false);
-        return result;
+    boolean notNull = true;
+    for (int i = 0; i < arguments.length; i++) {
+      Object a = arguments[i].get();
+      if (a != null) {
+        boolean bool_a = boi[i].get(a);
+        if (bool_a == false) {
+          result.set(false);
+          return result;
+        }
+      } else {
+        notNull = false;
       }
     }
 
-    if ((a0 != null && bool_a0 == true) && (a1 != null && bool_a1 == true)) {
+    if (notNull) {
       result.set(true);
       return result;
     }
@@ -84,8 +81,20 @@ public class GenericUDFOPAnd extends GenericUDF {
 
   @Override
   public String getDisplayString(String[] children) {
-    assert (children.length == 2);
-    return "(" + children[0] + " and " + children[1] + ")";
+    assert (children.length >= 2);
+    StringBuilder sb = new StringBuilder();
+    sb.append("(");
+    boolean first = true;
+    for (String and : children) {
+      if (!first) {
+        sb.append(" and ");
+      } else {
+        first = false;
+      }
+      sb.append(and);
+    }
+    sb.append(")");
+    return sb.toString();
   }
 
 }
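
The user-visible effect of the initialize()/evaluate() changes above is that AND now accepts more than two operands while keeping the same three-valued semantics: any definite false wins, otherwise the result is null if any operand was null, otherwise true. The harness below is a standalone sketch, not part of the commit, assuming hive-exec and hadoop-common are on the classpath; with the old binary-only argument check this call would have thrown UDFArgumentLengthException.

import org.apache.hadoop.hive.ql.udf.generic.GenericUDF.DeferredJavaObject;
import org.apache.hadoop.hive.ql.udf.generic.GenericUDF.DeferredObject;
import org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPAnd;
import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorFactory;
import org.apache.hadoop.io.BooleanWritable;

// Calls the patched n-ary AND with three boolean arguments.
public class NaryAndDemo {
  public static void main(String[] args) throws Exception {
    GenericUDFOPAnd and = new GenericUDFOPAnd();
    ObjectInspector boolOI =
        PrimitiveObjectInspectorFactory.writableBooleanObjectInspector;
    and.initialize(new ObjectInspector[] { boolOI, boolOI, boolOI });

    DeferredObject[] operands = new DeferredObject[] {
        new DeferredJavaObject(new BooleanWritable(true)),
        new DeferredJavaObject(new BooleanWritable(true)),
        new DeferredJavaObject(new BooleanWritable(false))
    };
    System.out.println(and.evaluate(operands));  // false: one operand is false
  }
}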

http://git-wip-us.apache.org/repos/asf/hive/blob/7f3e4811/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFOPOr.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFOPOr.java b/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFOPOr.java
index cd656a0..4160610 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFOPOr.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFOPOr.java
@@ -35,47 +35,44 @@ import org.apache.hadoop.io.BooleanWritable;
 /**
  * GenericUDF Class for computing or.
  */
-@Description(name = "or", value = "a _FUNC_ b - Logical or")
+@Description(name = "or", value = "a1 _FUNC_ a2 _FUNC_ ... _FUNC_ an - Logical or")
 @VectorizedExpressions({ColOrCol.class, FilterExprOrExpr.class, FilterColOrScalar.class,
     FilterScalarOrColumn.class})
 public class GenericUDFOPOr extends GenericUDF {
   private final BooleanWritable result = new BooleanWritable();
-  private transient BooleanObjectInspector boi0,boi1;
+  private transient BooleanObjectInspector[] boi;
 
   @Override
   public ObjectInspector initialize(ObjectInspector[] arguments)
       throws UDFArgumentException {
-    if (arguments.length != 2) {
+    if (arguments.length < 2) {
       throw new UDFArgumentLengthException(
-          "The operator 'OR' only accepts 2 argument.");
+          "The operator 'OR' accepts at least 2 arguments.");
+    }
+    boi = new BooleanObjectInspector[arguments.length];
+    for (int i = 0; i < arguments.length; i++) {
+      boi[i] = (BooleanObjectInspector) arguments[i];
     }
-    boi0 = (BooleanObjectInspector) arguments[0];
-    boi1 = (BooleanObjectInspector) arguments[1];
     return PrimitiveObjectInspectorFactory.writableBooleanObjectInspector;
   }
 
   @Override
   public Object evaluate(DeferredObject[] arguments) throws HiveException {
-    boolean bool_a0 = false, bool_a1 = false;
-    Object a0 = arguments[0].get();
-    if (a0 != null) {
-      bool_a0 = boi0.get(a0);
-      if (bool_a0 == true) {
-        result.set(true);
-        return result;
-      }
-    }
-
-    Object a1 = arguments[1].get();
-    if (a1 != null) {
-      bool_a1 = boi1.get(a1);
-      if (bool_a1 == true) {
-        result.set(true);
-        return result;
+    boolean notNull = true;
+    for (int i = 0; i < arguments.length; i++) {
+      Object a = arguments[i].get();
+      if (a != null) {
+        boolean bool_a = boi[i].get(a);
+        if (bool_a == true) {
+          result.set(true);
+          return result;
+        }
+      } else {
+        notNull = false;
       }
     }
 
-    if ((a0 != null && bool_a0 == false) && (a1 != null && bool_a1 == false)) {
+    if (notNull) {
       result.set(false);
       return result;
     }
@@ -85,8 +82,20 @@ public class GenericUDFOPOr extends GenericUDF {
 
   @Override
   public String getDisplayString(String[] children) {
-    assert (children.length == 2);
-    return "(" + children[0] + " or " + children[1] + ")";
+    assert (children.length >= 2);
+    StringBuilder sb = new StringBuilder();
+    sb.append("(");
+    boolean first = true;
+    for (String or : children) {
+      if (!first) {
+        sb.append(" or ");
+      } else {
+        first = false;
+      }
+      sb.append(or);
+    }
+    sb.append(")");
+    return sb.toString();
   }
 
 }
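
Both getDisplayString() rewrites in this commit do the same thing: join an arbitrary number of child strings with the operator name, which is why plans now print flat predicates like (a or b or c). The StringBuilder loop is equivalent to a plain join (presumably kept as a loop because Hive still targeted Java 7, which lacks String.join); a short standalone illustration, not Hive code:

// Equivalent rendering of the n-ary display string.
public class DisplayStringDemo {
  public static void main(String[] args) {
    String[] children = { "(key = '0')", "(value = '8')", "(key = '1')" };
    System.out.println("(" + String.join(" or ", children) + ")");
    // prints: ((key = '0') or (value = '8') or (key = '1'))
  }
}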

http://git-wip-us.apache.org/repos/asf/hive/blob/7f3e4811/ql/src/test/queries/clientpositive/flatten_and_or.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/flatten_and_or.q b/ql/src/test/queries/clientpositive/flatten_and_or.q
new file mode 100644
index 0000000..6d65225
--- /dev/null
+++ b/ql/src/test/queries/clientpositive/flatten_and_or.q
@@ -0,0 +1,17 @@
+explain
+SELECT key
+FROM src
+WHERE
+   ((key = '0'
+   AND value = '8') OR (key = '1'
+   AND value = '5') OR (key = '2'
+   AND value = '6') OR (key = '3'
+   AND value = '8') OR (key = '4'
+   AND value = '1') OR (key = '5'
+   AND value = '6') OR (key = '6'
+   AND value = '1') OR (key = '7'
+   AND value = '1') OR (key = '8'
+   AND value = '1') OR (key = '9'
+   AND value = '1') OR (key = '10'
+   AND value = '3'))
+;
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hive/blob/7f3e4811/ql/src/test/results/clientpositive/annotate_stats_deep_filters.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/annotate_stats_deep_filters.q.out b/ql/src/test/results/clientpositive/annotate_stats_deep_filters.q.out
index 788d6c8..fc4f294 100644
--- a/ql/src/test/results/clientpositive/annotate_stats_deep_filters.q.out
+++ b/ql/src/test/results/clientpositive/annotate_stats_deep_filters.q.out
@@ -120,7 +120,7 @@ STAGE PLANS:
             alias: over1k
             Statistics: Num rows: 2098 Data size: 211174 Basic stats: COMPLETE Column stats: COMPLETE
             Filter Operator
-              predicate: (((t = 1) and (si = 2)) or (((t = 2) and (si = 3)) or (((t = 3) and (si = 4)) or (((t = 4) and (si = 5)) or (((t = 5) and (si = 6)) or (((t = 6) and (si = 7)) or (((t = 7) and (si = 8)) or (((t = 9) and (si = 10)) or (((t = 10) and (si = 11)) or (((t = 11) and (si = 12)) or (((t = 12) and (si = 13)) or (((t = 13) and (si = 14)) or (((t = 14) and (si = 15)) or (((t = 15) and (si = 16)) or (((t = 16) and (si = 17)) or (((t = 17) and (si = 18)) or (((t = 27) and (si = 28)) or (((t = 37) and (si = 38)) or (((t = 47) and (si = 48)) or ((t = 52) and (si = 53))))))))))))))))))))) (type: boolean)
+              predicate: (((t = 1) and (si = 2)) or ((t = 2) and (si = 3)) or ((t = 3) and (si = 4)) or ((t = 4) and (si = 5)) or ((t = 5) and (si = 6)) or ((t = 6) and (si = 7)) or ((t = 7) and (si = 8)) or ((t = 9) and (si = 10)) or ((t = 10) and (si = 11)) or ((t = 11) and (si = 12)) or ((t = 12) and (si = 13)) or ((t = 13) and (si = 14)) or ((t = 14) and (si = 15)) or ((t = 15) and (si = 16)) or ((t = 16) and (si = 17)) or ((t = 17) and (si = 18)) or ((t = 27) and (si = 28)) or ((t = 37) and (si = 38)) or ((t = 47) and (si = 48)) or ((t = 52) and (si = 53))) (type: boolean)
               Statistics: Num rows: 280 Data size: 2232 Basic stats: COMPLETE Column stats: COMPLETE
               Select Operator
                 Statistics: Num rows: 280 Data size: 2232 Basic stats: COMPLETE Column stats: COMPLETE
@@ -209,7 +209,7 @@ STAGE PLANS:
             alias: over1k
             Statistics: Num rows: 2098 Data size: 211174 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: (((t = 1) and (si = 2)) or (((t = 2) and (si = 3)) or (((t = 3) and (si = 4)) or (((t = 4) and (si = 5)) or (((t = 5) and (si = 6)) or (((t = 6) and (si = 7)) or (((t = 7) and (si = 8)) or (((t = 9) and (si = 10)) or (((t = 10) and (si = 11)) or (((t = 11) and (si = 12)) or (((t = 12) and (si = 13)) or (((t = 13) and (si = 14)) or (((t = 14) and (si = 15)) or (((t = 15) and (si = 16)) or (((t = 16) and (si = 17)) or (((t = 17) and (si = 18)) or (((t = 27) and (si = 28)) or (((t = 37) and (si = 38)) or (((t = 47) and (si = 48)) or ((t = 52) and (si = 53))))))))))))))))))))) (type: boolean)
+              predicate: (((t = 1) and (si = 2)) or ((t = 2) and (si = 3)) or ((t = 3) and (si = 4)) or ((t = 4) and (si = 5)) or ((t = 5) and (si = 6)) or ((t = 6) and (si = 7)) or ((t = 7) and (si = 8)) or ((t = 9) and (si = 10)) or ((t = 10) and (si = 11)) or ((t = 11) and (si = 12)) or ((t = 12) and (si = 13)) or ((t = 13) and (si = 14)) or ((t = 14) and (si = 15)) or ((t = 15) and (si = 16)) or ((t = 16) and (si = 17)) or ((t = 17) and (si = 18)) or ((t = 27) and (si = 28)) or ((t = 37) and (si = 38)) or ((t = 47) and (si = 48)) or ((t = 52) and (si = 53))) (type: boolean)
               Statistics: Num rows: 2098 Data size: 211174 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 Statistics: Num rows: 2098 Data size: 211174 Basic stats: COMPLETE Column stats: NONE

http://git-wip-us.apache.org/repos/asf/hive/blob/7f3e4811/ql/src/test/results/clientpositive/dynamic_rdd_cache.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/dynamic_rdd_cache.q.out b/ql/src/test/results/clientpositive/dynamic_rdd_cache.q.out
index 394af7e..eeb5847 100644
--- a/ql/src/test/results/clientpositive/dynamic_rdd_cache.q.out
+++ b/ql/src/test/results/clientpositive/dynamic_rdd_cache.q.out
@@ -1030,7 +1030,7 @@ STAGE PLANS:
           outputColumnNames: _col0, _col1, _col2, _col3, _col7, _col11, _col12, _col16
           Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
           Filter Operator
-            predicate: (((_col1 = _col7) and (_col3 = _col11)) and (_col0 = _col16)) (type: boolean)
+            predicate: ((_col1 = _col7) and (_col3 = _col11) and (_col0 = _col16)) (type: boolean)
             Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
             Select Operator
               expressions: _col12 (type: string), _col11 (type: int), _col7 (type: int), 3 (type: int), _col2 (type: int)
@@ -1111,7 +1111,7 @@ STAGE PLANS:
           outputColumnNames: _col1, _col2, _col3, _col5, _col6, _col8, _col9, _col10, _col12, _col13
           Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
           Filter Operator
-            predicate: ((((_col2 = _col9) and (_col1 = _col8)) and (_col3 = 3)) and (_col10 = 4)) (type: boolean)
+            predicate: ((_col2 = _col9) and (_col1 = _col8) and (_col3 = 3) and (_col10 = 4)) (type: boolean)
             Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
             Select Operator
               expressions: _col1 (type: int), _col2 (type: int), _col5 (type: double), _col6 (type: double), _col8 (type: int), _col9 (type: int), _col12 (type: double), _col13 (type: double)
@@ -1257,7 +1257,7 @@ STAGE PLANS:
           outputColumnNames: _col0, _col1, _col2, _col3, _col7, _col11, _col12, _col16
           Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
           Filter Operator
-            predicate: (((_col1 = _col7) and (_col3 = _col11)) and (_col0 = _col16)) (type: boolean)
+            predicate: ((_col1 = _col7) and (_col3 = _col11) and (_col0 = _col16)) (type: boolean)
             Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
             Select Operator
               expressions: _col12 (type: string), _col11 (type: int), _col7 (type: int), 4 (type: int), _col2 (type: int)

http://git-wip-us.apache.org/repos/asf/hive/blob/7f3e4811/ql/src/test/results/clientpositive/flatten_and_or.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/flatten_and_or.q.out b/ql/src/test/results/clientpositive/flatten_and_or.q.out
new file mode 100644
index 0000000..9c51ff3
--- /dev/null
+++ b/ql/src/test/results/clientpositive/flatten_and_or.q.out
@@ -0,0 +1,66 @@
+PREHOOK: query: explain
+SELECT key
+FROM src
+WHERE
+   ((key = '0'
+   AND value = '8') OR (key = '1'
+   AND value = '5') OR (key = '2'
+   AND value = '6') OR (key = '3'
+   AND value = '8') OR (key = '4'
+   AND value = '1') OR (key = '5'
+   AND value = '6') OR (key = '6'
+   AND value = '1') OR (key = '7'
+   AND value = '1') OR (key = '8'
+   AND value = '1') OR (key = '9'
+   AND value = '1') OR (key = '10'
+   AND value = '3'))
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+SELECT key
+FROM src
+WHERE
+   ((key = '0'
+   AND value = '8') OR (key = '1'
+   AND value = '5') OR (key = '2'
+   AND value = '6') OR (key = '3'
+   AND value = '8') OR (key = '4'
+   AND value = '1') OR (key = '5'
+   AND value = '6') OR (key = '6'
+   AND value = '1') OR (key = '7'
+   AND value = '1') OR (key = '8'
+   AND value = '1') OR (key = '9'
+   AND value = '1') OR (key = '10'
+   AND value = '3'))
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: src
+            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+            Filter Operator
+              predicate: (((key = '0') and (value = '8')) or ((key = '1') and (value = '5')) or ((key = '2') and (value = '6')) or ((key = '3') and (value = '8')) or ((key = '4') and (value = '1')) or ((key = '5') and (value = '6')) or ((key = '6') and (value = '1')) or ((key = '7') and (value = '1')) or ((key = '8') and (value = '1')) or ((key = '9') and (value = '1')) or ((key = '10') and (value = '3'))) (type: boolean)
+              Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+              Select Operator
+                expressions: key (type: string)
+                outputColumnNames: _col0
+                Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.mapred.TextInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+

http://git-wip-us.apache.org/repos/asf/hive/blob/7f3e4811/ql/src/test/results/clientpositive/groupby_multi_single_reducer3.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/groupby_multi_single_reducer3.q.out b/ql/src/test/results/clientpositive/groupby_multi_single_reducer3.q.out
index 616eaa3..ca66c67 100644
--- a/ql/src/test/results/clientpositive/groupby_multi_single_reducer3.q.out
+++ b/ql/src/test/results/clientpositive/groupby_multi_single_reducer3.q.out
@@ -225,7 +225,7 @@ STAGE PLANS:
             alias: src
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: ((((key + key) = 400) or (((key - 100) = 500) and value is not null)) or ((((key + key) = 200) or ((key - 100) = 100)) or ((key = 300) and value is not null))) (type: boolean)
+              predicate: ((((key + key) = 400) or (((key - 100) = 500) and value is not null)) or (((key + key) = 200) or ((key - 100) = 100) or ((key = 300) and value is not null))) (type: boolean)
               Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
               Reduce Output Operator
                 key expressions: value (type: string)
@@ -237,7 +237,7 @@ STAGE PLANS:
         Forward
           Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
           Filter Operator
-            predicate: ((((VALUE._col0 + VALUE._col0) = 200) or ((VALUE._col0 - 100) = 100)) or ((VALUE._col0 = 300) and KEY._col0 is not null)) (type: boolean)
+            predicate: (((VALUE._col0 + VALUE._col0) = 200) or ((VALUE._col0 - 100) = 100) or ((VALUE._col0 = 300) and KEY._col0 is not null)) (type: boolean)
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Group By Operator
               aggregations: count()
@@ -557,7 +557,7 @@ STAGE PLANS:
             alias: src
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: ((((key + key) = 400) or (((key - 100) = 500) and value is not null)) or ((((key + key) = 200) or ((key - 100) = 100)) or ((key = 300) and value is not null))) (type: boolean)
+              predicate: ((((key + key) = 400) or (((key - 100) = 500) and value is not null)) or (((key + key) = 200) or ((key - 100) = 100) or ((key = 300) and value is not null))) (type: boolean)
               Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
               Reduce Output Operator
                 key expressions: value (type: string)
@@ -569,7 +569,7 @@ STAGE PLANS:
         Forward
           Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
           Filter Operator
-            predicate: ((((VALUE._col0 + VALUE._col0) = 200) or ((VALUE._col0 - 100) = 100)) or ((VALUE._col0 = 300) and KEY._col0 is not null)) (type: boolean)
+            predicate: (((VALUE._col0 + VALUE._col0) = 200) or ((VALUE._col0 - 100) = 100) or ((VALUE._col0 = 300) and KEY._col0 is not null)) (type: boolean)
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Group By Operator
               aggregations: count()

http://git-wip-us.apache.org/repos/asf/hive/blob/7f3e4811/ql/src/test/results/clientpositive/input_testxpath4.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/input_testxpath4.q.out b/ql/src/test/results/clientpositive/input_testxpath4.q.out
index b522b8a..4aea350 100644
--- a/ql/src/test/results/clientpositive/input_testxpath4.q.out
+++ b/ql/src/test/results/clientpositive/input_testxpath4.q.out
@@ -24,7 +24,7 @@ STAGE PLANS:
             alias: src_thrift
             Statistics: Num rows: 11 Data size: 3070 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: ((mstringstring['key_9'] is not null and lintstring.myint is not null) and lintstring is not null) (type: boolean)
+              predicate: (mstringstring['key_9'] is not null and lintstring.myint is not null and lintstring is not null) (type: boolean)
               Statistics: Num rows: 2 Data size: 558 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: mstringstring['key_9'] (type: string), lintstring.myint (type: array<int>)

http://git-wip-us.apache.org/repos/asf/hive/blob/7f3e4811/ql/src/test/results/clientpositive/join_cond_pushdown_unqual4.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/join_cond_pushdown_unqual4.q.out b/ql/src/test/results/clientpositive/join_cond_pushdown_unqual4.q.out
index 26db67e..6ff13e4 100644
--- a/ql/src/test/results/clientpositive/join_cond_pushdown_unqual4.q.out
+++ b/ql/src/test/results/clientpositive/join_cond_pushdown_unqual4.q.out
@@ -282,7 +282,7 @@ STAGE PLANS:
           outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20, _col24, _col25, _col26, _col27, _col28, _col29, _col30, _col31, _col32, _col36, _col37, _col38, _col39, _col40, _col41, _col42, _col43, _col44
           Statistics: Num rows: 14 Data size: 1730 Basic stats: COMPLETE Column stats: NONE
           Filter Operator
-            predicate: (((_col13 = _col25) and (_col0 = _col36)) and (_col0 = _col12)) (type: boolean)
+            predicate: ((_col13 = _col25) and (_col0 = _col36) and (_col0 = _col12)) (type: boolean)
             Statistics: Num rows: 1 Data size: 123 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string), _col12 (type: int), _col13 (type: string), _col14 (type: string), _col15 (type: string), _col16 (type: string), _col17 (type: int), _col18 (type: string), _col19 (type: double), _col20 (type: string), _col24 (type: int), _col25 (type: string), _col26 (type: string), _col27 (type: string), _col28 (type: string), _col29 (type: int), _col30 (type: string), _col31 (type: double), _col32 (type: string), _col36 (type: int), _col37 (type: string), _col38 (type: string), _col39 (type: string), _col40 (type: string), _col41 (type: int), _col42 (type: string), _col43 (type: double), _col44 (type: string)

http://git-wip-us.apache.org/repos/asf/hive/blob/7f3e4811/ql/src/test/results/clientpositive/lineage3.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/lineage3.q.out b/ql/src/test/results/clientpositive/lineage3.q.out
index 8a7bd3e..75d88f8 100644
--- a/ql/src/test/results/clientpositive/lineage3.q.out
+++ b/ql/src/test/results/clientpositive/lineage3.q.out
@@ -269,7 +269,7 @@ PREHOOK: type: CREATEVIEW
 PREHOOK: Input: default@alltypesorc
 PREHOOK: Output: database:default
 PREHOOK: Output: default@dest_v3
-{"version":"1.0","engine":"mr","hash":"a0c2481ce1c24895a43a950f93a10da7","queryText":"create view dest_v3 (a1, a2, a3, a4, a5, a6, a7) as\n  select x.csmallint, x.cbigint bint1, x.ctinyint, c.cbigint bint2, x.cint, x.cfloat, c.cstring1\n  from alltypesorc c\n  join (\n     select a.csmallint csmallint, a.ctinyint ctinyint, a.cstring2 cstring2,\n           a.cint cint, a.cstring1 ctring1, b.cfloat cfloat, b.cbigint cbigint\n     from ( select * from alltypesorc a where cboolean1=true ) a\n     join alltypesorc b on (a.csmallint = b.cint)\n   ) x on (x.ctinyint = c.cbigint)\n  where x.csmallint=11\n  and x.cint > 899\n  and x.cfloat > 4.5\n  and c.cstring1 < '7'\n  and x.cint + x.cfloat + length(c.cstring1) < 1000","edges":[{"sources":[7],"targets":[0],"expression":"x._col15","edgeType":"PROJECTION"},{"sources":[8],"targets":[1,2],"edgeType":"PROJECTION"},{"sources":[9],"targets":[3],"expression":"x._col16","edgeType":"PROJECTION"},{"sources":[10],"targets":[4],"expression":"x._col18"
 ,"edgeType":"PROJECTION"},{"sources":[11],"targets":[5],"edgeType":"PROJECTION"},{"sources":[12],"targets":[6],"edgeType":"PROJECTION"},{"sources":[13],"targets":[0,1,3,2,4,5,6],"expression":"(a.cboolean1 = true)","edgeType":"PREDICATE"},{"sources":[7,10,12,11],"targets":[0,1,3,2,4,5,6],"expression":"(((((x.csmallint = 11) and (x.cint > 899)) and (x.cfloat > 4.5)) and (c.cstring1 < '7')) and (((x.cint + x.cfloat) + length(c.cstring1)) < 1000))","edgeType":"PREDICATE"},{"sources":[7,10],"targets":[0,1,3,2,4,5,6],"expression":"(UDFToInteger(a._col1) = b.cint)","edgeType":"PREDICATE"},{"sources":[8,9],"targets":[0,1,3,2,4,5,6],"expression":"(c.cbigint = UDFToLong(x._col1))","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"default.dest_v3.csmallint"},{"id":1,"vertexType":"COLUMN","vertexId":"default.dest_v3.bint1"},{"id":2,"vertexType":"COLUMN","vertexId":"default.dest_v3.bint2"},{"id":3,"vertexType":"COLUMN","vertexId":"default.dest_v3.ctinyint"},{"id":4,"
 vertexType":"COLUMN","vertexId":"default.dest_v3.cint"},{"id":5,"vertexType":"COLUMN","vertexId":"default.dest_v3.cfloat"},{"id":6,"vertexType":"COLUMN","vertexId":"default.dest_v3.cstring1"},{"id":7,"vertexType":"COLUMN","vertexId":"default.alltypesorc.csmallint"},{"id":8,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cbigint"},{"id":9,"vertexType":"COLUMN","vertexId":"default.alltypesorc.ctinyint"},{"id":10,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cint"},{"id":11,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cfloat"},{"id":12,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cstring1"},{"id":13,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cboolean1"}]}
+{"version":"1.0","engine":"mr","hash":"a0c2481ce1c24895a43a950f93a10da7","queryText":"create view dest_v3 (a1, a2, a3, a4, a5, a6, a7) as\n  select x.csmallint, x.cbigint bint1, x.ctinyint, c.cbigint bint2, x.cint, x.cfloat, c.cstring1\n  from alltypesorc c\n  join (\n     select a.csmallint csmallint, a.ctinyint ctinyint, a.cstring2 cstring2,\n           a.cint cint, a.cstring1 ctring1, b.cfloat cfloat, b.cbigint cbigint\n     from ( select * from alltypesorc a where cboolean1=true ) a\n     join alltypesorc b on (a.csmallint = b.cint)\n   ) x on (x.ctinyint = c.cbigint)\n  where x.csmallint=11\n  and x.cint > 899\n  and x.cfloat > 4.5\n  and c.cstring1 < '7'\n  and x.cint + x.cfloat + length(c.cstring1) < 1000","edges":[{"sources":[7],"targets":[0],"expression":"x._col15","edgeType":"PROJECTION"},{"sources":[8],"targets":[1,2],"edgeType":"PROJECTION"},{"sources":[9],"targets":[3],"expression":"x._col16","edgeType":"PROJECTION"},{"sources":[10],"targets":[4],"expression":"x._col18"
 ,"edgeType":"PROJECTION"},{"sources":[11],"targets":[5],"edgeType":"PROJECTION"},{"sources":[12],"targets":[6],"edgeType":"PROJECTION"},{"sources":[13],"targets":[0,1,3,2,4,5,6],"expression":"(a.cboolean1 = true)","edgeType":"PREDICATE"},{"sources":[7,10,12,11],"targets":[0,1,3,2,4,5,6],"expression":"((x.csmallint = 11) and (x.cint > 899) and (x.cfloat > 4.5) and (c.cstring1 < '7') and (((x.cint + x.cfloat) + length(c.cstring1)) < 1000))","edgeType":"PREDICATE"},{"sources":[7,10],"targets":[0,1,3,2,4,5,6],"expression":"(UDFToInteger(a._col1) = b.cint)","edgeType":"PREDICATE"},{"sources":[8,9],"targets":[0,1,3,2,4,5,6],"expression":"(c.cbigint = UDFToLong(x._col1))","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"default.dest_v3.csmallint"},{"id":1,"vertexType":"COLUMN","vertexId":"default.dest_v3.bint1"},{"id":2,"vertexType":"COLUMN","vertexId":"default.dest_v3.bint2"},{"id":3,"vertexType":"COLUMN","vertexId":"default.dest_v3.ctinyint"},{"id":4,"vertex
 Type":"COLUMN","vertexId":"default.dest_v3.cint"},{"id":5,"vertexType":"COLUMN","vertexId":"default.dest_v3.cfloat"},{"id":6,"vertexType":"COLUMN","vertexId":"default.dest_v3.cstring1"},{"id":7,"vertexType":"COLUMN","vertexId":"default.alltypesorc.csmallint"},{"id":8,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cbigint"},{"id":9,"vertexType":"COLUMN","vertexId":"default.alltypesorc.ctinyint"},{"id":10,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cint"},{"id":11,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cfloat"},{"id":12,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cstring1"},{"id":13,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cboolean1"}]}
 PREHOOK: query: alter view dest_v3 as
   select * from (
     select sum(a.ctinyint) over (partition by a.csmallint order by a.csmallint) a,

http://git-wip-us.apache.org/repos/asf/hive/blob/7f3e4811/ql/src/test/results/clientpositive/orc_predicate_pushdown.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/orc_predicate_pushdown.q.out b/ql/src/test/results/clientpositive/orc_predicate_pushdown.q.out
index bb5fedb..0d4cd15 100644
--- a/ql/src/test/results/clientpositive/orc_predicate_pushdown.q.out
+++ b/ql/src/test/results/clientpositive/orc_predicate_pushdown.q.out
@@ -768,28 +768,28 @@ STAGE PLANS:
             alias: orc_pred
             Statistics: Num rows: 1049 Data size: 311170 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: (((((((d >= 10.0) and (d < 12.0)) and t is not null) and (s like '%son')) and (not (s like '%car%'))) and (t > 0)) and si BETWEEN 300 AND 400) (type: boolean)
-              Statistics: Num rows: 2 Data size: 593 Basic stats: COMPLETE Column stats: NONE
+              predicate: ((((((d >= 10.0) and (d < 12.0)) and (s like '%son')) and (not (s like '%car%'))) and (t > 0)) and si BETWEEN 300 AND 400) (type: boolean)
+              Statistics: Num rows: 4 Data size: 1186 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: t (type: tinyint), si (type: smallint), d (type: double), s (type: string)
                 outputColumnNames: _col0, _col1, _col2, _col3
-                Statistics: Num rows: 2 Data size: 593 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 4 Data size: 1186 Basic stats: COMPLETE Column stats: NONE
                 Reduce Output Operator
                   key expressions: _col3 (type: string)
                   sort order: -
-                  Statistics: Num rows: 2 Data size: 593 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 4 Data size: 1186 Basic stats: COMPLETE Column stats: NONE
                   value expressions: _col0 (type: tinyint), _col1 (type: smallint), _col2 (type: double)
       Reduce Operator Tree:
         Select Operator
           expressions: VALUE._col0 (type: tinyint), VALUE._col1 (type: smallint), VALUE._col2 (type: double), KEY.reducesinkkey0 (type: string)
           outputColumnNames: _col0, _col1, _col2, _col3
-          Statistics: Num rows: 2 Data size: 593 Basic stats: COMPLETE Column stats: NONE
+          Statistics: Num rows: 4 Data size: 1186 Basic stats: COMPLETE Column stats: NONE
           Limit
             Number of rows: 3
-            Statistics: Num rows: 2 Data size: 593 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 3 Data size: 888 Basic stats: COMPLETE Column stats: NONE
             File Output Operator
               compressed: false
-              Statistics: Num rows: 2 Data size: 593 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 3 Data size: 888 Basic stats: COMPLETE Column stats: NONE
               table:
                   input format: org.apache.hadoop.mapred.TextInputFormat
                   output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -833,31 +833,31 @@ STAGE PLANS:
       Map Operator Tree:
           TableScan
             alias: orc_pred
-            filterExpr: (((((((d >= 10.0) and (d < 12.0)) and t is not null) and (s like '%son')) and (not (s like '%car%'))) and (t > 0)) and si BETWEEN 300 AND 400) (type: boolean)
+            filterExpr: ((((((d >= 10.0) and (d < 12.0)) and (s like '%son')) and (not (s like '%car%'))) and (t > 0)) and si BETWEEN 300 AND 400) (type: boolean)
             Statistics: Num rows: 1049 Data size: 311170 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: (((((((d >= 10.0) and (d < 12.0)) and t is not null) and (s like '%son')) and (not (s like '%car%'))) and (t > 0)) and si BETWEEN 300 AND 400) (type: boolean)
-              Statistics: Num rows: 2 Data size: 593 Basic stats: COMPLETE Column stats: NONE
+              predicate: ((((((d >= 10.0) and (d < 12.0)) and (s like '%son')) and (not (s like '%car%'))) and (t > 0)) and si BETWEEN 300 AND 400) (type: boolean)
+              Statistics: Num rows: 4 Data size: 1186 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: t (type: tinyint), si (type: smallint), d (type: double), s (type: string)
                 outputColumnNames: _col0, _col1, _col2, _col3
-                Statistics: Num rows: 2 Data size: 593 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 4 Data size: 1186 Basic stats: COMPLETE Column stats: NONE
                 Reduce Output Operator
                   key expressions: _col3 (type: string)
                   sort order: -
-                  Statistics: Num rows: 2 Data size: 593 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 4 Data size: 1186 Basic stats: COMPLETE Column stats: NONE
                   value expressions: _col0 (type: tinyint), _col1 (type: smallint), _col2 (type: double)
       Reduce Operator Tree:
         Select Operator
           expressions: VALUE._col0 (type: tinyint), VALUE._col1 (type: smallint), VALUE._col2 (type: double), KEY.reducesinkkey0 (type: string)
           outputColumnNames: _col0, _col1, _col2, _col3
-          Statistics: Num rows: 2 Data size: 593 Basic stats: COMPLETE Column stats: NONE
+          Statistics: Num rows: 4 Data size: 1186 Basic stats: COMPLETE Column stats: NONE
           Limit
             Number of rows: 3
-            Statistics: Num rows: 2 Data size: 593 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 3 Data size: 888 Basic stats: COMPLETE Column stats: NONE
             File Output Operator
               compressed: false
-              Statistics: Num rows: 2 Data size: 593 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 3 Data size: 888 Basic stats: COMPLETE Column stats: NONE
               table:
                   input format: org.apache.hadoop.mapred.TextInputFormat
                   output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -970,7 +970,7 @@ STAGE PLANS:
             alias: orc_pred
             Statistics: Num rows: 1049 Data size: 311170 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: (((((((((t > 10) and (t <> 101)) and (d >= 10.0)) and (d < 12.0)) and t is not null) and (s like '%son')) and (not (s like '%car%'))) and (t > 0)) and si BETWEEN 300 AND 400) (type: boolean)
+              predicate: ((((((((t > 10) and (t <> 101)) and (d >= 10.0)) and (d < 12.0)) and (s like '%son')) and (not (s like '%car%'))) and (t > 0)) and si BETWEEN 300 AND 400) (type: boolean)
               Statistics: Num rows: 1 Data size: 296 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: t (type: tinyint), si (type: smallint), d (type: double), s (type: string)
@@ -1064,10 +1064,10 @@ STAGE PLANS:
       Map Operator Tree:
           TableScan
             alias: orc_pred
-            filterExpr: (((((((((t > 10) and (t <> 101)) and (d >= 10.0)) and (d < 12.0)) and t is not null) and (s like '%son')) and (not (s like '%car%'))) and (t > 0)) and si BETWEEN 300 AND 400) (type: boolean)
+            filterExpr: ((((((((t > 10) and (t <> 101)) and (d >= 10.0)) and (d < 12.0)) and (s like '%son')) and (not (s like '%car%'))) and (t > 0)) and si BETWEEN 300 AND 400) (type: boolean)
             Statistics: Num rows: 1049 Data size: 311170 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: (((((((((t > 10) and (t <> 101)) and (d >= 10.0)) and (d < 12.0)) and t is not null) and (s like '%son')) and (not (s like '%car%'))) and (t > 0)) and si BETWEEN 300 AND 400) (type: boolean)
+              predicate: ((((((((t > 10) and (t <> 101)) and (d >= 10.0)) and (d < 12.0)) and (s like '%son')) and (not (s like '%car%'))) and (t > 0)) and si BETWEEN 300 AND 400) (type: boolean)
               Statistics: Num rows: 1 Data size: 296 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: t (type: tinyint), si (type: smallint), d (type: double), s (type: string)

http://git-wip-us.apache.org/repos/asf/hive/blob/7f3e4811/ql/src/test/results/clientpositive/ppd_gby_join.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/ppd_gby_join.q.out b/ql/src/test/results/clientpositive/ppd_gby_join.q.out
index 1acfc3d..e3f71e7 100644
--- a/ql/src/test/results/clientpositive/ppd_gby_join.q.out
+++ b/ql/src/test/results/clientpositive/ppd_gby_join.q.out
@@ -42,7 +42,7 @@ STAGE PLANS:
                 outputColumnNames: _col0, _col1
                 Statistics: Num rows: 2 Data size: 21 Basic stats: COMPLETE Column stats: NONE
                 Filter Operator
-                  predicate: ((_col0 > '20') and (((_col1 < 'val_50') or (_col0 > '2')) and (_col0 < '400'))) (type: boolean)
+                  predicate: ((_col0 > '20') and ((_col1 < 'val_50') or (_col0 > '2')) and (_col0 < '400')) (type: boolean)
                   Statistics: Num rows: 1 Data size: 10 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: _col0 (type: string)
@@ -73,7 +73,7 @@ STAGE PLANS:
                   predicate: (_col0 <> '4') (type: boolean)
                   Statistics: Num rows: 3 Data size: 31 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: ((_col0 > '1') and ((_col0 > '20') and (_col0 < '400'))) (type: boolean)
+                    predicate: ((_col0 > '1') and (_col0 > '20') and (_col0 < '400')) (type: boolean)
                     Statistics: Num rows: 1 Data size: 10 Basic stats: COMPLETE Column stats: NONE
                     Filter Operator
                       predicate: _col0 is not null (type: boolean)

http://git-wip-us.apache.org/repos/asf/hive/blob/7f3e4811/ql/src/test/results/clientpositive/ppd_join.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/ppd_join.q.out b/ql/src/test/results/clientpositive/ppd_join.q.out
index 2186a54..58c4e43 100644
--- a/ql/src/test/results/clientpositive/ppd_join.q.out
+++ b/ql/src/test/results/clientpositive/ppd_join.q.out
@@ -39,7 +39,7 @@ STAGE PLANS:
                 outputColumnNames: _col0, _col1
                 Statistics: Num rows: 2 Data size: 21 Basic stats: COMPLETE Column stats: NONE
                 Filter Operator
-                  predicate: ((_col0 > '20') and (((_col1 < 'val_50') or (_col0 > '2')) and (_col0 < '400'))) (type: boolean)
+                  predicate: ((_col0 > '20') and ((_col1 < 'val_50') or (_col0 > '2')) and (_col0 < '400')) (type: boolean)
                   Statistics: Num rows: 1 Data size: 10 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: _col0 (type: string)
@@ -70,7 +70,7 @@ STAGE PLANS:
                   predicate: (_col0 <> '4') (type: boolean)
                   Statistics: Num rows: 3 Data size: 31 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: ((_col0 > '1') and ((_col0 > '20') and (_col0 < '400'))) (type: boolean)
+                    predicate: ((_col0 > '1') and (_col0 > '20') and (_col0 < '400')) (type: boolean)
                     Statistics: Num rows: 1 Data size: 10 Basic stats: COMPLETE Column stats: NONE
                     Filter Operator
                       predicate: _col0 is not null (type: boolean)

http://git-wip-us.apache.org/repos/asf/hive/blob/7f3e4811/ql/src/test/results/clientpositive/ppd_join2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/ppd_join2.q.out b/ql/src/test/results/clientpositive/ppd_join2.q.out
index 335d995..e99839e 100644
--- a/ql/src/test/results/clientpositive/ppd_join2.q.out
+++ b/ql/src/test/results/clientpositive/ppd_join2.q.out
@@ -46,7 +46,7 @@ STAGE PLANS:
                 outputColumnNames: _col0, _col1
                 Statistics: Num rows: 42 Data size: 446 Basic stats: COMPLETE Column stats: NONE
                 Filter Operator
-                  predicate: ((_col0 <> '311') and (((_col1 <> 'val_50') or (_col0 > '1')) and (_col0 < '400'))) (type: boolean)
+                  predicate: ((_col0 <> '311') and ((_col1 <> 'val_50') or (_col0 > '1')) and (_col0 < '400')) (type: boolean)
                   Statistics: Num rows: 14 Data size: 148 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
                     predicate: ((_col0 <> '305') and (_col0 <> '14')) (type: boolean)
@@ -74,7 +74,7 @@ STAGE PLANS:
                   predicate: (_col0 <> '14') (type: boolean)
                   Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: ((_col0 <> '302') and ((_col0 <> '311') and (_col0 < '400'))) (type: boolean)
+                    predicate: ((_col0 <> '302') and (_col0 <> '311') and (_col0 < '400')) (type: boolean)
                     Statistics: Num rows: 27 Data size: 286 Basic stats: COMPLETE Column stats: NONE
                     Filter Operator
                       predicate: _col0 is not null (type: boolean)

http://git-wip-us.apache.org/repos/asf/hive/blob/7f3e4811/ql/src/test/results/clientpositive/ppd_join3.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/ppd_join3.q.out b/ql/src/test/results/clientpositive/ppd_join3.q.out
index d658cfb..f2b0b60 100644
--- a/ql/src/test/results/clientpositive/ppd_join3.q.out
+++ b/ql/src/test/results/clientpositive/ppd_join3.q.out
@@ -46,7 +46,7 @@ STAGE PLANS:
                 outputColumnNames: _col0, _col1
                 Statistics: Num rows: 28 Data size: 297 Basic stats: COMPLETE Column stats: NONE
                 Filter Operator
-                  predicate: ((_col0 > '0') and (((_col1 <> 'val_500') or (_col0 > '1')) and (_col0 < '400'))) (type: boolean)
+                  predicate: ((_col0 > '0') and ((_col1 <> 'val_500') or (_col0 > '1')) and (_col0 < '400')) (type: boolean)
                   Statistics: Num rows: 3 Data size: 31 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: _col0 (type: string)
@@ -77,7 +77,7 @@ STAGE PLANS:
                   predicate: (_col0 <> '4') (type: boolean)
                   Statistics: Num rows: 28 Data size: 297 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: ((_col0 <> '11') and ((_col0 > '0') and (_col0 < '400'))) (type: boolean)
+                    predicate: ((_col0 <> '11') and (_col0 > '0') and (_col0 < '400')) (type: boolean)
                     Statistics: Num rows: 3 Data size: 31 Basic stats: COMPLETE Column stats: NONE
                     Filter Operator
                       predicate: _col0 is not null (type: boolean)
@@ -134,7 +134,7 @@ STAGE PLANS:
                   predicate: (_col0 <> '1') (type: boolean)
                   Statistics: Num rows: 28 Data size: 297 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: ((_col0 <> '11') and ((_col0 > '0') and ((_col0 < '400') and ((_col0 <> '12') and (_col0 <> '4'))))) (type: boolean)
+                    predicate: ((_col0 <> '11') and (_col0 > '0') and (_col0 < '400') and (_col0 <> '12') and (_col0 <> '4')) (type: boolean)
                     Statistics: Num rows: 3 Data size: 31 Basic stats: COMPLETE Column stats: NONE
                     Filter Operator
                       predicate: _col0 is not null (type: boolean)

http://git-wip-us.apache.org/repos/asf/hive/blob/7f3e4811/ql/src/test/results/clientpositive/ppd_outer_join4.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/ppd_outer_join4.q.out b/ql/src/test/results/clientpositive/ppd_outer_join4.q.out
index 2d1333b..9997166 100644
--- a/ql/src/test/results/clientpositive/ppd_outer_join4.q.out
+++ b/ql/src/test/results/clientpositive/ppd_outer_join4.q.out
@@ -122,7 +122,7 @@ STAGE PLANS:
                 outputColumnNames: _col0
                 Statistics: Num rows: 3 Data size: 31 Basic stats: COMPLETE Column stats: NONE
                 Filter Operator
-                  predicate: ((_col0 > '10') and ((_col0 < '20') and ((_col0 > '15') and (_col0 < '25')))) (type: boolean)
+                  predicate: ((_col0 > '10') and (_col0 < '20') and (_col0 > '15') and (_col0 < '25')) (type: boolean)
                   Statistics: Num rows: 1 Data size: 10 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
                     predicate: _col0 is not null (type: boolean)

http://git-wip-us.apache.org/repos/asf/hive/blob/7f3e4811/ql/src/test/results/clientpositive/spark/dynamic_rdd_cache.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/dynamic_rdd_cache.q.out b/ql/src/test/results/clientpositive/spark/dynamic_rdd_cache.q.out
index 7045855..7e9a0f3 100644
--- a/ql/src/test/results/clientpositive/spark/dynamic_rdd_cache.q.out
+++ b/ql/src/test/results/clientpositive/spark/dynamic_rdd_cache.q.out
@@ -833,7 +833,7 @@ STAGE PLANS:
                 outputColumnNames: _col0, _col1, _col2, _col3, _col7, _col11, _col12, _col16
                 Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
                 Filter Operator
-                  predicate: (((_col1 = _col7) and (_col3 = _col11)) and (_col0 = _col16)) (type: boolean)
+                  predicate: ((_col1 = _col7) and (_col3 = _col11) and (_col0 = _col16)) (type: boolean)
                   Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
                   Select Operator
                     expressions: _col12 (type: string), _col11 (type: int), _col7 (type: int), 4 (type: int), _col2 (type: int)
@@ -887,7 +887,7 @@ STAGE PLANS:
                 outputColumnNames: _col0, _col1, _col2, _col3, _col7, _col11, _col12, _col16
                 Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
                 Filter Operator
-                  predicate: (((_col1 = _col7) and (_col3 = _col11)) and (_col0 = _col16)) (type: boolean)
+                  predicate: ((_col1 = _col7) and (_col3 = _col11) and (_col0 = _col16)) (type: boolean)
                   Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
                   Select Operator
                     expressions: _col12 (type: string), _col11 (type: int), _col7 (type: int), 3 (type: int), _col2 (type: int)
@@ -941,7 +941,7 @@ STAGE PLANS:
                 outputColumnNames: _col1, _col2, _col3, _col5, _col6, _col8, _col9, _col10, _col12, _col13
                 Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
                 Filter Operator
-                  predicate: ((((_col2 = _col9) and (_col1 = _col8)) and (_col3 = 3)) and (_col10 = 4)) (type: boolean)
+                  predicate: ((_col2 = _col9) and (_col1 = _col8) and (_col3 = 3) and (_col10 = 4)) (type: boolean)
                   Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
                   Select Operator
                     expressions: _col1 (type: int), _col2 (type: int), _col5 (type: double), _col6 (type: double), _col8 (type: int), _col9 (type: int), _col12 (type: double), _col13 (type: double)

http://git-wip-us.apache.org/repos/asf/hive/blob/7f3e4811/ql/src/test/results/clientpositive/spark/groupby_multi_single_reducer3.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/groupby_multi_single_reducer3.q.out b/ql/src/test/results/clientpositive/spark/groupby_multi_single_reducer3.q.out
index 5192dbb..f87308f 100644
--- a/ql/src/test/results/clientpositive/spark/groupby_multi_single_reducer3.q.out
+++ b/ql/src/test/results/clientpositive/spark/groupby_multi_single_reducer3.q.out
@@ -236,7 +236,7 @@ STAGE PLANS:
                   alias: src
                   Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: ((((key + key) = 400) or (((key - 100) = 500) and value is not null)) or ((((key + key) = 200) or ((key - 100) = 100)) or ((key = 300) and value is not null))) (type: boolean)
+                    predicate: ((((key + key) = 400) or (((key - 100) = 500) and value is not null)) or (((key + key) = 200) or ((key - 100) = 100) or ((key = 300) and value is not null))) (type: boolean)
                     Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                     Reduce Output Operator
                       key expressions: value (type: string)
@@ -249,7 +249,7 @@ STAGE PLANS:
               Forward
                 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                 Filter Operator
-                  predicate: ((((VALUE._col0 + VALUE._col0) = 200) or ((VALUE._col0 - 100) = 100)) or ((VALUE._col0 = 300) and KEY._col0 is not null)) (type: boolean)
+                  predicate: (((VALUE._col0 + VALUE._col0) = 200) or ((VALUE._col0 - 100) = 100) or ((VALUE._col0 = 300) and KEY._col0 is not null)) (type: boolean)
                   Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   Group By Operator
                     aggregations: count()
@@ -580,7 +580,7 @@ STAGE PLANS:
                   alias: src
                   Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: ((((key + key) = 400) or (((key - 100) = 500) and value is not null)) or ((((key + key) = 200) or ((key - 100) = 100)) or ((key = 300) and value is not null))) (type: boolean)
+                    predicate: ((((key + key) = 400) or (((key - 100) = 500) and value is not null)) or (((key + key) = 200) or ((key - 100) = 100) or ((key = 300) and value is not null))) (type: boolean)
                     Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                     Reduce Output Operator
                       key expressions: value (type: string)
@@ -593,7 +593,7 @@ STAGE PLANS:
               Forward
                 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                 Filter Operator
-                  predicate: ((((VALUE._col0 + VALUE._col0) = 200) or ((VALUE._col0 - 100) = 100)) or ((VALUE._col0 = 300) and KEY._col0 is not null)) (type: boolean)
+                  predicate: (((VALUE._col0 + VALUE._col0) = 200) or ((VALUE._col0 - 100) = 100) or ((VALUE._col0 = 300) and KEY._col0 is not null)) (type: boolean)
                   Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   Group By Operator
                     aggregations: count()

http://git-wip-us.apache.org/repos/asf/hive/blob/7f3e4811/ql/src/test/results/clientpositive/spark/join_cond_pushdown_unqual4.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/join_cond_pushdown_unqual4.q.out b/ql/src/test/results/clientpositive/spark/join_cond_pushdown_unqual4.q.out
index e16884c..b30f4f4 100644
--- a/ql/src/test/results/clientpositive/spark/join_cond_pushdown_unqual4.q.out
+++ b/ql/src/test/results/clientpositive/spark/join_cond_pushdown_unqual4.q.out
@@ -286,7 +286,7 @@ STAGE PLANS:
                 outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20, _col24, _col25, _col26, _col27, _col28, _col29, _col30, _col31, _col32, _col36, _col37, _col38, _col39, _col40, _col41, _col42, _col43, _col44
                 Statistics: Num rows: 14 Data size: 1730 Basic stats: COMPLETE Column stats: NONE
                 Filter Operator
-                  predicate: (((_col13 = _col25) and (_col0 = _col36)) and (_col0 = _col12)) (type: boolean)
+                  predicate: ((_col13 = _col25) and (_col0 = _col36) and (_col0 = _col12)) (type: boolean)
                   Statistics: Num rows: 1 Data size: 123 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string), _col12 (type: int), _col13 (type: string), _col14 (type: string), _col15 (type: string), _col16 (type: string), _col17 (type: int), _col18 (type: string), _col19 (type: double), _col20 (type: string), _col24 (type: int), _col25 (type: string), _col26 (type: string), _col27 (type: string), _col28 (type: string), _col29 (type: int), _col30 (type: string), _col31 (type: double), _col32 (type: string), _col36 (type: int), _col37 (type: string), _col38 (type: string), _col39 (type: string), _col40 (type: string), _col41 (type: int), _col42 (type: string), _col43 (type: double), _col44 (type: string)

http://git-wip-us.apache.org/repos/asf/hive/blob/7f3e4811/ql/src/test/results/clientpositive/spark/ppd_gby_join.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/ppd_gby_join.q.out b/ql/src/test/results/clientpositive/spark/ppd_gby_join.q.out
index b3ebea9..306292a 100644
--- a/ql/src/test/results/clientpositive/spark/ppd_gby_join.q.out
+++ b/ql/src/test/results/clientpositive/spark/ppd_gby_join.q.out
@@ -47,7 +47,7 @@ STAGE PLANS:
                       outputColumnNames: _col0, _col1
                       Statistics: Num rows: 2 Data size: 21 Basic stats: COMPLETE Column stats: NONE
                       Filter Operator
-                        predicate: ((_col0 > '20') and (((_col1 < 'val_50') or (_col0 > '2')) and (_col0 < '400'))) (type: boolean)
+                        predicate: ((_col0 > '20') and ((_col1 < 'val_50') or (_col0 > '2')) and (_col0 < '400')) (type: boolean)
                         Statistics: Num rows: 1 Data size: 10 Basic stats: COMPLETE Column stats: NONE
                         Select Operator
                           expressions: _col0 (type: string)
@@ -80,7 +80,7 @@ STAGE PLANS:
                         predicate: (_col0 <> '4') (type: boolean)
                         Statistics: Num rows: 3 Data size: 31 Basic stats: COMPLETE Column stats: NONE
                         Filter Operator
-                          predicate: ((_col0 > '1') and ((_col0 > '20') and (_col0 < '400'))) (type: boolean)
+                          predicate: ((_col0 > '1') and (_col0 > '20') and (_col0 < '400')) (type: boolean)
                           Statistics: Num rows: 1 Data size: 10 Basic stats: COMPLETE Column stats: NONE
                           Filter Operator
                             predicate: _col0 is not null (type: boolean)

http://git-wip-us.apache.org/repos/asf/hive/blob/7f3e4811/ql/src/test/results/clientpositive/spark/ppd_join.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/ppd_join.q.out b/ql/src/test/results/clientpositive/spark/ppd_join.q.out
index 42a83f3..aee7630 100644
--- a/ql/src/test/results/clientpositive/spark/ppd_join.q.out
+++ b/ql/src/test/results/clientpositive/spark/ppd_join.q.out
@@ -44,7 +44,7 @@ STAGE PLANS:
                       outputColumnNames: _col0, _col1
                       Statistics: Num rows: 2 Data size: 21 Basic stats: COMPLETE Column stats: NONE
                       Filter Operator
-                        predicate: ((_col0 > '20') and (((_col1 < 'val_50') or (_col0 > '2')) and (_col0 < '400'))) (type: boolean)
+                        predicate: ((_col0 > '20') and ((_col1 < 'val_50') or (_col0 > '2')) and (_col0 < '400')) (type: boolean)
                         Statistics: Num rows: 1 Data size: 10 Basic stats: COMPLETE Column stats: NONE
                         Select Operator
                           expressions: _col0 (type: string)
@@ -77,7 +77,7 @@ STAGE PLANS:
                         predicate: (_col0 <> '4') (type: boolean)
                         Statistics: Num rows: 3 Data size: 31 Basic stats: COMPLETE Column stats: NONE
                         Filter Operator
-                          predicate: ((_col0 > '1') and ((_col0 > '20') and (_col0 < '400'))) (type: boolean)
+                          predicate: ((_col0 > '1') and (_col0 > '20') and (_col0 < '400')) (type: boolean)
                           Statistics: Num rows: 1 Data size: 10 Basic stats: COMPLETE Column stats: NONE
                           Filter Operator
                             predicate: _col0 is not null (type: boolean)
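
The q.out changes above all show the same pattern: predicates that were previously printed as left-nested binary conjunctions are now rendered as a single n-ary AND (and likewise for OR). The following self-contained sketch is not Hive's code -- the class and method names are purely illustrative -- but it reproduces the before/after rendering of the first ppd_join3.q.out predicate shown above:

    import java.util.ArrayList;
    import java.util.Arrays;
    import java.util.List;

    public class FlattenAndSketch {
      interface Expr {}

      static class Leaf implements Expr {
        final String text;
        Leaf(String text) { this.text = text; }
        @Override public String toString() { return text; }
      }

      static class And implements Expr {
        final List<Expr> children;
        And(List<Expr> children) { this.children = children; }
        @Override public String toString() {
          StringBuilder sb = new StringBuilder("(");
          for (int i = 0; i < children.size(); i++) {
            if (i > 0) sb.append(" and ");
            sb.append(children.get(i));
          }
          return sb.append(")").toString();
        }
      }

      // Recursively pull the children of nested ANDs up into one flat child list.
      static Expr flatten(Expr e) {
        if (!(e instanceof And)) {
          return e;
        }
        List<Expr> flat = new ArrayList<Expr>();
        for (Expr child : ((And) e).children) {
          Expr f = flatten(child);
          if (f instanceof And) {
            flat.addAll(((And) f).children);
          } else {
            flat.add(f);
          }
        }
        return new And(flat);
      }

      public static void main(String[] args) {
        Expr nested = new And(Arrays.<Expr>asList(
            new Leaf("(_col0 > '0')"),
            new And(Arrays.<Expr>asList(
                new Leaf("((_col1 <> 'val_500') or (_col0 > '1'))"),
                new Leaf("(_col0 < '400')")))));
        System.out.println(nested);            // old, nested rendering
        System.out.println(flatten(nested));   // new, flattened rendering
      }
    }

Running it prints the nested form first and the flattened form second, matching the "-" and "+" predicate lines in the ppd_join3.q.out hunk at the top of this diff.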


[16/50] [abbrv] hive git commit: HIVE-11416: CBO: Calcite Operator To Hive Operator (Calcite Return Path): Groupby Optimizer assumes the schema can match after removing RS and GBY (reviewed by Jesus Camacho Rodriguez)

Posted by se...@apache.org.
HIVE-11416: CBO: Calcite Operator To Hive Operator (Calcite Return Path): Groupby Optimizer assumes the schema can match after removing RS and GBY (reviewed by Jesus Camacho Rodriguez)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/763cb02b
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/763cb02b
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/763cb02b

Branch: refs/heads/hbase-metastore
Commit: 763cb02b5eafb0ecd3fd0eb512636a1b092df671
Parents: 57ba795
Author: Pengcheng Xiong <px...@apache.org>
Authored: Tue Aug 11 11:26:48 2015 -0700
Committer: Pengcheng Xiong <px...@apache.org>
Committed: Tue Aug 11 11:26:48 2015 -0700

----------------------------------------------------------------------
 .../apache/hadoop/hive/ql/exec/Operator.java    | 25 ---------
 .../hive/ql/optimizer/GroupByOptimizer.java     | 58 +++++++++++++++++++-
 2 files changed, 57 insertions(+), 26 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/763cb02b/ql/src/java/org/apache/hadoop/hive/ql/exec/Operator.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/Operator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/Operator.java
index 0f02737..acbe504 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/Operator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/Operator.java
@@ -769,31 +769,6 @@ public abstract class Operator<T extends OperatorDesc> implements Serializable,C
     }
   }
 
-  // Remove the operators till a certain depth.
-  // Return true if the remove was successful, false otherwise
-  public boolean removeChildren(int depth) {
-    Operator<? extends OperatorDesc> currOp = this;
-    for (int i = 0; i < depth; i++) {
-      // If there are more than 1 children at any level, don't do anything
-      if ((currOp.getChildOperators() == null) || (currOp.getChildOperators().isEmpty()) ||
-          (currOp.getChildOperators().size() > 1)) {
-        return false;
-      }
-      currOp = currOp.getChildOperators().get(0);
-    }
-
-    setChildOperators(currOp.getChildOperators());
-
-    List<Operator<? extends OperatorDesc>> parentOps =
-      new ArrayList<Operator<? extends OperatorDesc>>();
-    parentOps.add(this);
-
-    for (Operator<? extends OperatorDesc> op : currOp.getChildOperators()) {
-      op.setParentOperators(parentOps);
-    }
-    return true;
-  }
-
   /**
    * Replace one parent with another at the same position. Chilren of the new
    * parent are not updated

http://git-wip-us.apache.org/repos/asf/hive/blob/763cb02b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GroupByOptimizer.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GroupByOptimizer.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GroupByOptimizer.java
index af54286..ce3f59a 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GroupByOptimizer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GroupByOptimizer.java
@@ -21,6 +21,7 @@ package org.apache.hadoop.hive.ql.optimizer;
 import java.util.ArrayList;
 import java.util.HashMap;
 import java.util.HashSet;
+import java.util.Iterator;
 import java.util.LinkedHashMap;
 import java.util.List;
 import java.util.Map;
@@ -31,9 +32,12 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.metastore.api.FieldSchema;
+import org.apache.hadoop.hive.ql.exec.ColumnInfo;
 import org.apache.hadoop.hive.ql.exec.GroupByOperator;
 import org.apache.hadoop.hive.ql.exec.Operator;
+import org.apache.hadoop.hive.ql.exec.OperatorFactory;
 import org.apache.hadoop.hive.ql.exec.ReduceSinkOperator;
+import org.apache.hadoop.hive.ql.exec.RowSchema;
 import org.apache.hadoop.hive.ql.exec.SelectOperator;
 import org.apache.hadoop.hive.ql.exec.TableScanOperator;
 import org.apache.hadoop.hive.ql.exec.Utilities;
@@ -520,12 +524,64 @@ public class GroupByOptimizer implements Transform {
         return;
       }
 
-      if (groupByOp.removeChildren(depth)) {
+      if (removeChildren(groupByOp, depth)) {
         // Use bucketized hive input format - that makes sure that one mapper reads the entire file
         groupByOp.setUseBucketizedHiveInputFormat(true);
         groupByOp.getConf().setMode(GroupByDesc.Mode.FINAL);
       }
     }
+
+    // Remove the operators till a certain depth.
+    // Return true if the remove was successful, false otherwise
+    public boolean removeChildren(Operator<? extends OperatorDesc> currOp, int depth) {
+      Operator<? extends OperatorDesc> inputOp = currOp;
+      for (int i = 0; i < depth; i++) {
+        // If there are more than 1 children at any level, don't do anything
+        if ((currOp.getChildOperators() == null) || (currOp.getChildOperators().isEmpty())
+            || (currOp.getChildOperators().size() > 1)) {
+          return false;
+        }
+        currOp = currOp.getChildOperators().get(0);
+      }
+
+      // add selectOp to match the schema
+      // after that, inputOp is the parent of selOp.
+      for (Operator<? extends OperatorDesc> op : inputOp.getChildOperators()) {
+        op.getParentOperators().clear();
+      }
+      inputOp.getChildOperators().clear();
+      Operator<? extends OperatorDesc> selOp = genOutputSelectForGroupBy(inputOp, currOp);
+
+      // update the childOp of selectOp
+      selOp.setChildOperators(currOp.getChildOperators());
+
+      // update the parentOp
+      for (Operator<? extends OperatorDesc> op : currOp.getChildOperators()) {
+        op.replaceParent(currOp, selOp);
+      }
+      return true;
+    }
+
+    private Operator<? extends OperatorDesc> genOutputSelectForGroupBy(
+        Operator<? extends OperatorDesc> parentOp, Operator<? extends OperatorDesc> currOp) {
+      assert (parentOp.getSchema().getSignature().size() == currOp.getSchema().getSignature().size());
+      Iterator<ColumnInfo> pIter = parentOp.getSchema().getSignature().iterator();
+      Iterator<ColumnInfo> cIter = currOp.getSchema().getSignature().iterator();
+      List<ExprNodeDesc> columns = new ArrayList<ExprNodeDesc>();
+      List<String> colName = new ArrayList<String>();
+      Map<String, ExprNodeDesc> columnExprMap = new HashMap<String, ExprNodeDesc>();
+      while (pIter.hasNext()) {
+        ColumnInfo pInfo = pIter.next();
+        ColumnInfo cInfo = cIter.next();
+        ExprNodeDesc column = new ExprNodeColumnDesc(pInfo.getType(), pInfo.getInternalName(),
+            pInfo.getTabAlias(), pInfo.getIsVirtualCol(), pInfo.isSkewedCol());
+        columns.add(column);
+        colName.add(cInfo.getInternalName());
+        columnExprMap.put(cInfo.getInternalName(), column);
+      }
+      return OperatorFactory.getAndMakeChild(new SelectDesc(columns, colName), new RowSchema(currOp
+          .getSchema().getSignature()), columnExprMap, parentOp);
+    }
   }
 
   /**


[14/50] [abbrv] hive git commit: HIVE-11511 Output an error message from orcfiledump when no ORC files are specified (Shinichi Yamashita via gates)

Posted by se...@apache.org.
HIVE-11511 Output an error message from orcfiledump when no ORC files are specified (Shinichi Yamashita via gates)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/cfe9e484
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/cfe9e484
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/cfe9e484

Branch: refs/heads/hbase-metastore
Commit: cfe9e484f8624b590a728d758099c1fd5d069672
Parents: 7e53685
Author: Alan Gates <ga...@hortonworks.com>
Authored: Tue Aug 11 09:42:20 2015 -0700
Committer: Alan Gates <ga...@hortonworks.com>
Committed: Tue Aug 11 09:42:20 2015 -0700

----------------------------------------------------------------------
 ql/src/java/org/apache/hadoop/hive/ql/io/orc/FileDump.java | 4 ++++
 1 file changed, 4 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/cfe9e484/ql/src/java/org/apache/hadoop/hive/ql/io/orc/FileDump.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/FileDump.java b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/FileDump.java
index cbbec36..4acb810 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/FileDump.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/FileDump.java
@@ -82,6 +82,10 @@ public final class FileDump {
     boolean printTimeZone = cli.hasOption('t');
     boolean jsonFormat = cli.hasOption('j');
     String[] files = cli.getArgs();
+    if (files.length == 0) {
+      System.err.println("Error : ORC files are not specified");
+      return;
+    }
     if (dumpData) {
       printData(Arrays.asList(files), conf);
     } else {
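
With this guard in place, running the dump tool without any file arguments reports the problem instead of continuing with an empty file list. A minimal sketch of what that looks like from Java (assuming FileDump's usual public static main entry point, which the context lines of the hunk suggest; the wrapper class name is purely illustrative):

    import org.apache.hadoop.hive.ql.io.orc.FileDump;

    public class OrcFileDumpNoArgs {
      public static void main(String[] args) throws Exception {
        // Expected to print "Error : ORC files are not specified" to stderr and return,
        // rather than proceeding to dump with an empty file list.
        FileDump.main(new String[] {});
      }
    }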


[15/50] [abbrv] hive git commit: HIVE-11340 - Create ORC based table using like clause doesn't copy compression property (Yongzhi Chen, reviewed by Chao Sun)

Posted by se...@apache.org.
HIVE-11340 - Create ORC based table using like clause doesn't copy compression property (Yongzhi Chen, reviewed by Chao Sun)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/57ba795c
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/57ba795c
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/57ba795c

Branch: refs/heads/hbase-metastore
Commit: 57ba795cbf98f275b7bae75669d8769aa35d9ee5
Parents: cfe9e48
Author: Yongzhi Chen <yo...@hotmail.com>
Authored: Tue Aug 11 09:58:30 2015 -0700
Committer: Chao Sun <su...@apache.org>
Committed: Tue Aug 11 09:58:30 2015 -0700

----------------------------------------------------------------------
 .../apache/hadoop/hive/ql/io/orc/OrcConf.java   |  2 +-
 .../apache/hadoop/hive/ql/io/orc/OrcSerde.java  |  6 +-
 .../test/queries/clientpositive/create_like.q   | 12 ++++
 .../results/clientpositive/create_like.q.out    | 66 ++++++++++++++++++++
 4 files changed, 84 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/57ba795c/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcConf.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcConf.java b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcConf.java
index 81b822f..132889c 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcConf.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcConf.java
@@ -133,7 +133,7 @@ public enum OrcConf {
   private String lookupValue(Properties tbl, Configuration conf) {
     String result = null;
     if (tbl != null) {
-      result = conf.get(attribute);
+      result = tbl.getProperty(attribute);
     }
     if (result == null && conf != null) {
       result = conf.get(attribute);
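
The one-line change above is the crux of HIVE-11340: lookupValue read the attribute from the Configuration even when the table's Properties were supplied, so a table-level setting such as "orc.compress"="SNAPPY" could be ignored. A minimal standalone sketch of the corrected precedence (class and variable names here are illustrative, not Hive's):

    import java.util.Properties;
    import org.apache.hadoop.conf.Configuration;

    public class OrcPropertyLookupSketch {
      // Table-level properties first, then the Configuration -- the order the fix restores.
      static String lookupValue(Properties tbl, Configuration conf, String attribute) {
        String result = null;
        if (tbl != null) {
          result = tbl.getProperty(attribute);   // previously conf.get(attribute), which ignored tbl
        }
        if (result == null && conf != null) {
          result = conf.get(attribute);          // fall back to the session/site configuration
        }
        return result;
      }

      public static void main(String[] args) {
        Properties tblProps = new Properties();
        tblProps.setProperty("orc.compress", "SNAPPY");   // as set via TBLPROPERTIES
        Configuration conf = new Configuration(false);
        conf.set("orc.compress", "ZLIB");                 // a session-level value
        // Prints SNAPPY: the table-level property now wins over the Configuration.
        System.out.println(lookupValue(tblProps, conf, "orc.compress"));
      }
    }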

http://git-wip-us.apache.org/repos/asf/hive/blob/57ba795c/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcSerde.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcSerde.java b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcSerde.java
index a381443..8beff4b 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcSerde.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcSerde.java
@@ -22,6 +22,7 @@ import java.io.DataOutput;
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Properties;
+
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
@@ -42,7 +43,7 @@ import org.apache.hadoop.io.Writable;
  * A serde class for ORC.
  * It transparently passes the object to/from the ORC file reader/writer.
  */
-@SerDeSpec(schemaProps = {serdeConstants.LIST_COLUMNS, serdeConstants.LIST_COLUMN_TYPES})
+@SerDeSpec(schemaProps = {serdeConstants.LIST_COLUMNS, serdeConstants.LIST_COLUMN_TYPES, OrcSerde.COMPRESSION})
 public class OrcSerde implements SerDe, VectorizedSerde {
 
   private static final Log LOG = LogFactory.getLog(OrcSerde.class);
@@ -51,6 +52,7 @@ public class OrcSerde implements SerDe, VectorizedSerde {
   private ObjectInspector inspector = null;
 
   private VectorizedOrcSerde vos = null;
+  public static final String COMPRESSION = "orc.compress";
 
   final class OrcSerdeRow implements Writable {
     Object realRow;
@@ -82,6 +84,8 @@ public class OrcSerde implements SerDe, VectorizedSerde {
     // NOTE: if "columns.types" is missing, all columns will be of String type
     String columnTypeProperty = table.getProperty(serdeConstants.LIST_COLUMN_TYPES);
 
+    String compressType = OrcConf.COMPRESS.getString(table, conf);
+
     // Parse the configuration parameters
     ArrayList<String> columnNames = new ArrayList<String>();
     if (columnNameProperty != null && columnNameProperty.length() > 0) {

http://git-wip-us.apache.org/repos/asf/hive/blob/57ba795c/ql/src/test/queries/clientpositive/create_like.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/create_like.q b/ql/src/test/queries/clientpositive/create_like.q
index 3b04702..bd39731 100644
--- a/ql/src/test/queries/clientpositive/create_like.q
+++ b/ql/src/test/queries/clientpositive/create_like.q
@@ -83,3 +83,15 @@ DESCRIBE FORMATTED table6;
 
 drop table table5;
 
+create table orc_table (
+time string)
+stored as ORC tblproperties ("orc.compress"="SNAPPY");
+
+create table orc_table_using_like like orc_table;
+
+describe formatted orc_table_using_like;
+
+drop table orc_table_using_like;
+
+drop table orc_table;
+

http://git-wip-us.apache.org/repos/asf/hive/blob/57ba795c/ql/src/test/results/clientpositive/create_like.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/create_like.q.out b/ql/src/test/results/clientpositive/create_like.q.out
index c93b134..a373178 100644
--- a/ql/src/test/results/clientpositive/create_like.q.out
+++ b/ql/src/test/results/clientpositive/create_like.q.out
@@ -579,3 +579,69 @@ POSTHOOK: query: drop table table5
 POSTHOOK: type: DROPTABLE
 POSTHOOK: Input: default@table5
 POSTHOOK: Output: default@table5
+PREHOOK: query: create table orc_table (
+time string)
+stored as ORC tblproperties ("orc.compress"="SNAPPY")
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@orc_table
+POSTHOOK: query: create table orc_table (
+time string)
+stored as ORC tblproperties ("orc.compress"="SNAPPY")
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@orc_table
+PREHOOK: query: create table orc_table_using_like like orc_table
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@orc_table_using_like
+POSTHOOK: query: create table orc_table_using_like like orc_table
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@orc_table_using_like
+PREHOOK: query: describe formatted orc_table_using_like
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@orc_table_using_like
+POSTHOOK: query: describe formatted orc_table_using_like
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@orc_table_using_like
+# col_name            	data_type           	comment             
+	 	 
+time                	string              	                    
+	 	 
+# Detailed Table Information	 	 
+Database:           	default             	 
+#### A masked pattern was here ####
+Retention:          	0                   	 
+#### A masked pattern was here ####
+Table Type:         	MANAGED_TABLE       	 
+Table Parameters:	 	 
+	orc.compress        	SNAPPY              
+#### A masked pattern was here ####
+	 	 
+# Storage Information	 	 
+SerDe Library:      	org.apache.hadoop.hive.ql.io.orc.OrcSerde	 
+InputFormat:        	org.apache.hadoop.hive.ql.io.orc.OrcInputFormat	 
+OutputFormat:       	org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat	 
+Compressed:         	No                  	 
+Num Buckets:        	-1                  	 
+Bucket Columns:     	[]                  	 
+Sort Columns:       	[]                  	 
+Storage Desc Params:	 	 
+	serialization.format	1                   
+PREHOOK: query: drop table orc_table_using_like
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@orc_table_using_like
+PREHOOK: Output: default@orc_table_using_like
+POSTHOOK: query: drop table orc_table_using_like
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@orc_table_using_like
+POSTHOOK: Output: default@orc_table_using_like
+PREHOOK: query: drop table orc_table
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@orc_table
+PREHOOK: Output: default@orc_table
+POSTHOOK: query: drop table orc_table
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@orc_table
+POSTHOOK: Output: default@orc_table