Posted to commits@hive.apache.org by we...@apache.org on 2017/05/05 17:32:35 UTC

[48/51] [partial] hive git commit: HIVE-14671 : merge master into hive-14535 (Wei Zheng)

http://git-wip-us.apache.org/repos/asf/hive/blob/187eb760/accumulo-handler/src/test/org/apache/hadoop/hive/accumulo/predicate/TestAccumuloRangeGenerator.java
----------------------------------------------------------------------
diff --git a/accumulo-handler/src/test/org/apache/hadoop/hive/accumulo/predicate/TestAccumuloRangeGenerator.java b/accumulo-handler/src/test/org/apache/hadoop/hive/accumulo/predicate/TestAccumuloRangeGenerator.java
index 339da07..5f3baab 100644
--- a/accumulo-handler/src/test/org/apache/hadoop/hive/accumulo/predicate/TestAccumuloRangeGenerator.java
+++ b/accumulo-handler/src/test/org/apache/hadoop/hive/accumulo/predicate/TestAccumuloRangeGenerator.java
@@ -16,20 +16,15 @@
  */
 package org.apache.hadoop.hive.accumulo.predicate;
 
-import static org.junit.Assert.assertNotNull;
-
-import java.sql.Date;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.List;
-
+import com.google.common.collect.Lists;
 import org.apache.accumulo.core.data.Key;
 import org.apache.accumulo.core.data.Range;
+import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hive.accumulo.AccumuloHiveConstants;
+import org.apache.hadoop.hive.accumulo.TestAccumuloDefaultIndexScanner;
 import org.apache.hadoop.hive.accumulo.columns.ColumnEncoding;
 import org.apache.hadoop.hive.accumulo.columns.HiveAccumuloRowIdColumnMapping;
+import org.apache.hadoop.hive.accumulo.serde.AccumuloSerDeParameters;
 import org.apache.hadoop.hive.ql.lib.DefaultGraphWalker;
 import org.apache.hadoop.hive.ql.lib.DefaultRuleDispatcher;
 import org.apache.hadoop.hive.ql.lib.Dispatcher;
@@ -42,22 +37,29 @@ import org.apache.hadoop.hive.ql.plan.ExprNodeColumnDesc;
 import org.apache.hadoop.hive.ql.plan.ExprNodeConstantDesc;
 import org.apache.hadoop.hive.ql.plan.ExprNodeDesc;
 import org.apache.hadoop.hive.ql.plan.ExprNodeGenericFuncDesc;
-import org.apache.hadoop.hive.ql.udf.UDFLike;
 import org.apache.hadoop.hive.ql.udf.UDFToString;
 import org.apache.hadoop.hive.ql.udf.generic.GenericUDFBridge;
 import org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPAnd;
+import org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPEqual;
 import org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPEqualOrGreaterThan;
 import org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPEqualOrLessThan;
-import org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPGreaterThan;
 import org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPLessThan;
 import org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPOr;
 import org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPPlus;
+import org.apache.hadoop.hive.serde.serdeConstants;
 import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory;
 import org.junit.Assert;
 import org.junit.Before;
 import org.junit.Test;
 
-import com.google.common.collect.Lists;
+import java.sql.Date;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.List;
+
+import static org.junit.Assert.assertNotNull;
 
 /**
  *
@@ -66,12 +68,14 @@ public class TestAccumuloRangeGenerator {
 
   private AccumuloPredicateHandler handler;
   private HiveAccumuloRowIdColumnMapping rowIdMapping;
+  private Configuration conf;
 
   @Before
   public void setup() {
     handler = AccumuloPredicateHandler.getInstance();
     rowIdMapping = new HiveAccumuloRowIdColumnMapping(AccumuloHiveConstants.ROWID,
-        ColumnEncoding.STRING, "row", TypeInfoFactory.stringTypeInfo.toString());
+        ColumnEncoding.STRING,"row", TypeInfoFactory.stringTypeInfo.toString());
+    conf = new Configuration(true);
   }
 
   @Test
@@ -108,7 +112,7 @@ public class TestAccumuloRangeGenerator {
     List<Range> expectedRanges = Arrays
         .asList(new Range(new Key("f"), true, new Key("m\0"), false));
 
-    AccumuloRangeGenerator rangeGenerator = new AccumuloRangeGenerator(handler, rowIdMapping, "rid");
+    AccumuloRangeGenerator rangeGenerator = new AccumuloRangeGenerator(conf, handler, rowIdMapping, "rid");
     Dispatcher disp = new DefaultRuleDispatcher(rangeGenerator,
         Collections.<Rule,NodeProcessor> emptyMap(), null);
     GraphWalker ogw = new DefaultGraphWalker(disp);
@@ -163,7 +167,7 @@ public class TestAccumuloRangeGenerator {
     // Should generate (-inf,+inf)
     List<Range> expectedRanges = Arrays.asList(new Range());
 
-    AccumuloRangeGenerator rangeGenerator = new AccumuloRangeGenerator(handler, rowIdMapping, "rid");
+    AccumuloRangeGenerator rangeGenerator = new AccumuloRangeGenerator(conf, handler, rowIdMapping, "rid");
     Dispatcher disp = new DefaultRuleDispatcher(rangeGenerator,
         Collections.<Rule,NodeProcessor> emptyMap(), null);
     GraphWalker ogw = new DefaultGraphWalker(disp);
@@ -236,7 +240,7 @@ public class TestAccumuloRangeGenerator {
     // Should generate ['q', +inf)
     List<Range> expectedRanges = Arrays.asList(new Range(new Key("q"), true, null, false));
 
-    AccumuloRangeGenerator rangeGenerator = new AccumuloRangeGenerator(handler, rowIdMapping, "rid");
+    AccumuloRangeGenerator rangeGenerator = new AccumuloRangeGenerator(conf, handler, rowIdMapping, "rid");
     Dispatcher disp = new DefaultRuleDispatcher(rangeGenerator,
         Collections.<Rule,NodeProcessor> emptyMap(), null);
     GraphWalker ogw = new DefaultGraphWalker(disp);
@@ -291,7 +295,7 @@ public class TestAccumuloRangeGenerator {
     // Should generate [f,+inf)
     List<Range> expectedRanges = Arrays.asList(new Range(new Key("f"), true, null, false));
 
-    AccumuloRangeGenerator rangeGenerator = new AccumuloRangeGenerator(handler, rowIdMapping, "rid");
+    AccumuloRangeGenerator rangeGenerator = new AccumuloRangeGenerator(conf, handler, rowIdMapping, "rid");
     Dispatcher disp = new DefaultRuleDispatcher(rangeGenerator,
         Collections.<Rule,NodeProcessor> emptyMap(), null);
     GraphWalker ogw = new DefaultGraphWalker(disp);
@@ -349,7 +353,7 @@ public class TestAccumuloRangeGenerator {
     List<Range> expectedRanges = Arrays.asList(new Range(new Key("2014-01-01"), true, new Key(
         "2014-07-01"), false));
 
-    AccumuloRangeGenerator rangeGenerator = new AccumuloRangeGenerator(handler, rowIdMapping, "rid");
+    AccumuloRangeGenerator rangeGenerator = new AccumuloRangeGenerator(conf, handler, rowIdMapping, "rid");
     Dispatcher disp = new DefaultRuleDispatcher(rangeGenerator,
         Collections.<Rule,NodeProcessor> emptyMap(), null);
     GraphWalker ogw = new DefaultGraphWalker(disp);
@@ -397,7 +401,7 @@ public class TestAccumuloRangeGenerator {
     ExprNodeGenericFuncDesc node = new ExprNodeGenericFuncDesc(TypeInfoFactory.stringTypeInfo,
         new GenericUDFOPEqualOrGreaterThan(), Arrays.asList(key, cast));
 
-    AccumuloRangeGenerator rangeGenerator = new AccumuloRangeGenerator(handler, rowIdMapping, "key");
+    AccumuloRangeGenerator rangeGenerator = new AccumuloRangeGenerator(conf, handler, rowIdMapping, "key");
     Dispatcher disp = new DefaultRuleDispatcher(rangeGenerator,
         Collections.<Rule,NodeProcessor> emptyMap(), null);
     GraphWalker ogw = new DefaultGraphWalker(disp);
@@ -446,7 +450,7 @@ public class TestAccumuloRangeGenerator {
     ExprNodeGenericFuncDesc both = new ExprNodeGenericFuncDesc(TypeInfoFactory.stringTypeInfo,
         new GenericUDFOPAnd(), bothFilters);
 
-    AccumuloRangeGenerator rangeGenerator = new AccumuloRangeGenerator(handler, rowIdMapping, "rid");
+    AccumuloRangeGenerator rangeGenerator = new AccumuloRangeGenerator(conf, handler, rowIdMapping, "rid");
     Dispatcher disp = new DefaultRuleDispatcher(rangeGenerator,
         Collections.<Rule,NodeProcessor> emptyMap(), null);
     GraphWalker ogw = new DefaultGraphWalker(disp);
@@ -464,4 +468,161 @@ public class TestAccumuloRangeGenerator {
     Object result = nodeOutput.get(both);
     Assert.assertNull(result);
   }
+
+  @Test
+  public void testRangeOverStringIndexedField() throws Exception {
+    // age >= '10'
+    ExprNodeDesc column = new ExprNodeColumnDesc(TypeInfoFactory.stringTypeInfo, "age", null, false);
+    ExprNodeDesc constant = new ExprNodeConstantDesc(TypeInfoFactory.stringTypeInfo, "10");
+    List<ExprNodeDesc> children = Lists.newArrayList();
+    children.add(column);
+    children.add(constant);
+    ExprNodeDesc node = new ExprNodeGenericFuncDesc(TypeInfoFactory.stringTypeInfo,
+        new GenericUDFOPEqualOrGreaterThan(), children);
+    assertNotNull(node);
+
+    // age <= '50'
+    ExprNodeDesc column2 = new ExprNodeColumnDesc(TypeInfoFactory.stringTypeInfo, "age", null,
+        false);
+    ExprNodeDesc constant2 = new ExprNodeConstantDesc(TypeInfoFactory.stringTypeInfo, "50");
+    List<ExprNodeDesc> children2 = Lists.newArrayList();
+    children2.add(column2);
+    children2.add(constant2);
+    ExprNodeDesc node2 = new ExprNodeGenericFuncDesc(TypeInfoFactory.stringTypeInfo,
+        new GenericUDFOPEqualOrLessThan(), children2);
+    assertNotNull(node2);
+
+    // And UDF
+    List<ExprNodeDesc> bothFilters = Lists.newArrayList();
+    bothFilters.add(node);
+    bothFilters.add(node2);
+    ExprNodeGenericFuncDesc both = new ExprNodeGenericFuncDesc(TypeInfoFactory.stringTypeInfo,
+        new GenericUDFOPAnd(), bothFilters);
+
+    AccumuloRangeGenerator rangeGenerator = new AccumuloRangeGenerator(conf, handler, rowIdMapping, "rid");
+    rangeGenerator.setIndexScanner(TestAccumuloDefaultIndexScanner.buildMockHandler(10));
+    Dispatcher disp = new DefaultRuleDispatcher(rangeGenerator,
+        Collections.<Rule,NodeProcessor> emptyMap(), null);
+    GraphWalker ogw = new DefaultGraphWalker(disp);
+    ArrayList<Node> topNodes = new ArrayList<Node>();
+    topNodes.add(both);
+    HashMap<Node,Object> nodeOutput = new HashMap<Node,Object>();
+
+    try {
+      ogw.startWalking(topNodes, nodeOutput);
+    } catch (SemanticException ex) {
+      throw new RuntimeException(ex);
+    }
+
+    // Filters are using an index which should match 3 rows
+    Object result = nodeOutput.get(both);
+    if (result instanceof List) {
+      List results = (List) result;
+      Assert.assertEquals(3, results.size());
+      Assert.assertTrue("does not contain row1", results.contains(new Range("row1")));
+      Assert.assertTrue("does not contain row2", results.contains(new Range("row2")));
+      Assert.assertTrue("does not contain row3", results.contains(new Range("row3")));
+    } else {
+      Assert.fail("Results not a list");
+    }
+  }
+
+  @Test
+  public void testRangeOverIntegerIndexedField() throws Exception {
+    // cars >= 2
+    ExprNodeDesc column = new ExprNodeColumnDesc(TypeInfoFactory.intTypeInfo, "cars", null, false);
+    ExprNodeDesc constant = new ExprNodeConstantDesc(TypeInfoFactory.intTypeInfo, 2);
+    List<ExprNodeDesc> children = Lists.newArrayList();
+    children.add(column);
+    children.add(constant);
+    ExprNodeDesc node = new ExprNodeGenericFuncDesc(TypeInfoFactory.intTypeInfo,
+        new GenericUDFOPEqualOrGreaterThan(), children);
+    assertNotNull(node);
+
+    //  cars <= 9
+    ExprNodeDesc column2 = new ExprNodeColumnDesc(TypeInfoFactory.intTypeInfo, "cars", null,
+        false);
+    ExprNodeDesc constant2 = new ExprNodeConstantDesc(TypeInfoFactory.intTypeInfo, 9);
+    List<ExprNodeDesc> children2 = Lists.newArrayList();
+    children2.add(column2);
+    children2.add(constant2);
+    ExprNodeDesc node2 = new ExprNodeGenericFuncDesc(TypeInfoFactory.intTypeInfo,
+        new GenericUDFOPEqualOrLessThan(), children2);
+    assertNotNull(node2);
+
+    // And UDF
+    List<ExprNodeDesc> bothFilters = Lists.newArrayList();
+    bothFilters.add(node);
+    bothFilters.add(node2);
+    ExprNodeGenericFuncDesc both = new ExprNodeGenericFuncDesc(TypeInfoFactory.stringTypeInfo,
+        new GenericUDFOPAnd(), bothFilters);
+
+    AccumuloRangeGenerator rangeGenerator = new AccumuloRangeGenerator(conf, handler, rowIdMapping, "rid");
+    rangeGenerator.setIndexScanner(TestAccumuloDefaultIndexScanner.buildMockHandler(10));
+    Dispatcher disp = new DefaultRuleDispatcher(rangeGenerator,
+        Collections.<Rule,NodeProcessor> emptyMap(), null);
+    GraphWalker ogw = new DefaultGraphWalker(disp);
+    ArrayList<Node> topNodes = new ArrayList<Node>();
+    topNodes.add(both);
+    HashMap<Node,Object> nodeOutput = new HashMap<Node,Object>();
+
+    try {
+      ogw.startWalking(topNodes, nodeOutput);
+    } catch (SemanticException ex) {
+      throw new RuntimeException(ex);
+    }
+
+    // Filters are using an index which should match 3 rows
+    Object result = nodeOutput.get(both);
+    if (result instanceof List) {
+      List results = (List) result;
+      Assert.assertEquals(3, results.size());
+      Assert.assertTrue("does not contain row1", results.contains(new Range("row1")));
+      Assert.assertTrue("does not contain row2", results.contains(new Range("row2")));
+      Assert.assertTrue("does not contain row3", results.contains(new Range("row3")));
+    } else {
+      Assert.fail("Results not a list");
+    }
+  }
+
+  @Test
+  public void testRangeOverBooleanIndexedField() throws Exception {
+    // mgr == true
+    ExprNodeDesc column = new ExprNodeColumnDesc(TypeInfoFactory.booleanTypeInfo, "mgr", null, false);
+    ExprNodeDesc constant = new ExprNodeConstantDesc(TypeInfoFactory.booleanTypeInfo, true);
+    List<ExprNodeDesc> children = Lists.newArrayList();
+    children.add(column);
+    children.add(constant);
+    ExprNodeDesc node = new ExprNodeGenericFuncDesc(TypeInfoFactory.intTypeInfo,
+        new GenericUDFOPEqual(), children);
+    assertNotNull(node);
+
+    AccumuloRangeGenerator rangeGenerator = new AccumuloRangeGenerator(conf, handler, rowIdMapping, "rid");
+    rangeGenerator.setIndexScanner(TestAccumuloDefaultIndexScanner.buildMockHandler(10));
+    Dispatcher disp = new DefaultRuleDispatcher(rangeGenerator,
+        Collections.<Rule,NodeProcessor> emptyMap(), null);
+    GraphWalker ogw = new DefaultGraphWalker(disp);
+    ArrayList<Node> topNodes = new ArrayList<Node>();
+    topNodes.add(node);
+    HashMap<Node,Object> nodeOutput = new HashMap<Node,Object>();
+
+    try {
+      ogw.startWalking(topNodes, nodeOutput);
+    } catch (SemanticException ex) {
+      throw new RuntimeException(ex);
+    }
+
+    // Filters are using an index which should match 2 rows
+    Object result = nodeOutput.get(node);
+    if (result instanceof List) {
+      List results = (List) result;
+      Assert.assertEquals(2, results.size());
+      Assert.assertTrue("does not contain row1", results.contains(new Range("row1")));
+      Assert.assertTrue("does not contain row3", results.contains(new Range("row3")));
+    }
+    else {
+      Assert.fail("Results not a list");
+    }
+  }
+
 }

http://git-wip-us.apache.org/repos/asf/hive/blob/187eb760/accumulo-handler/src/test/queries/positive/accumulo_index.q
----------------------------------------------------------------------
diff --git a/accumulo-handler/src/test/queries/positive/accumulo_index.q b/accumulo-handler/src/test/queries/positive/accumulo_index.q
new file mode 100644
index 0000000..52a33af
--- /dev/null
+++ b/accumulo-handler/src/test/queries/positive/accumulo_index.q
@@ -0,0 +1,44 @@
+DROP TABLE accumulo_index_test;
+
+CREATE TABLE accumulo_index_test (
+   rowid string,
+   active boolean,
+   num_offices tinyint,
+   num_personel smallint,
+   total_manhours int,
+   num_shareholders bigint,
+   eff_rating float,
+   err_rating double,
+   yearly_production decimal,
+   start_date date,
+   address varchar(100),
+   phone char(13),
+   last_update timestamp )
+ROW FORMAT SERDE 'org.apache.hadoop.hive.accumulo.serde.AccumuloSerDe'
+STORED BY 'org.apache.hadoop.hive.accumulo.AccumuloStorageHandler'
+WITH SERDEPROPERTIES (
+   "accumulo.columns.mapping" = ":rowID,a:act,a:off,a:per,a:mhs,a:shs,a:eff,a:err,a:yp,a:sd,a:addr,a:ph,a:lu",
+   "accumulo.table.name"="accumulo_index_test",
+   "accumulo.indexed.columns"="*",
+   "accumulo.indextable.name"="accumulo_index_idx"
+ );
+
+
+insert into accumulo_index_test values( "row1", true, 55, 107, 555555, 1223232332,
+                                 4.5, 0.8, 1232223, "2001-10-10", "123 main street",
+                                 "555-555-5555", "2016-02-22 12:45:07.000000000");
+
+select * from accumulo_index_test where active = 'true';
+select * from accumulo_index_test where num_offices = 55;
+select * from accumulo_index_test where num_personel = 107;
+select * from accumulo_index_test where total_manhours < 555556;
+select * from accumulo_index_test where num_shareholders >= 1223232331;
+select * from accumulo_index_test where eff_rating <= 4.5;
+select * from accumulo_index_test where err_rating >= 0.8;
+select * from accumulo_index_test where yearly_production = 1232223;
+select * from accumulo_index_test where start_date = "2001-10-10";
+select * from accumulo_index_test where address >= "100 main street";
+select * from accumulo_index_test where phone <= "555-555-5555";
+select * from accumulo_index_test where last_update >= "2016-02-22 12:45:07";
+
+DROP TABLE accumulo_index_test;

http://git-wip-us.apache.org/repos/asf/hive/blob/187eb760/accumulo-handler/src/test/results/positive/accumulo_index.q.out
----------------------------------------------------------------------
diff --git a/accumulo-handler/src/test/results/positive/accumulo_index.q.out b/accumulo-handler/src/test/results/positive/accumulo_index.q.out
new file mode 100644
index 0000000..5cb3d73
--- /dev/null
+++ b/accumulo-handler/src/test/results/positive/accumulo_index.q.out
@@ -0,0 +1,180 @@
+PREHOOK: query: DROP TABLE accumulo_index_test
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: DROP TABLE accumulo_index_test
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: CREATE TABLE accumulo_index_test (
+   rowid string,
+   active boolean,
+   num_offices tinyint,
+   num_personel smallint,
+   total_manhours int,
+   num_shareholders bigint,
+   eff_rating float,
+   err_rating double,
+   yearly_production decimal,
+   start_date date,
+   address varchar(100),
+   phone char(13),
+   last_update timestamp )
+ROW FORMAT SERDE 'org.apache.hadoop.hive.accumulo.serde.AccumuloSerDe'
+STORED BY 'org.apache.hadoop.hive.accumulo.AccumuloStorageHandler'
+WITH SERDEPROPERTIES (
+   "accumulo.columns.mapping" = ":rowID,a:act,a:off,a:per,a:mhs,a:shs,a:eff,a:err,a:yp,a:sd,a:addr,a:ph,a:lu",
+   "accumulo.table.name"="accumulo_index_test",
+   "accumulo.indexed.columns"="*",
+   "accumulo.indextable.name"="accumulo_index_idx"
+ )
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@accumulo_index_test
+POSTHOOK: query: CREATE TABLE accumulo_index_test (
+   rowid string,
+   active boolean,
+   num_offices tinyint,
+   num_personel smallint,
+   total_manhours int,
+   num_shareholders bigint,
+   eff_rating float,
+   err_rating double,
+   yearly_production decimal,
+   start_date date,
+   address varchar(100),
+   phone char(13),
+   last_update timestamp )
+ROW FORMAT SERDE 'org.apache.hadoop.hive.accumulo.serde.AccumuloSerDe'
+STORED BY 'org.apache.hadoop.hive.accumulo.AccumuloStorageHandler'
+WITH SERDEPROPERTIES (
+   "accumulo.columns.mapping" = ":rowID,a:act,a:off,a:per,a:mhs,a:shs,a:eff,a:err,a:yp,a:sd,a:addr,a:ph,a:lu",
+   "accumulo.table.name"="accumulo_index_test",
+   "accumulo.indexed.columns"="*",
+   "accumulo.indextable.name"="accumulo_index_idx"
+ )
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@accumulo_index_test
+PREHOOK: query: insert into accumulo_index_test values( "row1", true, 55, 107, 555555, 1223232332,
+                                 4.5, 0.8, 1232223, "2001-10-10", "123 main street",
+                                 "555-555-5555", "2016-02-22 12:45:07.000000000")
+PREHOOK: type: QUERY
+PREHOOK: Output: default@accumulo_index_test
+POSTHOOK: query: insert into accumulo_index_test values( "row1", true, 55, 107, 555555, 1223232332,
+                                 4.5, 0.8, 1232223, "2001-10-10", "123 main street",
+                                 "555-555-5555", "2016-02-22 12:45:07.000000000")
+POSTHOOK: type: QUERY
+POSTHOOK: Output: default@accumulo_index_test
+PREHOOK: query: select * from accumulo_index_test where active = 'true'
+PREHOOK: type: QUERY
+PREHOOK: Input: default@accumulo_index_test
+#### A masked pattern was here ####
+POSTHOOK: query: select * from accumulo_index_test where active = 'true'
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@accumulo_index_test
+#### A masked pattern was here ####
+row1	true	55	107	555555	1223232332	4.5	0.8	1232223	2001-10-10	123 main street	555-555-5555 	2016-02-22 12:45:07
+PREHOOK: query: select * from accumulo_index_test where num_offices = 55
+PREHOOK: type: QUERY
+PREHOOK: Input: default@accumulo_index_test
+#### A masked pattern was here ####
+POSTHOOK: query: select * from accumulo_index_test where num_offices = 55
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@accumulo_index_test
+#### A masked pattern was here ####
+row1	true	55	107	555555	1223232332	4.5	0.8	1232223	2001-10-10	123 main street	555-555-5555 	2016-02-22 12:45:07
+PREHOOK: query: select * from accumulo_index_test where num_personel = 107
+PREHOOK: type: QUERY
+PREHOOK: Input: default@accumulo_index_test
+#### A masked pattern was here ####
+POSTHOOK: query: select * from accumulo_index_test where num_personel = 107
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@accumulo_index_test
+#### A masked pattern was here ####
+row1	true	55	107	555555	1223232332	4.5	0.8	1232223	2001-10-10	123 main street	555-555-5555 	2016-02-22 12:45:07
+PREHOOK: query: select * from accumulo_index_test where total_manhours < 555556
+PREHOOK: type: QUERY
+PREHOOK: Input: default@accumulo_index_test
+#### A masked pattern was here ####
+POSTHOOK: query: select * from accumulo_index_test where total_manhours < 555556
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@accumulo_index_test
+#### A masked pattern was here ####
+row1	true	55	107	555555	1223232332	4.5	0.8	1232223	2001-10-10	123 main street	555-555-5555 	2016-02-22 12:45:07
+PREHOOK: query: select * from accumulo_index_test where num_shareholders >= 1223232331
+PREHOOK: type: QUERY
+PREHOOK: Input: default@accumulo_index_test
+#### A masked pattern was here ####
+POSTHOOK: query: select * from accumulo_index_test where num_shareholders >= 1223232331
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@accumulo_index_test
+#### A masked pattern was here ####
+row1	true	55	107	555555	1223232332	4.5	0.8	1232223	2001-10-10	123 main street	555-555-5555 	2016-02-22 12:45:07
+PREHOOK: query: select * from accumulo_index_test where eff_rating <= 4.5
+PREHOOK: type: QUERY
+PREHOOK: Input: default@accumulo_index_test
+#### A masked pattern was here ####
+POSTHOOK: query: select * from accumulo_index_test where eff_rating <= 4.5
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@accumulo_index_test
+#### A masked pattern was here ####
+row1	true	55	107	555555	1223232332	4.5	0.8	1232223	2001-10-10	123 main street	555-555-5555 	2016-02-22 12:45:07
+PREHOOK: query: select * from accumulo_index_test where err_rating >= 0.8
+PREHOOK: type: QUERY
+PREHOOK: Input: default@accumulo_index_test
+#### A masked pattern was here ####
+POSTHOOK: query: select * from accumulo_index_test where err_rating >= 0.8
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@accumulo_index_test
+#### A masked pattern was here ####
+row1	true	55	107	555555	1223232332	4.5	0.8	1232223	2001-10-10	123 main street	555-555-5555 	2016-02-22 12:45:07
+PREHOOK: query: select * from accumulo_index_test where yearly_production = 1232223
+PREHOOK: type: QUERY
+PREHOOK: Input: default@accumulo_index_test
+#### A masked pattern was here ####
+POSTHOOK: query: select * from accumulo_index_test where yearly_production = 1232223
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@accumulo_index_test
+#### A masked pattern was here ####
+row1	true	55	107	555555	1223232332	4.5	0.8	1232223	2001-10-10	123 main street	555-555-5555 	2016-02-22 12:45:07
+PREHOOK: query: select * from accumulo_index_test where start_date = "2001-10-10"
+PREHOOK: type: QUERY
+PREHOOK: Input: default@accumulo_index_test
+#### A masked pattern was here ####
+POSTHOOK: query: select * from accumulo_index_test where start_date = "2001-10-10"
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@accumulo_index_test
+#### A masked pattern was here ####
+row1	true	55	107	555555	1223232332	4.5	0.8	1232223	2001-10-10	123 main street	555-555-5555 	2016-02-22 12:45:07
+PREHOOK: query: select * from accumulo_index_test where address >= "100 main street"
+PREHOOK: type: QUERY
+PREHOOK: Input: default@accumulo_index_test
+#### A masked pattern was here ####
+POSTHOOK: query: select * from accumulo_index_test where address >= "100 main street"
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@accumulo_index_test
+#### A masked pattern was here ####
+row1	true	55	107	555555	1223232332	4.5	0.8	1232223	2001-10-10	123 main street	555-555-5555 	2016-02-22 12:45:07
+PREHOOK: query: select * from accumulo_index_test where phone <= "555-555-5555"
+PREHOOK: type: QUERY
+PREHOOK: Input: default@accumulo_index_test
+#### A masked pattern was here ####
+POSTHOOK: query: select * from accumulo_index_test where phone <= "555-555-5555"
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@accumulo_index_test
+#### A masked pattern was here ####
+row1	true	55	107	555555	1223232332	4.5	0.8	1232223	2001-10-10	123 main street	555-555-5555 	2016-02-22 12:45:07
+PREHOOK: query: select * from accumulo_index_test where last_update >= "2016-02-22 12:45:07"
+PREHOOK: type: QUERY
+PREHOOK: Input: default@accumulo_index_test
+#### A masked pattern was here ####
+POSTHOOK: query: select * from accumulo_index_test where last_update >= "2016-02-22 12:45:07"
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@accumulo_index_test
+#### A masked pattern was here ####
+row1	true	55	107	555555	1223232332	4.5	0.8	1232223	2001-10-10	123 main street	555-555-5555 	2016-02-22 12:45:07
+PREHOOK: query: DROP TABLE accumulo_index_test
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@accumulo_index_test
+PREHOOK: Output: default@accumulo_index_test
+POSTHOOK: query: DROP TABLE accumulo_index_test
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@accumulo_index_test
+POSTHOOK: Output: default@accumulo_index_test

http://git-wip-us.apache.org/repos/asf/hive/blob/187eb760/beeline/pom.xml
----------------------------------------------------------------------
diff --git a/beeline/pom.xml b/beeline/pom.xml
index 58ca92e..b0a9a0b 100644
--- a/beeline/pom.xml
+++ b/beeline/pom.xml
@@ -19,7 +19,7 @@
   <parent>
     <groupId>org.apache.hive</groupId>
     <artifactId>hive</artifactId>
-    <version>2.2.0-SNAPSHOT</version>
+    <version>3.0.0-SNAPSHOT</version>
     <relativePath>../pom.xml</relativePath>
   </parent>
 

http://git-wip-us.apache.org/repos/asf/hive/blob/187eb760/beeline/src/java/org/apache/hive/beeline/BeeLine.java
----------------------------------------------------------------------
diff --git a/beeline/src/java/org/apache/hive/beeline/BeeLine.java b/beeline/src/java/org/apache/hive/beeline/BeeLine.java
index 3c8fccc..a589f33 100644
--- a/beeline/src/java/org/apache/hive/beeline/BeeLine.java
+++ b/beeline/src/java/org/apache/hive/beeline/BeeLine.java
@@ -22,6 +22,7 @@
  */
 package org.apache.hive.beeline;
 
+import java.io.BufferedReader;
 import java.io.ByteArrayInputStream;
 import java.io.Closeable;
 import java.io.EOFException;
@@ -29,6 +30,7 @@ import java.io.File;
 import java.io.FileInputStream;
 import java.io.IOException;
 import java.io.InputStream;
+import java.io.InputStreamReader;
 import java.io.PrintStream;
 import java.io.SequenceInputStream;
 import java.lang.reflect.InvocationTargetException;
@@ -59,6 +61,7 @@ import java.util.Enumeration;
 import java.util.HashMap;
 import java.util.HashSet;
 import java.util.Iterator;
+import java.util.LinkedList;
 import java.util.List;
 import java.util.ListIterator;
 import java.util.Map;
@@ -148,6 +151,10 @@ public class BeeLine implements Closeable {
   // Indicates if this instance of beeline is running in compatibility mode, or beeline mode
   private boolean isBeeLine = true;
 
+  // Indicates that we are in test mode.
+  // Print only the errors, the operation log and the query results.
+  private boolean isTestMode = false;
+
   private static final Options options = new Options();
 
   public static final String BEELINE_DEFAULT_JDBC_DRIVER = "org.apache.hive.jdbc.HiveDriver";
@@ -538,6 +545,7 @@ public class BeeLine implements Closeable {
       public void run() {
         try {
           if (history != null) {
+            history.setMaxSize(getOpts().getMaxHistoryRows());
             history.flush();
           }
         } catch (IOException e) {
@@ -1379,6 +1387,55 @@ public class BeeLine implements Closeable {
     return lineTrimmed.startsWith("#") || lineTrimmed.startsWith("--");
   }
 
+  String[] getCommands(File file) throws IOException {
+    List<String> cmds = new LinkedList<String>();
+    try (BufferedReader reader =
+             new BufferedReader(new InputStreamReader(new FileInputStream(file), "UTF-8"))) {
+      StringBuilder cmd = null;
+      while (true) {
+        String scriptLine = reader.readLine();
+
+        if (scriptLine == null) {
+          break;
+        }
+
+        String trimmedLine = scriptLine.trim();
+        if (getOpts().getTrimScripts()) {
+          scriptLine = trimmedLine;
+        }
+
+        if (cmd != null) {
+          // we're continuing an existing command
+          cmd.append("\n");
+          cmd.append(scriptLine);
+          if (trimmedLine.endsWith(";")) {
+            // this command has terminated
+            cmds.add(cmd.toString());
+            cmd = null;
+          }
+        } else {
+          // we're starting a new command
+          if (needsContinuation(scriptLine)) {
+            // multi-line
+            cmd = new StringBuilder(scriptLine);
+          } else {
+            // single-line
+            cmds.add(scriptLine);
+          }
+        }
+      }
+
+      if (cmd != null) {
+        // ### REVIEW: oops, somebody left the last command
+        // unterminated; should we fix it for them or complain?
+        // For now be nice and fix it.
+        cmd.append(";");
+        cmds.add(cmd.toString());
+      }
+    }
+    return cmds.toArray(new String[0]);
+  }
+
   /**
    * Print the specified message to the console
    *
@@ -2385,4 +2442,19 @@ public class BeeLine implements Closeable {
   public void setCurrentDatabase(String currentDatabase) {
     this.currentDatabase = currentDatabase;
   }
+
+  /**
+   * Sets BeeLine into test mode.
+   * Print only the errors, the operation log and the query results.
+   * Should be used only by tests.
+   *
+   * @param isTestMode true to enable test mode
+   */
+  void setIsTestMode(boolean isTestMode) {
+    this.isTestMode = isTestMode;
+  }
+
+  boolean isTestMode() {
+    return isTestMode;
+  }
 }

http://git-wip-us.apache.org/repos/asf/hive/blob/187eb760/beeline/src/java/org/apache/hive/beeline/BeeLineOpts.java
----------------------------------------------------------------------
diff --git a/beeline/src/java/org/apache/hive/beeline/BeeLineOpts.java b/beeline/src/java/org/apache/hive/beeline/BeeLineOpts.java
index 7e6846d..f85d8a3 100644
--- a/beeline/src/java/org/apache/hive/beeline/BeeLineOpts.java
+++ b/beeline/src/java/org/apache/hive/beeline/BeeLineOpts.java
@@ -45,6 +45,7 @@ import jline.Terminal;
 import jline.TerminalFactory;
 import jline.console.completer.Completer;
 import jline.console.completer.StringsCompleter;
+import jline.console.history.MemoryHistory;
 import org.apache.hadoop.hive.conf.HiveConf;
 
 class BeeLineOpts implements Completer {
@@ -61,7 +62,7 @@ class BeeLineOpts implements Completer {
   public static final int DEFAULT_MAX_COLUMN_WIDTH = 50;
   public static final int DEFAULT_INCREMENTAL_BUFFER_ROWS = 1000;
 
-  public static String URL_ENV_PREFIX = "BEELINE_URL_";
+  public static final String URL_ENV_PREFIX = "BEELINE_URL_";
 
   private final BeeLine beeLine;
   private boolean autosave = false;
@@ -100,6 +101,7 @@ class BeeLineOpts implements Completer {
 
   private final File rcFile = new File(saveDir(), "beeline.properties");
   private String historyFile = new File(saveDir(), "history").getAbsolutePath();
+  private int maxHistoryRows = MemoryHistory.DEFAULT_MAX_SIZE;
 
   private String scriptFile = null;
   private String[] initFiles = null;
@@ -431,6 +433,17 @@ class BeeLineOpts implements Completer {
     return historyFile;
   }
 
+  /**
+   * @param numRows - the number of rows to store in history file
+   */
+  public void setMaxHistoryRows(int numRows) {
+    this.maxHistoryRows = numRows;
+  }
+
+  public int getMaxHistoryRows() {
+    return maxHistoryRows;
+  }
+
   public void setScriptFile(String scriptFile) {
     this.scriptFile = scriptFile;
   }

http://git-wip-us.apache.org/repos/asf/hive/blob/187eb760/beeline/src/java/org/apache/hive/beeline/Commands.java
----------------------------------------------------------------------
diff --git a/beeline/src/java/org/apache/hive/beeline/Commands.java b/beeline/src/java/org/apache/hive/beeline/Commands.java
index 99ee82c..08d53ca 100644
--- a/beeline/src/java/org/apache/hive/beeline/Commands.java
+++ b/beeline/src/java/org/apache/hive/beeline/Commands.java
@@ -55,7 +55,6 @@ import java.util.TreeSet;
 
 import org.apache.hadoop.hive.common.cli.ShellCmdExecutor;
 import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
 import org.apache.hadoop.hive.conf.HiveVariableSource;
 import org.apache.hadoop.hive.conf.SystemVariables;
 import org.apache.hadoop.hive.conf.VariableSubstitution;
@@ -978,7 +977,8 @@ public class Commands {
           hasResults = ((CallableStatement) stmnt).execute();
         } else {
           stmnt = beeLine.createStatement();
-          if (beeLine.getOpts().isSilent()) {
+          // In test mode we want the operation logs regardless of the settings
+          if (!beeLine.isTestMode() && beeLine.getOpts().isSilent()) {
             hasResults = stmnt.execute(sql);
           } else {
             InPlaceUpdateStream.EventNotifier eventNotifier =
@@ -1221,46 +1221,61 @@ public class Commands {
     if (entireLineAsCommand) {
       cmdList.add(line);
     } else {
-      StringBuffer command = new StringBuffer();
+      StringBuilder command = new StringBuilder();
 
+      // Marker to track if there is a starting double quote without an ending double quote
       boolean hasUnterminatedDoubleQuote = false;
-      boolean hasUntermindatedSingleQuote = false;
 
+      // Marker to track if there is a starting single quote without an ending single quote
+      boolean hasUnterminatedSingleQuote = false;
+
+      // Index of the last seen semicolon in the given line
       int lastSemiColonIndex = 0;
       char[] lineChars = line.toCharArray();
 
+      // Marker to track if the previous character was an escape character
       boolean wasPrevEscape = false;
+
       int index = 0;
+
+      // Iterate through the line and invoke the addCmdPart method whenever a semicolon is seen that is not inside a
+      // quoted string
       for (; index < lineChars.length; index++) {
         switch (lineChars[index]) {
           case '\'':
+            // If a single quote is seen and the index is not inside a double quoted string and the previous character
+            // was not an escape, then update the hasUnterminatedSingleQuote flag
             if (!hasUnterminatedDoubleQuote && !wasPrevEscape) {
-              hasUntermindatedSingleQuote = !hasUntermindatedSingleQuote;
+              hasUnterminatedSingleQuote = !hasUnterminatedSingleQuote;
             }
             wasPrevEscape = false;
             break;
           case '\"':
-            if (!hasUntermindatedSingleQuote && !wasPrevEscape) {
+            // If a double quote is seen and the index is not inside a single quoted string and the previous character
+            // was not an escape, then update the hasUnterminatedDoubleQuote flag
+            if (!hasUnterminatedSingleQuote && !wasPrevEscape) {
               hasUnterminatedDoubleQuote = !hasUnterminatedDoubleQuote;
             }
             wasPrevEscape = false;
             break;
           case ';':
-            if (!hasUnterminatedDoubleQuote && !hasUntermindatedSingleQuote) {
+            // If a semicolon is seen, and the line isn't inside a quoted string, then treat
+            // line[lastSemiColonIndex] to line[index] as a single command
+            if (!hasUnterminatedDoubleQuote && !hasUnterminatedSingleQuote) {
               addCmdPart(cmdList, command, line.substring(lastSemiColonIndex, index));
               lastSemiColonIndex = index + 1;
             }
             wasPrevEscape = false;
             break;
           case '\\':
-            wasPrevEscape = true;
+            wasPrevEscape = !wasPrevEscape;
             break;
           default:
             wasPrevEscape = false;
             break;
         }
       }
-      // if the line doesn't end with a ; or if the line is empty, add the cmd part
+      // If the line doesn't end with a ; or if the line is empty, add the cmd part
       if (lastSemiColonIndex != index || lineChars.length == 0) {
         addCmdPart(cmdList, command, line.substring(lastSemiColonIndex, index));
       }
@@ -1272,7 +1287,7 @@ public class Commands {
    * Given a cmdpart (e.g. if a command spans multiple lines), add to the current command, and if
    * applicable add that command to the {@link List} of commands
    */
-  private void addCmdPart(List<String> cmdList, StringBuffer command, String cmdpart) {
+  private void addCmdPart(List<String> cmdList, StringBuilder command, String cmdpart) {
     if (cmdpart.endsWith("\\")) {
       command.append(cmdpart.substring(0, cmdpart.length() - 1)).append(";");
       return;
@@ -1327,7 +1342,12 @@ public class Commands {
       try {
         List<String> queryLogs = hiveStatement.getQueryLog();
         for (String log : queryLogs) {
-          commands.beeLine.info(log);
+          if (!commands.beeLine.isTestMode()) {
+            commands.beeLine.info(log);
+          } else {
+            // In test mode print the logs to the output
+            commands.beeLine.output(log);
+          }
         }
         if (!queryLogs.isEmpty()) {
           notifier.operationLogShowedToUser();
@@ -1371,7 +1391,12 @@ public class Commands {
           return;
         }
         for (String log : logs) {
-          beeLine.info(log);
+          if (!beeLine.isTestMode()) {
+            beeLine.info(log);
+          } else {
+            // In test mode print the logs to the output
+            beeLine.output(log);
+          }
         }
       } while (logs.size() > 0);
     } else {
@@ -1773,60 +1798,10 @@ public class Commands {
       return false;
     }
 
-    List<String> cmds = new LinkedList<String>();
-
     try {
-      BufferedReader reader = new BufferedReader(new FileReader(
-          parts[1]));
-      try {
-        // ### NOTE: fix for sf.net bug 879427
-        StringBuilder cmd = null;
-        for (;;) {
-          String scriptLine = reader.readLine();
-
-          if (scriptLine == null) {
-            break;
-          }
-
-          String trimmedLine = scriptLine.trim();
-          if (beeLine.getOpts().getTrimScripts()) {
-            scriptLine = trimmedLine;
-          }
-
-          if (cmd != null) {
-            // we're continuing an existing command
-            cmd.append(" \n");
-            cmd.append(scriptLine);
-            if (trimmedLine.endsWith(";")) {
-              // this command has terminated
-              cmds.add(cmd.toString());
-              cmd = null;
-            }
-          } else {
-            // we're starting a new command
-            if (beeLine.needsContinuation(scriptLine)) {
-              // multi-line
-              cmd = new StringBuilder(scriptLine);
-            } else {
-              // single-line
-              cmds.add(scriptLine);
-            }
-          }
-        }
-
-        if (cmd != null) {
-          // ### REVIEW: oops, somebody left the last command
-          // unterminated; should we fix it for them or complain?
-          // For now be nice and fix it.
-          cmd.append(";");
-          cmds.add(cmd.toString());
-        }
-      } finally {
-        reader.close();
-      }
-
+      String[] cmds = beeLine.getCommands(new File(parts[1]));
       // success only if all the commands were successful
-      return beeLine.runCommands(cmds) == cmds.size();
+      return beeLine.runCommands(cmds) == cmds.length;
     } catch (Exception e) {
       return beeLine.error(e);
     }

http://git-wip-us.apache.org/repos/asf/hive/blob/187eb760/beeline/src/java/org/apache/hive/beeline/HiveSchemaHelper.java
----------------------------------------------------------------------
diff --git a/beeline/src/java/org/apache/hive/beeline/HiveSchemaHelper.java b/beeline/src/java/org/apache/hive/beeline/HiveSchemaHelper.java
index 181f0d2..711f6a8 100644
--- a/beeline/src/java/org/apache/hive/beeline/HiveSchemaHelper.java
+++ b/beeline/src/java/org/apache/hive/beeline/HiveSchemaHelper.java
@@ -292,7 +292,7 @@ public class HiveSchemaHelper {
 
   // Derby commandline parser
   public static class DerbyCommandParser extends AbstractCommandParser {
-    private static String DERBY_NESTING_TOKEN = "RUN";
+    private static final String DERBY_NESTING_TOKEN = "RUN";
 
     public DerbyCommandParser(String dbOpts, String msUsername, String msPassword,
         HiveConf hiveConf) {
@@ -380,11 +380,11 @@ public class HiveSchemaHelper {
 
   // Postgres specific parser
   public static class PostgresCommandParser extends AbstractCommandParser {
-    private static String POSTGRES_NESTING_TOKEN = "\\i";
+    private static final String POSTGRES_NESTING_TOKEN = "\\i";
     @VisibleForTesting
-    public static String POSTGRES_STANDARD_STRINGS_OPT = "SET standard_conforming_strings";
+    public static final String POSTGRES_STANDARD_STRINGS_OPT = "SET standard_conforming_strings";
     @VisibleForTesting
-    public static String POSTGRES_SKIP_STANDARD_STRINGS_DBOPT = "postgres.filter.81";
+    public static final String POSTGRES_SKIP_STANDARD_STRINGS_DBOPT = "postgres.filter.81";
 
     public PostgresCommandParser(String dbOpts, String msUsername, String msPassword,
         HiveConf hiveConf) {
@@ -427,7 +427,7 @@ public class HiveSchemaHelper {
 
   //Oracle specific parser
   public static class OracleCommandParser extends AbstractCommandParser {
-    private static String ORACLE_NESTING_TOKEN = "@";
+    private static final String ORACLE_NESTING_TOKEN = "@";
 
     public OracleCommandParser(String dbOpts, String msUsername, String msPassword,
         HiveConf hiveConf) {
@@ -451,7 +451,7 @@ public class HiveSchemaHelper {
 
   //MSSQL specific parser
   public static class MSSQLCommandParser extends AbstractCommandParser {
-    private static String MSSQL_NESTING_TOKEN = ":r";
+    private static final String MSSQL_NESTING_TOKEN = ":r";
 
     public MSSQLCommandParser(String dbOpts, String msUsername, String msPassword,
         HiveConf hiveConf) {

http://git-wip-us.apache.org/repos/asf/hive/blob/187eb760/beeline/src/java/org/apache/hive/beeline/HiveSchemaTool.java
----------------------------------------------------------------------
diff --git a/beeline/src/java/org/apache/hive/beeline/HiveSchemaTool.java b/beeline/src/java/org/apache/hive/beeline/HiveSchemaTool.java
index 2c088c9..7dd4d5f 100644
--- a/beeline/src/java/org/apache/hive/beeline/HiveSchemaTool.java
+++ b/beeline/src/java/org/apache/hive/beeline/HiveSchemaTool.java
@@ -89,14 +89,7 @@ public class HiveSchemaTool {
     }
     this.hiveConf = hiveConf;
     this.dbType = dbType;
-    this.metaStoreSchemaInfo = new MetaStoreSchemaInfo(hiveHome, hiveConf, dbType);
-    userName = hiveConf.get(ConfVars.METASTORE_CONNECTION_USER_NAME.varname);
-    try {
-      passWord = ShimLoader.getHadoopShims().getPassword(hiveConf,
-          HiveConf.ConfVars.METASTOREPWD.varname);
-    } catch (IOException err) {
-      throw new HiveMetaException("Error getting metastore password", err);
-    }
+    this.metaStoreSchemaInfo = new MetaStoreSchemaInfo(hiveHome, dbType);
   }
 
   public HiveConf getHiveConf() {
@@ -593,29 +586,40 @@ public class HiveSchemaTool {
   }
 
   public void doValidate() throws HiveMetaException {
-    System.out.println("Starting metastore validation");
+    System.out.println("Starting metastore validation\n");
     Connection conn = getConnectionToMetastore(false);
+    boolean success = true;
     try {
-      if (validateSchemaVersions(conn))
+      if (validateSchemaVersions(conn)) {
         System.out.println("[SUCCESS]\n");
-      else
+      } else {
+        success = false;
         System.out.println("[FAIL]\n");
-      if (validateSequences(conn))
+      }
+      if (validateSequences(conn)) {
         System.out.println("[SUCCESS]\n");
-      else
+      } else {
+        success = false;
         System.out.println("[FAIL]\n");
-      if (validateSchemaTables(conn))
+      }
+      if (validateSchemaTables(conn)) {
         System.out.println("[SUCCESS]\n");
-      else
+      } else {
+        success = false;
         System.out.println("[FAIL]\n");
-      if (validateLocations(conn, this.validationServers))
+      }
+      if (validateLocations(conn, this.validationServers)) {
         System.out.println("[SUCCESS]\n");
-      else
+      } else {
+        success = false;
         System.out.println("[FAIL]\n");
-      if (validateColumnNullValues(conn))
+      }
+      if (validateColumnNullValues(conn)) {
         System.out.println("[SUCCESS]\n");
-      else
+      } else {
+        success = false;
         System.out.println("[FAIL]\n");
+      }
     } finally {
       if (conn != null) {
         try {
@@ -626,7 +630,13 @@ public class HiveSchemaTool {
       }
     }
 
-    System.out.println("Done with metastore validation");
+    System.out.print("Done with metastore validation: ");
+    if (!success) {
+      System.out.println("[FAIL]");
+      System.exit(1);
+    } else {
+      System.out.println("[SUCCESS]");
+    }
   }
 
   boolean validateSequences(Connection conn) throws HiveMetaException {
@@ -719,14 +729,14 @@ public class HiveSchemaTool {
       version = getMetaStoreSchemaVersion(hmsConn);
     } catch (HiveMetaException he) {
       System.err.println("Failed to determine schema version from Hive Metastore DB," + he.getMessage());
-      LOG.error("Failed to determine schema version from Hive Metastore DB," + he.getMessage());
+      LOG.debug("Failed to determine schema version from Hive Metastore DB," + he.getMessage());
       return false;
     }
 
     // re-open the hms connection
     hmsConn = getConnectionToMetastore(false);
 
-    LOG.info("Validating tables in the schema for version " + version);
+    LOG.debug("Validating tables in the schema for version " + version);
     try {
       metadata       = conn.getMetaData();
       String[] types = {"TABLE"};
@@ -760,7 +770,7 @@ public class HiveSchemaTool {
       subScripts.addAll(findCreateTable(schemaFile, schemaTables));
       while (subScripts.size() > 0) {
         schemaFile = baseDir + "/" + dbType + "/" + subScripts.remove(0);
-        LOG.info("Parsing subscript " + schemaFile);
+        LOG.debug("Parsing subscript " + schemaFile);
         subScripts.addAll(findCreateTable(schemaFile, schemaTables));
       }
     } catch (Exception e) {
@@ -775,13 +785,12 @@ public class HiveSchemaTool {
     int schemaSize = schemaTables.size();
     schemaTables.removeAll(dbTables);
     if (schemaTables.size() > 0) {
-      System.out.println("Found " + schemaSize + " tables in schema definition, " +
-          schemaTables.size() + " tables [ " + Arrays.toString(schemaTables.toArray())
+      System.out.println("Table(s) [ " + Arrays.toString(schemaTables.toArray())
           + " ] are missing from the metastore database schema.");
       System.out.println("Schema table validation failed!!!");
       return false;
     } else {
-      System.out.println("Succeeded in schema table validation. " + schemaSize + " tables matched");
+      System.out.println("Succeeded in schema table validation.");
       return true;
     }
   }
@@ -1102,9 +1111,19 @@ public class HiveSchemaTool {
 
       if (line.hasOption("userName")) {
         schemaTool.setUserName(line.getOptionValue("userName"));
+      } else {
+        schemaTool.setUserName(
+            schemaTool.getHiveConf().get(ConfVars.METASTORE_CONNECTION_USER_NAME.varname));
       }
       if (line.hasOption("passWord")) {
         schemaTool.setPassWord(line.getOptionValue("passWord"));
+      } else {
+        try {
+          schemaTool.setPassWord(ShimLoader.getHadoopShims().getPassword(schemaTool.getHiveConf(),
+              HiveConf.ConfVars.METASTOREPWD.varname));
+        } catch (IOException err) {
+          throw new HiveMetaException("Error getting metastore password", err);
+        }
       }
       if (line.hasOption("dryRun")) {
         schemaTool.setDryRun(true);

http://git-wip-us.apache.org/repos/asf/hive/blob/187eb760/beeline/src/java/org/apache/hive/beeline/hs2connection/UserHS2ConnectionFileParser.java
----------------------------------------------------------------------
diff --git a/beeline/src/java/org/apache/hive/beeline/hs2connection/UserHS2ConnectionFileParser.java b/beeline/src/java/org/apache/hive/beeline/hs2connection/UserHS2ConnectionFileParser.java
index 93a6231..7d7d9ae 100644
--- a/beeline/src/java/org/apache/hive/beeline/hs2connection/UserHS2ConnectionFileParser.java
+++ b/beeline/src/java/org/apache/hive/beeline/hs2connection/UserHS2ConnectionFileParser.java
@@ -44,7 +44,7 @@ public class UserHS2ConnectionFileParser implements HS2ConnectionFileParser {
           + (System.getProperty("os.name").toLowerCase().indexOf("windows") != -1 ? "" : ".")
           + "beeline" + File.separator;
   public static final String ETC_HIVE_CONF_LOCATION =
-      File.separator + "etc" + File.separator + "conf" + File.separator + "hive";
+      File.separator + "etc" + File.separator + "hive" + File.separator + "conf";
 
   private final List<String> locations = new ArrayList<>();
   private static final Logger log = LoggerFactory.getLogger(UserHS2ConnectionFileParser.class);

http://git-wip-us.apache.org/repos/asf/hive/blob/187eb760/beeline/src/java/org/apache/hive/beeline/logs/BeelineInPlaceUpdateStream.java
----------------------------------------------------------------------
diff --git a/beeline/src/java/org/apache/hive/beeline/logs/BeelineInPlaceUpdateStream.java b/beeline/src/java/org/apache/hive/beeline/logs/BeelineInPlaceUpdateStream.java
index 51344e3..40cde0c 100644
--- a/beeline/src/java/org/apache/hive/beeline/logs/BeelineInPlaceUpdateStream.java
+++ b/beeline/src/java/org/apache/hive/beeline/logs/BeelineInPlaceUpdateStream.java
@@ -1,3 +1,20 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
 package org.apache.hive.beeline.logs;
 
 import org.apache.hadoop.hive.common.log.InPlaceUpdate;

http://git-wip-us.apache.org/repos/asf/hive/blob/187eb760/beeline/src/main/resources/BeeLine.properties
----------------------------------------------------------------------
diff --git a/beeline/src/main/resources/BeeLine.properties b/beeline/src/main/resources/BeeLine.properties
index e33b812..7011221 100644
--- a/beeline/src/main/resources/BeeLine.properties
+++ b/beeline/src/main/resources/BeeLine.properties
@@ -202,6 +202,7 @@ cmd-usage: Usage: java org.apache.hive.cli.beeline.BeeLine \n \
 \  --delimiterForDSV=DELIMITER     specify the delimiter for delimiter-separated values output format (default: |)\n \
 \  --isolation=LEVEL               set the transaction isolation level\n \
 \  --nullemptystring=[true/false]  set to true to get historic behavior of printing null as empty string\n \
+\  --maxHistoryRows=MAXHISTORYROWS The maximum number of rows to store in beeline history.\n \
 \  --help                          display this message\n \
 \n \
 \  Example:\n \

http://git-wip-us.apache.org/repos/asf/hive/blob/187eb760/beeline/src/test/org/apache/hive/beeline/TestBeelineArgParsing.java
----------------------------------------------------------------------
diff --git a/beeline/src/test/org/apache/hive/beeline/TestBeelineArgParsing.java b/beeline/src/test/org/apache/hive/beeline/TestBeelineArgParsing.java
index d73d374..2884cc8 100644
--- a/beeline/src/test/org/apache/hive/beeline/TestBeelineArgParsing.java
+++ b/beeline/src/test/org/apache/hive/beeline/TestBeelineArgParsing.java
@@ -319,4 +319,16 @@ public class TestBeelineArgParsing {
     Assert.assertTrue(bl.properties.get(0).equals("props"));
     bl.close();
   }
+
+  /**
+   * Test maxHistoryRows parameter option.
+   */
+  @Test
+  public void testMaxHistoryRows() throws Exception {
+    TestBeeline bl = new TestBeeline();
+    String args[] = new String[] {"--maxHistoryRows=100"};
+    Assert.assertEquals(0, bl.initArgs(args));
+    Assert.assertTrue(bl.getOpts().getMaxHistoryRows() == 100);
+    bl.close();
+  }
 }

http://git-wip-us.apache.org/repos/asf/hive/blob/187eb760/beeline/src/test/org/apache/hive/beeline/TestHiveSchemaTool.java
----------------------------------------------------------------------
diff --git a/beeline/src/test/org/apache/hive/beeline/TestHiveSchemaTool.java b/beeline/src/test/org/apache/hive/beeline/TestHiveSchemaTool.java
index 8d386da..4cd5124 100644
--- a/beeline/src/test/org/apache/hive/beeline/TestHiveSchemaTool.java
+++ b/beeline/src/test/org/apache/hive/beeline/TestHiveSchemaTool.java
@@ -1,3 +1,20 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
 package org.apache.hive.beeline;
 
 import org.apache.hadoop.hive.conf.HiveConf;

http://git-wip-us.apache.org/repos/asf/hive/blob/187eb760/cli/pom.xml
----------------------------------------------------------------------
diff --git a/cli/pom.xml b/cli/pom.xml
index 10fb1b9..71d214b 100644
--- a/cli/pom.xml
+++ b/cli/pom.xml
@@ -19,7 +19,7 @@
   <parent>
     <groupId>org.apache.hive</groupId>
     <artifactId>hive</artifactId>
-    <version>2.2.0-SNAPSHOT</version>
+    <version>3.0.0-SNAPSHOT</version>
     <relativePath>../pom.xml</relativePath>
   </parent>
 

http://git-wip-us.apache.org/repos/asf/hive/blob/187eb760/cli/src/java/org/apache/hadoop/hive/cli/RCFileCat.java
----------------------------------------------------------------------
diff --git a/cli/src/java/org/apache/hadoop/hive/cli/RCFileCat.java b/cli/src/java/org/apache/hadoop/hive/cli/RCFileCat.java
index f1806a0..24550fa 100644
--- a/cli/src/java/org/apache/hadoop/hive/cli/RCFileCat.java
+++ b/cli/src/java/org/apache/hadoop/hive/cli/RCFileCat.java
@@ -54,7 +54,7 @@ public class RCFileCat implements Tool{
   // In verbose mode, print an update per RECORD_PRINT_INTERVAL records
   private static final int RECORD_PRINT_INTERVAL = (1024*1024);
 
-  protected static boolean test=false;
+  protected boolean test = false;
 
   public RCFileCat() {
     super();
@@ -63,12 +63,12 @@ public class RCFileCat implements Tool{
       onUnmappableCharacter(CodingErrorAction.REPLACE);
   }
 
-  private static CharsetDecoder decoder;
+  private CharsetDecoder decoder;
 
   Configuration conf = null;
 
-  private static String TAB ="\t";
-  private static String NEWLINE ="\r\n";
+  private static final String TAB ="\t";
+  private static final String NEWLINE ="\r\n";
 
   @Override
   public int run(String[] args) throws Exception {
@@ -243,7 +243,7 @@ public class RCFileCat implements Tool{
     this.conf = conf;
   }
 
-  private static String Usage = "RCFileCat [--start=start_offet] [--length=len] [--verbose] " +
+  private static final String Usage = "RCFileCat [--start=start_offset] [--length=len] [--verbose] " +
       "[--column-sizes | --column-sizes-pretty] [--file-sizes] fileName";
 
   public static void main(String[] args) {
@@ -262,7 +262,7 @@ public class RCFileCat implements Tool{
     }
   }
 
-  private static void setupBufferedOutput() {
+  private void setupBufferedOutput() {
     OutputStream pdataOut;
     if (test) {
       pdataOut = System.out;
@@ -275,6 +275,7 @@ public class RCFileCat implements Tool{
         new PrintStream(bos, false);
     System.setOut(ps);
   }
+
   private static void printUsage(String errorMsg) {
     System.err.println(Usage);
     if(errorMsg != null) {
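
Since RCFileCat implements Hadoop's Tool interface, the usage string above can also be exercised programmatically. A hedged sketch (the file path is a placeholder and --column-sizes is one of the flags listed in Usage; nothing here is new API):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hive.cli.RCFileCat;
    import org.apache.hadoop.util.ToolRunner;

    public class RCFileCatExample {
      public static void main(String[] args) throws Exception {
        // Placeholder path; any RCFile reachable from the default FileSystem would do.
        String[] catArgs = {"--column-sizes", "/tmp/example.rc"};
        // ToolRunner injects the Configuration into the Tool before run() is invoked.
        int exitCode = ToolRunner.run(new Configuration(), new RCFileCat(), catArgs);
        System.exit(exitCode);
      }
    }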

http://git-wip-us.apache.org/repos/asf/hive/blob/187eb760/cli/src/test/org/apache/hadoop/hive/cli/TestRCFileCat.java
----------------------------------------------------------------------
diff --git a/cli/src/test/org/apache/hadoop/hive/cli/TestRCFileCat.java b/cli/src/test/org/apache/hadoop/hive/cli/TestRCFileCat.java
index 11ceb31..4cb4a19 100644
--- a/cli/src/test/org/apache/hadoop/hive/cli/TestRCFileCat.java
+++ b/cli/src/test/org/apache/hadoop/hive/cli/TestRCFileCat.java
@@ -25,8 +25,6 @@ import java.io.ByteArrayOutputStream;
 import java.io.File;
 import java.io.IOException;
 import java.io.PrintStream;
-import java.net.URI;
-
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
@@ -77,7 +75,7 @@ public class TestRCFileCat {
     writer.close();
 
     RCFileCat fileCat = new RCFileCat();
-    RCFileCat.test=true;
+    fileCat.test=true;
     fileCat.setConf(new Configuration());
 
     // set fake input and output streams

http://git-wip-us.apache.org/repos/asf/hive/blob/187eb760/common/pom.xml
----------------------------------------------------------------------
diff --git a/common/pom.xml b/common/pom.xml
index 8474a87..e6722ba 100644
--- a/common/pom.xml
+++ b/common/pom.xml
@@ -19,7 +19,7 @@
   <parent>
     <groupId>org.apache.hive</groupId>
     <artifactId>hive</artifactId>
-    <version>2.2.0-SNAPSHOT</version>
+    <version>3.0.0-SNAPSHOT</version>
     <relativePath>../pom.xml</relativePath>
   </parent>
 
@@ -69,20 +69,24 @@
       <version>${jline.version}</version>
     </dependency>
     <dependency>
-      <groupId>org.eclipse.jetty.aggregate</groupId>
-      <artifactId>jetty-all</artifactId>
-      <version>${jetty.version}</version>
-      <exclusions>
-	<exclusion>
-	  <groupId>javax.servlet</groupId>
-	  <artifactId>servlet-api</artifactId>
-	</exclusion>
-      </exclusions>
+      <groupId>javax.servlet</groupId>
+      <artifactId>javax.servlet-api</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.eclipse.jetty</groupId>
+      <artifactId>jetty-rewrite</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.eclipse.jetty</groupId>
+      <artifactId>jetty-server</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.eclipse.jetty</groupId>
+      <artifactId>jetty-servlet</artifactId>
     </dependency>
     <dependency>
-      <groupId>org.eclipse.jetty.orbit</groupId>
-      <artifactId>javax.servlet</artifactId>
-      <version>${javax-servlet.version}</version>
+      <groupId>org.eclipse.jetty</groupId>
+      <artifactId>jetty-webapp</artifactId>
     </dependency>
     <dependency>
       <groupId>joda-time</groupId>
@@ -129,6 +133,18 @@
           <artifactId>servlet-api</artifactId>
         </exclusion>
         <exclusion>
+          <groupId>javax.servlet.jsp</groupId>
+          <artifactId>jsp-api</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>org.mortbay.jetty</groupId>
+          <artifactId>jetty</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>org.mortbay.jetty</groupId>
+          <artifactId>jetty-util</artifactId>
+        </exclusion>
+        <exclusion>
           <groupId>org.slf4j</groupId>
           <artifactId>slf4j-log4j12</artifactId>
         </exclusion>
@@ -149,6 +165,10 @@
           <artifactId>servlet-api</artifactId>
         </exclusion>
         <exclusion>
+          <groupId>org.mortbay.jetty</groupId>
+          <artifactId>jetty-util</artifactId>
+        </exclusion>
+        <exclusion>
           <groupId>org.slf4j</groupId>
           <artifactId>slf4j-log4j12</artifactId>
         </exclusion>

http://git-wip-us.apache.org/repos/asf/hive/blob/187eb760/common/src/java/org/apache/hadoop/hive/common/CopyOnFirstWriteProperties.java
----------------------------------------------------------------------
diff --git a/common/src/java/org/apache/hadoop/hive/common/CopyOnFirstWriteProperties.java b/common/src/java/org/apache/hadoop/hive/common/CopyOnFirstWriteProperties.java
new file mode 100644
index 0000000..d4d078b
--- /dev/null
+++ b/common/src/java/org/apache/hadoop/hive/common/CopyOnFirstWriteProperties.java
@@ -0,0 +1,344 @@
+package org.apache.hadoop.hive.common;
+
+import com.google.common.collect.Interner;
+import com.google.common.collect.Interners;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+import java.io.PrintStream;
+import java.io.PrintWriter;
+import java.io.Reader;
+import java.lang.reflect.Field;
+import java.util.Collection;
+import java.util.Enumeration;
+import java.util.Map;
+import java.util.Properties;
+import java.util.Set;
+import java.util.function.BiConsumer;
+import java.util.function.BiFunction;
+import java.util.function.Function;
+
+/**
+ * A special subclass of Properties, designed to save memory when many identical
+ * copies of Properties would otherwise be created. To achieve that, we use the
+ * 'interned' field, which points to the same Properties object for all instances
+ * of CopyOnFirstWriteProperties that were created with identical contents.
+ * However, as soon as any mutating method is called, contents are copied from
+ * the 'interned' properties into this instance.
+ */
+public class CopyOnFirstWriteProperties extends Properties {
+
+  private Properties interned;
+
+  private static Interner<Properties> INTERNER = Interners.newWeakInterner();
+  private static Field defaultsField;
+  static {
+    try {
+      defaultsField = Properties.class.getDeclaredField("defaults");
+      defaultsField.setAccessible(true);
+    } catch (Exception e) {
+      throw new RuntimeException(e);
+    }
+  }
+
+  public CopyOnFirstWriteProperties(Properties p) {
+    setInterned(p);
+  }
+
+  /*************   Public API of java.util.Properties   ************/
+
+  @Override
+  public String getProperty(String key) {
+    if (interned != null) return interned.getProperty(key);
+    else return super.getProperty(key);
+  }
+
+  @Override
+  public String getProperty(String key, String defaultValue) {
+    if (interned != null) return interned.getProperty(key, defaultValue);
+    else return super.getProperty(key, defaultValue);
+  }
+
+  @Override
+  public void list(PrintStream out) {
+    if (interned != null) interned.list(out);
+    else super.list(out);
+  }
+
+  @Override
+  public void list(PrintWriter out) {
+    if (interned != null) interned.list(out);
+    else super.list(out);
+  }
+
+  @Override
+  public synchronized void load(InputStream inStream) throws IOException {
+    if (interned != null) copyFromInternedToThis();
+    super.load(inStream);
+  }
+
+  @Override
+  public synchronized void load(Reader reader) throws IOException {
+    if (interned != null) copyFromInternedToThis();
+    super.load(reader);
+  }
+
+  @Override
+  public synchronized void loadFromXML(InputStream inStream) throws IOException {
+    if (interned != null) copyFromInternedToThis();
+    super.loadFromXML(inStream);
+  }
+
+  @Override
+  public Enumeration<?> propertyNames() {
+    if (interned != null) return interned.propertyNames();
+    else return super.propertyNames();
+  }
+
+  @Override
+  public synchronized Object setProperty(String key, String value) {
+    if (interned != null) copyFromInternedToThis();
+    return super.setProperty(key, value);
+  }
+
+  @Override
+  public void store(OutputStream out, String comments) throws IOException {
+    if (interned != null) interned.store(out, comments);
+    else super.store(out, comments);
+  }
+
+  @Override
+  public void storeToXML(OutputStream os, String comment) throws IOException {
+    if (interned != null) interned.storeToXML(os, comment);
+    else super.storeToXML(os, comment);
+  }
+
+  @Override
+  public void storeToXML(OutputStream os, String comment, String encoding)
+      throws IOException {
+    if (interned != null) interned.storeToXML(os, comment, encoding);
+    else super.storeToXML(os, comment, encoding);
+  }
+
+  @Override
+  public Set<String> stringPropertyNames() {
+    if (interned != null) return interned.stringPropertyNames();
+    else return super.stringPropertyNames();
+  }
+
+  /*************   Public API of java.util.Hashtable   ************/
+
+  @Override
+  public synchronized void clear() {
+    if (interned != null) copyFromInternedToThis();
+    super.clear();
+  }
+
+  @Override
+  public synchronized Object clone() {
+    if (interned != null) return new CopyOnFirstWriteProperties(interned);
+    else return super.clone();
+  }
+
+  @Override
+  public synchronized Object compute(Object key, BiFunction remappingFunction) {
+    if (interned != null) copyFromInternedToThis();  // We do this because if function returns null,
+                                       // the mapping for key is removed, i.e. the table is mutated.
+    return super.compute(key, remappingFunction);
+  }
+
+  @Override
+  public synchronized Object computeIfAbsent(Object key, Function mappingFunction) {
+    if (interned != null) copyFromInternedToThis();
+    return super.computeIfAbsent(key, mappingFunction);
+  }
+
+  @Override
+  public synchronized Object computeIfPresent(Object key, BiFunction remappingFunction) {
+    if (interned != null) copyFromInternedToThis();
+    return super.computeIfPresent(key, remappingFunction);
+  }
+
+  @Override
+  public synchronized boolean contains(Object value) {
+    if (interned != null) return interned.contains(value);
+    else return super.contains(value);
+  }
+
+  @Override
+  public synchronized boolean containsKey(Object key) {
+    if (interned != null) return interned.containsKey(key);
+    else return super.containsKey(key);
+  }
+
+  @Override
+  public synchronized boolean containsValue(Object value) {
+    if (interned != null) return interned.containsValue(value);
+    else return super.containsValue(value);
+  }
+
+  @Override
+  public synchronized Enumeration<Object> elements() {
+    if (interned != null) return interned.elements();
+    else return super.elements();
+  }
+
+  @Override
+  public Set<Map.Entry<Object, Object>> entrySet() {
+    if (interned != null) return interned.entrySet();
+    else return super.entrySet();
+  }
+
+  @Override
+  public synchronized boolean equals(Object o) {
+    if (interned != null) return interned.equals(o);
+    else return super.equals(o);
+  }
+
+  @Override
+  public synchronized void forEach(BiConsumer action) {
+    if (interned != null) interned.forEach(action);
+    else super.forEach(action);
+  }
+
+  @Override
+  public synchronized Object get(Object key) {
+    if (interned != null) return interned.get(key);
+    else return super.get(key);
+  }
+
+  @Override
+  public synchronized Object getOrDefault(Object key, Object defaultValue) {
+    if (interned != null) return interned.getOrDefault(key, defaultValue);
+    else return super.getOrDefault(key, defaultValue);
+  }
+
+  @Override
+  public synchronized int hashCode() {
+    if (interned != null) return interned.hashCode();
+    else return super.hashCode();
+  }
+
+  @Override
+  public synchronized boolean isEmpty() {
+    if (interned != null) return interned.isEmpty();
+    else return super.isEmpty();
+  }
+
+  @Override
+  public synchronized Enumeration<Object> keys() {
+    if (interned != null) return interned.keys();
+    else return super.keys();
+  }
+
+  @Override
+  public Set<Object> keySet() {
+    if (interned != null) return interned.keySet();
+    else return super.keySet();
+  }
+
+  @Override
+  public synchronized Object merge(Object key, Object value, BiFunction remappingFunction) {
+    if (interned != null) copyFromInternedToThis();
+    return super.merge(key, value, remappingFunction);
+  }
+
+  @Override
+  public synchronized Object put(Object key, Object value) {
+    if (interned != null) copyFromInternedToThis();
+    return super.put(key, value);
+  }
+
+  @Override
+  public synchronized void putAll(Map<? extends Object, ? extends Object> t) {
+    if (interned != null) copyFromInternedToThis();
+    super.putAll(t);
+  }
+
+  @Override
+  public synchronized Object putIfAbsent(Object key, Object value) {
+    if (interned != null) copyFromInternedToThis();
+    return super.putIfAbsent(key, value);
+  }
+
+  @Override
+  public synchronized Object remove(Object key) {
+    if (interned != null) copyFromInternedToThis();
+    return super.remove(key);
+  }
+
+  @Override
+  public synchronized boolean remove(Object key, Object value) {
+    if (interned != null) copyFromInternedToThis();
+    return super.remove(key, value);
+  }
+
+  @Override
+  public synchronized Object replace(Object key, Object value) {
+    if (interned != null) copyFromInternedToThis();
+    return super.replace(key, value);
+  }
+
+  @Override
+  public synchronized boolean replace(Object key, Object oldValue, Object newValue) {
+    if (interned != null) copyFromInternedToThis();
+    return super.replace(key, oldValue, newValue);
+  }
+
+  @Override
+  public synchronized void replaceAll(BiFunction function) {
+    if (interned != null) copyFromInternedToThis();
+    super.replaceAll(function);
+  }
+
+  @Override
+  public synchronized int size() {
+    if (interned != null) return interned.size();
+    else return super.size();
+  }
+
+  @Override
+  public synchronized String toString() {
+    if (interned != null) return interned.toString();
+    else return super.toString();
+  }
+
+  @Override
+  public Collection<Object> values() {
+    if (interned != null) return interned.values();
+    else return super.values();
+  }
+
+  /*************   Private implementation ************/
+
+  private void copyFromInternedToThis() {
+    for (Map.Entry<?,?> e : interned.entrySet()) {
+      super.put(e.getKey(), e.getValue());
+    }
+    try {
+      // Unfortunately, we cannot directly read a protected field of non-this object
+      this.defaults = (Properties) defaultsField.get(interned);
+    } catch (IllegalAccessException e) {   // Shouldn't happen
+      throw new RuntimeException(e);
+    }
+    setInterned(null);
+  }
+
+  public void setInterned(Properties p) {
+    if (p != null) {
+      this.interned = INTERNER.intern(p);
+    } else {
+      this.interned = p;
+    }
+  }
+
+  // These methods are required by serialization
+
+  public CopyOnFirstWriteProperties() {
+  }
+
+  public Properties getInterned() {
+    return interned;
+  }
+}
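
To illustrate the behaviour described in the class javadoc, here is a small usage sketch. The property keys and values are arbitrary placeholders; the point is that instances built from equal Properties share one interned backing object, reads delegate to it, and the first mutating call copies the contents into the instance and detaches it:

    import java.util.Properties;
    import org.apache.hadoop.hive.common.CopyOnFirstWriteProperties;

    public class CopyOnFirstWriteExample {
      public static void main(String[] args) {
        Properties a = new Properties();
        a.setProperty("serialization.format", "1");
        Properties b = new Properties();
        b.setProperty("serialization.format", "1");

        CopyOnFirstWriteProperties p1 = new CopyOnFirstWriteProperties(a);
        CopyOnFirstWriteProperties p2 = new CopyOnFirstWriteProperties(b);

        // Equal contents intern to the same backing Properties object.
        System.out.println(p1.getInterned() == p2.getInterned());     // true
        // Reads are served from the shared interned copy.
        System.out.println(p1.getProperty("serialization.format"));   // 1

        // The first write copies the interned contents into p1 and detaches it.
        p1.setProperty("columns", "a,b");
        System.out.println(p1.getInterned());                         // null after the copy
        System.out.println(p2.getProperty("columns"));                // null, p2 unchanged
      }
    }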

http://git-wip-us.apache.org/repos/asf/hive/blob/187eb760/common/src/java/org/apache/hadoop/hive/common/FileUtils.java
----------------------------------------------------------------------
diff --git a/common/src/java/org/apache/hadoop/hive/common/FileUtils.java b/common/src/java/org/apache/hadoop/hive/common/FileUtils.java
index c6bc9b9..0f7401c 100644
--- a/common/src/java/org/apache/hadoop/hive/common/FileUtils.java
+++ b/common/src/java/org/apache/hadoop/hive/common/FileUtils.java
@@ -25,6 +25,8 @@ import java.net.URI;
 import java.net.URISyntaxException;
 import java.security.AccessControlException;
 import java.security.PrivilegedExceptionAction;
+import java.util.ArrayList;
+import java.util.Arrays;
 import java.util.BitSet;
 import java.util.Collection;
 import java.util.HashSet;
@@ -357,6 +359,12 @@ public final class FileUtils {
     return getPathOrParentThatExists(fs, parentPath);
   }
 
+  public static void checkFileAccessWithImpersonation(final FileSystem fs, final FileStatus stat,
+      final FsAction action, final String user)
+      throws IOException, AccessControlException, InterruptedException, Exception {
+    checkFileAccessWithImpersonation(fs, stat, action, user, null);
+  }
+
   /**
    * Perform a check to determine if the user is able to access the file passed in.
    * If the user name passed in is different from the current user, this method will
@@ -371,13 +379,15 @@ public final class FileUtils {
    *             check will be performed within a doAs() block to use the access privileges
    *             of this user. In this case the user must be configured to impersonate other
    *             users, otherwise this check will fail with error.
+   * @param children List of children to be collected. If this is null, no children are collected.
+   *        To be set only if this is a directory
    * @throws IOException
    * @throws AccessControlException
    * @throws InterruptedException
    * @throws Exception
    */
   public static void checkFileAccessWithImpersonation(final FileSystem fs,
-      final FileStatus stat, final FsAction action, final String user)
+      final FileStatus stat, final FsAction action, final String user, final List<FileStatus> children)
           throws IOException, AccessControlException, InterruptedException, Exception {
     UserGroupInformation ugi = Utils.getUGI();
     String currentUser = ugi.getShortUserName();
@@ -385,6 +395,7 @@ public final class FileUtils {
     if (user == null || currentUser.equals(user)) {
       // No need to impersonate user, do the checks as the currently configured user.
       ShimLoader.getHadoopShims().checkFileAccess(fs, stat, action);
+      addChildren(fs, stat.getPath(), children);
       return;
     }
 
@@ -397,6 +408,7 @@ public final class FileUtils {
         public Object run() throws Exception {
           FileSystem fsAsUser = FileSystem.get(fs.getUri(), fs.getConf());
           ShimLoader.getHadoopShims().checkFileAccess(fsAsUser, stat, action);
+          addChildren(fsAsUser, stat.getPath(), children);
           return null;
         }
       });
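
A short caller-side sketch of the overload added above. The path and the impersonated user name are placeholders; the children list is only useful when the status refers to a directory, in which case it receives the listing gathered during the same access check:

    import java.util.ArrayList;
    import java.util.List;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.fs.permission.FsAction;
    import org.apache.hadoop.hive.common.FileUtils;

    public class FileAccessCheckExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(conf);
        FileStatus stat = fs.getFileStatus(new Path("/warehouse/db/t"));  // placeholder path
        List<FileStatus> children = stat.isDir() ? new ArrayList<FileStatus>() : null;
        // Throws AccessControlException if user "hive" (placeholder) cannot READ the path;
        // on success, a non-null children list holds the directory listing.
        FileUtils.checkFileAccessWithImpersonation(fs, stat, FsAction.READ, "hive", children);
      }
    }
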
@@ -405,6 +417,20 @@ public final class FileUtils {
     }
   }
 
+  private static void addChildren(FileSystem fsAsUser, Path path, List<FileStatus> children)
+      throws IOException {
+    if (children != null) {
+      FileStatus[] listStatus;
+      try {
+        listStatus = fsAsUser.listStatus(path);
+      } catch (IOException e) {
+        LOG.warn("Unable to list files under " + path + " : " + e);
+        throw e;
+      }
+      children.addAll(Arrays.asList(listStatus));
+    }
+  }
+
   /**
    * Check if user userName has permissions to perform the given FsAction action
    * on all files under the file whose FileStatus fileStatus is provided
@@ -431,20 +457,26 @@ public final class FileUtils {
       dirActionNeeded.and(FsAction.EXECUTE);
     }
 
+    List<FileStatus> subDirsToCheck = null;
+    if (isDir && recurse) {
+      subDirsToCheck = new ArrayList<FileStatus>();
+    }
+
     try {
-      checkFileAccessWithImpersonation(fs, fileStatus, action, userName);
+      checkFileAccessWithImpersonation(fs, fileStatus, action, userName, subDirsToCheck);
     } catch (AccessControlException err) {
       // Action not permitted for user
+      LOG.warn("Action " + action + " denied on " + fileStatus.getPath() + " for user " + userName);
       return false;
     }
 
-    if ((!isDir) || (!recurse)) {
+    if (subDirsToCheck == null || subDirsToCheck.isEmpty()) {
       // no sub dirs to be checked
       return true;
     }
+
     // check all children
-    FileStatus[] childStatuses = fs.listStatus(fileStatus.getPath());
-    for (FileStatus childStatus : childStatuses) {
+    for (FileStatus childStatus : subDirsToCheck) {
       // check children recursively - recurse is true if we're here.
       if (!isActionPermittedForFileHierarchy(fs, childStatus, userName, action, true)) {
         return false;
@@ -486,11 +518,30 @@ public final class FileUtils {
     return false;
   }
   public static boolean isOwnerOfFileHierarchy(FileSystem fs, FileStatus fileStatus, String userName)
-      throws IOException {
+      throws IOException, InterruptedException {
     return isOwnerOfFileHierarchy(fs, fileStatus, userName, true);
   }
 
-  public static boolean isOwnerOfFileHierarchy(FileSystem fs, FileStatus fileStatus,
+  public static boolean isOwnerOfFileHierarchy(final FileSystem fs,
+      final FileStatus fileStatus, final String userName, final boolean recurse)
+      throws IOException, InterruptedException {
+    UserGroupInformation proxyUser = UserGroupInformation.createProxyUser(userName,
+        UserGroupInformation.getLoginUser());
+    try {
+      boolean isOwner = proxyUser.doAs(new PrivilegedExceptionAction<Boolean>() {
+        @Override
+        public Boolean run() throws Exception {
+          FileSystem fsAsUser = FileSystem.get(fs.getUri(), fs.getConf());
+          return checkIsOwnerOfFileHierarchy(fsAsUser, fileStatus, userName, recurse);
+        }
+      });
+      return isOwner;
+    } finally {
+      FileSystem.closeAllForUGI(proxyUser);
+    }
+  }
+
+  public static boolean checkIsOwnerOfFileHierarchy(FileSystem fs, FileStatus fileStatus,
       String userName, boolean recurse)
       throws IOException {
     if (!fileStatus.getOwner().equals(userName)) {
@@ -505,59 +556,24 @@ public final class FileUtils {
     FileStatus[] childStatuses = fs.listStatus(fileStatus.getPath());
     for (FileStatus childStatus : childStatuses) {
       // check children recursively - recurse is true if we're here.
-      if (!isOwnerOfFileHierarchy(fs, childStatus, userName, true)) {
+      if (!checkIsOwnerOfFileHierarchy(fs, childStatus, userName, true)) {
         return false;
       }
     }
     return true;
   }
 
-  public static boolean mkdir(FileSystem fs, Path f, Configuration conf) throws IOException {
-    boolean inheritPerms = HiveConf.getBoolVar(conf, ConfVars.HIVE_WAREHOUSE_SUBDIR_INHERIT_PERMS);
-    return mkdir(fs, f, inheritPerms, conf);
-  }
-
   /**
    * Creates the directory and all necessary parent directories.
    * @param fs FileSystem to use
    * @param f path to create.
-   * @param inheritPerms whether directory inherits the permission of the last-existing parent path
    * @param conf Hive configuration
    * @return true if directory created successfully.  False otherwise, including if it exists.
    * @throws IOException exception in creating the directory
    */
-  public static boolean mkdir(FileSystem fs, Path f, boolean inheritPerms, Configuration conf) throws IOException {
+  public static boolean mkdir(FileSystem fs, Path f, Configuration conf) throws IOException {
     LOG.info("Creating directory if it doesn't exist: " + f);
-    if (!inheritPerms) {
-      //just create the directory
-      return fs.mkdirs(f);
-    } else {
-      //Check if the directory already exists. We want to change the permission
-      //to that of the parent directory only for newly created directories.
-      try {
-        return fs.getFileStatus(f).isDir();
-      } catch (FileNotFoundException ignore) {
-      }
-      //inherit perms: need to find last existing parent path, and apply its permission on entire subtree.
-      Path lastExistingParent = f;
-      Path firstNonExistentParent = null;
-      while (!fs.exists(lastExistingParent)) {
-        firstNonExistentParent = lastExistingParent;
-        lastExistingParent = lastExistingParent.getParent();
-      }
-      boolean success = fs.mkdirs(f);
-      if (!success) {
-        return false;
-      } else {
-        //set on the entire subtree
-        if (inheritPerms) {
-          HdfsUtils.setFullFileStatus(conf,
-                  new HdfsUtils.HadoopFileStatus(conf, fs, lastExistingParent), fs,
-                  firstNonExistentParent, true);
-        }
-        return true;
-      }
-    }
+    return fs.mkdirs(f);
   }
 
   public static Path makeAbsolute(FileSystem fileSystem, Path path) throws IOException {
@@ -610,11 +626,6 @@ public final class FileUtils {
     if (!triedDistcp) {
       copied = FileUtil.copy(srcFS, src, dstFS, dst, deleteSource, overwrite, conf);
     }
-
-    boolean inheritPerms = conf.getBoolVar(HiveConf.ConfVars.HIVE_WAREHOUSE_SUBDIR_INHERIT_PERMS);
-    if (copied && inheritPerms) {
-      HdfsUtils.setFullFileStatus(conf, new HdfsUtils.HadoopFileStatus(conf, dstFS, dst.getParent()), dstFS, dst, true);
-    }
     return copied;
   }
 
@@ -626,15 +637,19 @@ public final class FileUtils {
    * @return true if move successful
    * @throws IOException
    */
-  public static boolean moveToTrash(FileSystem fs, Path f, Configuration conf)
+  public static boolean moveToTrash(FileSystem fs, Path f, Configuration conf, boolean purge)
       throws IOException {
     LOG.debug("deleting  " + f);
     boolean result = false;
     try {
-      result = Trash.moveToAppropriateTrash(fs, f, conf);
-      if (result) {
-        LOG.trace("Moved to trash: " + f);
-        return true;
+      if(purge) {
+        LOG.debug("purge is set to true. Not moving to Trash " + f);
+      } else {
+        result = Trash.moveToAppropriateTrash(fs, f, conf);
+        if (result) {
+          LOG.trace("Moved to trash: " + f);
+          return true;
+        }
       }
     } catch (IOException ioe) {
       // for whatever failure reason including that trash has lower encryption zone
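
A minimal caller sketch for the purge flag introduced in the hunk above (path and configuration are placeholders): with purge set to true the data is not moved to the Trash at all, while purge=false preserves the previous move-to-trash behaviour:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hive.common.FileUtils;

    public class MoveToTrashExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(conf);
        Path stale = new Path("/warehouse/db/t/stale_partition");  // placeholder
        // purge=true bypasses Trash.moveToAppropriateTrash for this path.
        boolean result = FileUtils.moveToTrash(fs, stale, conf, true);
        System.out.println("result: " + result);
      }
    }
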
@@ -646,13 +661,11 @@ public final class FileUtils {
     if (!result) {
       LOG.error("Failed to delete " + f);
     }
-
     return result;
   }
 
-  public static boolean renameWithPerms(FileSystem fs, Path sourcePath,
-                               Path destPath, boolean inheritPerms,
-                               Configuration conf) throws IOException {
+  public static boolean rename(FileSystem fs, Path sourcePath,
+                               Path destPath, Configuration conf) throws IOException {
     LOG.info("Renaming " + sourcePath + " to " + destPath);
 
     // If destPath directory exists, rename call will move the sourcePath
@@ -661,20 +674,7 @@ public final class FileUtils {
       throw new IOException("Cannot rename the source path. The destination "
           + "path already exists.");
     }
-
-    if (!inheritPerms) {
-      //just rename the directory
-      return fs.rename(sourcePath, destPath);
-    } else {
-      //rename the directory
-      if (fs.rename(sourcePath, destPath)) {
-        HdfsUtils.setFullFileStatus(conf, new HdfsUtils.HadoopFileStatus(conf, fs, destPath.getParent()), fs, destPath,
-                true);
-        return true;
-      }
-
-      return false;
-    }
+    return fs.rename(sourcePath, destPath);
   }
 
   /**