Posted to commits@hive.apache.org by we...@apache.org on 2017/05/05 17:32:21 UTC

[34/51] [partial] hive git commit: HIVE-14671 : merge master into hive-14535 (Wei Zheng)

http://git-wip-us.apache.org/repos/asf/hive/blob/187eb760/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/FolderPermissionBase.java
----------------------------------------------------------------------
diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/FolderPermissionBase.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/FolderPermissionBase.java
deleted file mode 100644
index 2ae9cc0..0000000
--- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/FolderPermissionBase.java
+++ /dev/null
@@ -1,792 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hive.ql.security;
-
-import java.net.URI;
-import java.util.ArrayList;
-import java.util.List;
-
-import org.junit.Assert;
-
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.PathFilter;
-import org.apache.hadoop.hive.cli.CliSessionState;
-import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
-import org.apache.hadoop.hive.metastore.MetaStoreUtils;
-import org.apache.hadoop.hive.ql.Driver;
-import org.apache.hadoop.hive.ql.processors.CommandProcessorResponse;
-import org.apache.hadoop.hive.ql.session.SessionState;
-import org.apache.hadoop.hive.shims.HadoopShims.MiniDFSShim;
-import org.apache.hadoop.hive.shims.ShimLoader;
-import org.junit.Before;
-import org.junit.Test;
-
-/**
- * Tests the flag 'hive.warehouse.subdir.inherit.perms'.
- */
-public abstract class FolderPermissionBase {
-  protected static HiveConf conf;
-  protected static Driver driver;
-  protected static String dataFileDir;
-  protected static Path dataFilePath;
-  protected static FileSystem fs;
-
-  protected static Path warehouseDir;
-  protected static Path baseDfsDir;
-
-  protected static final PathFilter hiddenFileFilter = new PathFilter(){
-    public boolean accept(Path p){
-      String name = p.getName();
-      return !name.startsWith("_") && !name.startsWith(".");
-    }
-  };
-
-
-  public abstract void setPermission(String locn, int permIndex) throws Exception;
-
-  public abstract void verifyPermission(String locn, int permIndex) throws Exception;
-
-
-  public void setPermission(String locn) throws Exception {
-    setPermission(locn, 0);
-  }
-
-  public void verifyPermission(String locn) throws Exception {
-    verifyPermission(locn, 0);
-  }
-
-
-  public static void baseSetup() throws Exception {
-    MiniDFSShim dfs = ShimLoader.getHadoopShims().getMiniDfs(conf, 4, true, null);
-    fs = dfs.getFileSystem();
-    baseDfsDir =  new Path(new Path(fs.getUri()), "/base");
-    fs.mkdirs(baseDfsDir);
-    warehouseDir = new Path(baseDfsDir, "warehouse");
-    fs.mkdirs(warehouseDir);
-    conf.setVar(ConfVars.METASTOREWAREHOUSE, warehouseDir.toString());
-
-    // Assumes the tests are run on either the C: or D: drive when on Windows.
-    dataFileDir = conf.get("test.data.files").replace('\\', '/')
-        .replace("c:", "").replace("C:", "").replace("D:", "").replace("d:", "");
-    dataFilePath = new Path(dataFileDir, "kv1.txt");
-
-    // Set up scratch directory
-    Path scratchDir = new Path(baseDfsDir, "scratchdir");
-    conf.setVar(HiveConf.ConfVars.SCRATCHDIR, scratchDir.toString());
-
-    //set hive conf vars
-    conf.setBoolVar(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY, false);
-    conf.setBoolVar(HiveConf.ConfVars.HIVE_WAREHOUSE_SUBDIR_INHERIT_PERMS, true);
-    conf.setVar(HiveConf.ConfVars.DYNAMICPARTITIONINGMODE, "nonstrict");
-    int port = MetaStoreUtils.findFreePort();
-    MetaStoreUtils.startMetaStore(port, ShimLoader.getHadoopThriftAuthBridge());
-
-    SessionState.start(new CliSessionState(conf));
-    driver = new Driver(conf);
-    setupDataTable();
-  }
-
-
-  private static void setupDataTable() throws Exception {
-    CommandProcessorResponse ret = driver.run("DROP TABLE IF EXISTS mysrc");
-    Assert.assertEquals(0,ret.getResponseCode());
-
-    ret = driver.run("CREATE TABLE mysrc (key STRING, value STRING) PARTITIONED BY (part1 string, part2 string) STORED AS TEXTFILE");
-    Assert.assertEquals(0,ret.getResponseCode());
-
-    ret = driver.run("LOAD DATA LOCAL INPATH '" + dataFilePath + "' INTO TABLE mysrc PARTITION (part1='1',part2='1')");
-    Assert.assertEquals(0,ret.getResponseCode());
-
-    ret = driver.run("LOAD DATA LOCAL INPATH '" + dataFilePath + "' INTO TABLE mysrc PARTITION (part1='2',part2='2')");
-    Assert.assertEquals(0,ret.getResponseCode());
-  }
-
-  @Before
-  public void setupBeforeTest() throws Exception {
-    driver.run("USE default");
-  }
-
-  @Test
-  public void testCreateDb() throws Exception {
-    //see if the db inherits its permission from the warehouse directory.
-    String testDb = "mydb";
-    String tableName = "createtable";
-
-    setPermission(warehouseDir.toString());
-    verifyPermission(warehouseDir.toString());
-
-    CommandProcessorResponse ret = driver.run("CREATE DATABASE " + testDb);
-    Assert.assertEquals(0,ret.getResponseCode());
-
-    assertExistence(warehouseDir + "/" + testDb + ".db");
-    verifyPermission(warehouseDir + "/" + testDb + ".db");
-
-    ret = driver.run("USE " + testDb);
-    Assert.assertEquals(0,ret.getResponseCode());
-
-    ret = driver.run("CREATE TABLE " + tableName + " (key string, value string)");
-    Assert.assertEquals(0,ret.getResponseCode());
-
-    verifyPermission(warehouseDir + "/" + testDb + ".db/" + tableName);
-
-    ret = driver.run("insert into table " + tableName + " select key,value from default.mysrc");
-    Assert.assertEquals(0,ret.getResponseCode());
-
-    assertExistence(warehouseDir + "/" + testDb + ".db/" + tableName);
-    verifyPermission(warehouseDir + "/" + testDb + ".db/" + tableName);
-
-    Assert.assertTrue(listStatus(warehouseDir + "/" + testDb + ".db/" + tableName).size() > 0);
-    for (String child : listStatus(warehouseDir + "/" + testDb + ".db/" + tableName)) {
-      verifyPermission(child);
-    }
-
-    ret = driver.run("USE default");
-    Assert.assertEquals(0,ret.getResponseCode());
-
-    //cleanup after the test.
-    fs.delete(warehouseDir, true);
-    fs.mkdirs(warehouseDir);
-    Assert.assertEquals(listStatus(warehouseDir.toString()).size(), 0);
-    setupDataTable();
-  }
-
-  @Test
-  public void testCreateTable() throws Exception {
-    String testDb = "mydb2";
-    String tableName = "createtable";
-    CommandProcessorResponse ret = driver.run("CREATE DATABASE " + testDb);
-    Assert.assertEquals(0,ret.getResponseCode());
-
-    assertExistence(warehouseDir + "/" + testDb + ".db");
-    setPermission(warehouseDir + "/" + testDb + ".db");
-    verifyPermission(warehouseDir + "/" + testDb + ".db");
-
-    ret = driver.run("USE " + testDb);
-    Assert.assertEquals(0,ret.getResponseCode());
-
-    ret = driver.run("CREATE TABLE " + tableName + " (key string, value string)");
-    Assert.assertEquals(0,ret.getResponseCode());
-
-    verifyPermission(warehouseDir + "/" + testDb + ".db/" + tableName);
-
-    ret = driver.run("insert into table " + tableName + " select key,value from default.mysrc");
-    Assert.assertEquals(0,ret.getResponseCode());
-
-    assertExistence(warehouseDir + "/" + testDb + ".db/" + tableName);
-    verifyPermission(warehouseDir + "/" + testDb + ".db/" + tableName);
-
-    Assert.assertTrue(listStatus(warehouseDir + "/" + testDb + ".db/" + tableName).size() > 0);
-    for (String child : listStatus(warehouseDir + "/" + testDb + ".db/" + tableName)) {
-      verifyPermission(child);
-    }
-
-    ret = driver.run("USE default");
-    Assert.assertEquals(0,ret.getResponseCode());
-  }
-
-
-  @Test
-  public void testInsertNonPartTable() throws Exception {
-    //case 1 is non-partitioned table.
-    String tableName = "nonpart";
-
-    CommandProcessorResponse ret = driver.run("CREATE TABLE " + tableName + " (key string, value string)");
-    Assert.assertEquals(0, ret.getResponseCode());
-
-    String tableLoc = warehouseDir + "/" + tableName;
-    assertExistence(warehouseDir + "/" + tableName);
-
-    //case1A: insert into non-partitioned table.
-    setPermission(warehouseDir + "/" + tableName);
-    ret = driver.run("insert into table " + tableName + " select key,value from mysrc");
-    Assert.assertEquals(0, ret.getResponseCode());
-
-    verifyPermission(warehouseDir + "/" + tableName);
-    Assert.assertTrue(listStatus(tableLoc).size() > 0);
-    for (String child : listStatus(tableLoc)) {
-      verifyPermission(child);
-    }
-
-    //case1B: insert overwrite non-partitioned-table
-    setPermission(warehouseDir + "/" + tableName, 1);
-    ret = driver.run("insert overwrite table " + tableName + " select key,value from mysrc");
-    Assert.assertEquals(0, ret.getResponseCode());
-
-    verifyPermission(warehouseDir + "/" + tableName, 1);
-    Assert.assertTrue(listStatus(tableLoc).size() > 0);
-    for (String child : listStatus(tableLoc)) {
-      verifyPermission(child, 1);
-    }
-  }
-
-  @Test
-  public void testInsertStaticSinglePartition() throws Exception {
-    String tableName = "singlestaticpart";
-    CommandProcessorResponse ret = driver.run("CREATE TABLE " + tableName + " (key string, value string) partitioned by (part1 string)");
-    Assert.assertEquals(0, ret.getResponseCode());
-
-    assertExistence(warehouseDir + "/" + tableName);
-    setPermission(warehouseDir + "/" + tableName);
-
-    //insert into test
-    ret = driver.run("insert into table " + tableName + " partition(part1='1') select key,value from mysrc where part1='1' and part2='1'");
-    Assert.assertEquals(0, ret.getResponseCode());
-
-    verifyPermission(warehouseDir + "/" + tableName);
-    verifyPermission(warehouseDir + "/" + tableName + "/part1=1");
-
-    Assert.assertTrue(listStatus(warehouseDir + "/" + tableName + "/part1=1").size() > 0);
-    for (String child : listStatus(warehouseDir + "/" + tableName + "/part1=1")) {
-      verifyPermission(child);
-    }
-
-    //insert overwrite test
-    setPermission(warehouseDir + "/" + tableName, 1);
-    setPermission(warehouseDir + "/" + tableName + "/part1=1", 1);
-    ret = driver.run("insert overwrite table " + tableName + " partition(part1='1') select key,value from mysrc where part1='1' and part2='1'");
-    Assert.assertEquals(0, ret.getResponseCode());
-
-    verifyPermission(warehouseDir + "/" + tableName, 1);
-    verifyPermission(warehouseDir + "/" + tableName + "/part1=1", 1);
-
-    Assert.assertTrue(listStatus(warehouseDir + "/" + tableName + "/part1=1").size() > 0);
-    for (String child : listStatus(warehouseDir + "/" + tableName + "/part1=1")) {
-      verifyPermission(child, 1);
-    }
-  }
-
-  @Test
-  public void testInsertStaticDualPartition() throws Exception {
-    String tableName = "dualstaticpart";
-    CommandProcessorResponse ret = driver.run("CREATE TABLE " + tableName + " (key string, value string) partitioned by (part1 string, part2 string)");
-    Assert.assertEquals(0, ret.getResponseCode());
-
-    assertExistence(warehouseDir + "/" + tableName);
-    setPermission(warehouseDir + "/" + tableName);
-
-    //insert into test
-    ret = driver.run("insert into table " + tableName + " partition(part1='1', part2='1') select key,value from mysrc where part1='1' and part2='1'");
-    Assert.assertEquals(0, ret.getResponseCode());
-
-    verifyPermission(warehouseDir + "/" + tableName);
-    verifyPermission(warehouseDir + "/" + tableName + "/part1=1");
-    verifyPermission(warehouseDir + "/" + tableName + "/part1=1/part2=1");
-
-    Assert.assertTrue(listStatus(warehouseDir + "/" + tableName + "/part1=1/part2=1").size() > 0);
-    for (String child : listStatus(warehouseDir + "/" + tableName + "/part1=1/part2=1")) {
-      verifyPermission(child);
-    }
-
-    //insert overwrite test
-    setPermission(warehouseDir + "/" + tableName, 1);
-    setPermission(warehouseDir + "/" + tableName + "/part1=1", 1);
-    setPermission(warehouseDir + "/" + tableName + "/part1=1/part2=1", 1);
-
-    ret = driver.run("insert overwrite table " + tableName + " partition(part1='1', part2='1') select key,value from mysrc where part1='1' and part2='1'");
-    Assert.assertEquals(0, ret.getResponseCode());
-
-    verifyPermission(warehouseDir + "/" + tableName, 1);
-    verifyPermission(warehouseDir + "/" + tableName + "/part1=1", 1);
-    verifyPermission(warehouseDir + "/" + tableName + "/part1=1/part2=1", 1);
-
-    Assert.assertTrue(listStatus(warehouseDir + "/" + tableName + "/part1=1/part2=1").size() > 0);
-    for (String child : listStatus(warehouseDir + "/" + tableName + "/part1=1/part2=1")) {
-      verifyPermission(child, 1);
-    }
-  }
-
-  @Test
-  public void testInsertDualDynamicPartitions() throws Exception {
-    String tableName = "dualdynamicpart";
-
-    CommandProcessorResponse ret = driver.run("CREATE TABLE " + tableName + " (key string, value string) partitioned by (part1 string, part2 string)");
-    Assert.assertEquals(0, ret.getResponseCode());
-    assertExistence(warehouseDir + "/" + tableName);
-
-    //Insert into test, with permission set 0.
-    setPermission(warehouseDir + "/" + tableName, 0);
-    ret = driver.run("insert into table " + tableName + " partition (part1,part2) select key,value,part1,part2 from mysrc");
-    Assert.assertEquals(0, ret.getResponseCode());
-
-    verifyDualPartitionTable(warehouseDir + "/" + tableName, 0);
-
-    //Insert overwrite test, with permission set 1.  We need to reset existing partitions to 1 since the permissions
-    //should be inherited from the existing partitions
-    setDualPartitionTable(warehouseDir + "/" + tableName, 1);
-    ret = driver.run("insert overwrite table " + tableName + " partition (part1,part2) select key,value,part1,part2 from mysrc");
-    Assert.assertEquals(0, ret.getResponseCode());
-
-    verifyDualPartitionTable(warehouseDir + "/" + tableName, 1);
-  }
-
-  @Test
-  public void testInsertSingleDynamicPartition() throws Exception {
-    String tableName = "singledynamicpart";
-
-    CommandProcessorResponse ret = driver.run("CREATE TABLE " + tableName + " (key string, value string) partitioned by (part1 string)");
-    Assert.assertEquals(0,ret.getResponseCode());
-    String tableLoc = warehouseDir + "/" + tableName;
-    assertExistence(tableLoc);
-
-    //Insert into test, with permission set 0.
-    setPermission(tableLoc, 0);
-    ret = driver.run("insert into table " + tableName + " partition (part1) select key,value,part1 from mysrc");
-    Assert.assertEquals(0,ret.getResponseCode());
-    verifySinglePartition(tableLoc, 0);
-
-    //Insert overwrite test, with permission set 1. We need to reset existing partitions to 1 since the permissions
-    //should be inherited from the existing partitions
-    setSinglePartition(tableLoc, 1);
-    ret = driver.run("insert overwrite table " + tableName + " partition (part1) select key,value,part1 from mysrc");
-    Assert.assertEquals(0,ret.getResponseCode());
-    verifySinglePartition(tableLoc, 1);
-
-    //delete and re-insert using insert overwrite.  There are different code paths for insert vs. insert overwrite on new tables.
-    ret = driver.run("DROP TABLE " + tableName);
-    Assert.assertEquals(0, ret.getResponseCode());
-    ret = driver.run("CREATE TABLE " + tableName + " (key string, value string) partitioned by (part1 string)");
-    Assert.assertEquals(0, ret.getResponseCode());
-
-    assertExistence(warehouseDir + "/" + tableName);
-    setPermission(warehouseDir + "/" + tableName);
-
-    ret = driver.run("insert overwrite table " + tableName + " partition (part1) select key,value,part1 from mysrc");
-    Assert.assertEquals(0, ret.getResponseCode());
-
-    verifySinglePartition(tableLoc, 0);
-  }
-
-  @Test
-  public void testPartition() throws Exception {
-    String tableName = "alterpart";
-    CommandProcessorResponse ret = driver.run("CREATE TABLE " + tableName + " (key string, value string) partitioned by (part1 int, part2 int, part3 int)");
-    Assert.assertEquals(0,ret.getResponseCode());
-
-    assertExistence(warehouseDir + "/" + tableName);
-    setPermission(warehouseDir + "/" + tableName);
-
-    ret = driver.run("insert into table " + tableName + " partition(part1='1',part2='1',part3='1') select key,value from mysrc");
-    Assert.assertEquals(0,ret.getResponseCode());
-
-    assertExistence(warehouseDir + "/" + tableName);
-    setPermission(warehouseDir + "/" + tableName, 1);
-
-    //alter partition
-    ret = driver.run("alter table " + tableName + " partition (part1='1',part2='1',part3='1') rename to partition (part1='2',part2='2',part3='2')");
-    Assert.assertEquals(0,ret.getResponseCode());
-
-    verifyPermission(warehouseDir + "/" + tableName + "/part1=2", 1);
-    verifyPermission(warehouseDir + "/" + tableName + "/part1=2/part2=2", 1);
-    verifyPermission(warehouseDir + "/" + tableName + "/part1=2/part2=2/part3=2", 1);
-
-    Assert.assertTrue(listStatus(warehouseDir + "/" + tableName + "/part1=2/part2=2/part3=2").size() > 0);
-    for (String child : listStatus(warehouseDir + "/" + tableName + "/part1=2/part2=2/part3=2")) {
-      verifyPermission(child, 1);
-    }
-
-    String tableName2 = "alterpart2";
-    ret = driver.run("CREATE TABLE " + tableName2 + " (key string, value string) partitioned by (part1 int, part2 int, part3 int)");
-    Assert.assertEquals(0,ret.getResponseCode());
-
-    assertExistence(warehouseDir + "/" + tableName2);
-    setPermission(warehouseDir + "/" + tableName2);
-    ret = driver.run("alter table " + tableName2 + " exchange partition (part1='2',part2='2',part3='2') with table " + tableName);
-    Assert.assertEquals(0,ret.getResponseCode());
-
-    //alter exchange cannot change the base table's permission;
-    //it only controls the final partition folder's permission
-    verifyPermission(warehouseDir + "/" + tableName2 + "/part1=2", 0);
-    verifyPermission(warehouseDir + "/" + tableName2 + "/part1=2/part2=2", 0);
-    verifyPermission(warehouseDir + "/" + tableName2 + "/part1=2/part2=2/part3=2", 1);
-  }
-
-  @Test
-  public void testExternalTable() throws Exception {
-    String tableName = "externaltable";
-
-    String myLocation = warehouseDir + "/myfolder";
-    FileSystem fs = FileSystem.get(new URI(myLocation), conf);
-    fs.mkdirs(new Path(myLocation));
-    setPermission(myLocation);
-
-    CommandProcessorResponse ret = driver.run("CREATE TABLE " + tableName + " (key string, value string) LOCATION '" + myLocation + "'");
-    Assert.assertEquals(0,ret.getResponseCode());
-
-    ret = driver.run("insert into table " + tableName + " select key,value from mysrc");
-    Assert.assertEquals(0,ret.getResponseCode());
-
-    Assert.assertTrue(listStatus(myLocation).size() > 0);
-    for (String child : listStatus(myLocation)) {
-      verifyPermission(child);
-    }
-  }
-
-  @Test
-  public void testLoadLocal() throws Exception {
-    //case 1 is non-partitioned table.
-    String tableName = "loadlocal";
-
-    CommandProcessorResponse ret = driver.run("CREATE TABLE " + tableName + " (key string, value string)");
-    Assert.assertEquals(0,ret.getResponseCode());
-
-    String tableLoc = warehouseDir + "/" + tableName;
-    assertExistence(warehouseDir + "/" + tableName);
-
-    //case1A: load data local into non-partitioned table.
-    setPermission(warehouseDir + "/" + tableName);
-
-    ret = driver.run("load data local inpath '" + dataFilePath + "' into table " + tableName);
-    Assert.assertEquals(0,ret.getResponseCode());
-
-    Assert.assertTrue(listStatus(tableLoc).size() > 0);
-    for (String child : listStatus(tableLoc)) {
-      verifyPermission(child);
-    }
-
-    //case1B: load data local overwrite into a non-partitioned table
-    setPermission(warehouseDir + "/" + tableName, 1);
-    for (String child : listStatus(tableLoc)) {
-      setPermission(child, 1);
-    }
-    ret = driver.run("load data local inpath '" + dataFilePath + "' overwrite into table " + tableName);
-    Assert.assertEquals(0,ret.getResponseCode());
-
-    Assert.assertTrue(listStatus(tableLoc).size() > 0);
-    for (String child : listStatus(tableLoc)) {
-      verifyPermission(child, 1);
-    }
-
-    //case 2 is partitioned table.
-    tableName = "loadlocalpartition";
-
-    ret = driver.run("CREATE TABLE " + tableName + " (key string, value string) partitioned by (part1 int, part2 int)");
-    Assert.assertEquals(0,ret.getResponseCode());
-    tableLoc = warehouseDir + "/" + tableName;
-    assertExistence(tableLoc);
-
-    //case 2A: load data local into partitioned table.
-    setPermission(tableLoc);
-    ret = driver.run("LOAD DATA LOCAL INPATH '" + dataFilePath + "' INTO TABLE " + tableName + " PARTITION (part1='1',part2='1')");
-    Assert.assertEquals(0,ret.getResponseCode());
-
-    String partLoc = warehouseDir + "/" + tableName + "/part1=1/part2=1";
-    Assert.assertTrue(listStatus(partLoc).size() > 0);
-    for (String child : listStatus(partLoc)) {
-      verifyPermission(child);
-    }
-
-    //case 2B: load data local overwrite into a partitioned table; set the table/partition folder hierarchy to permission set 1.
-    //Local load overwrite replaces the existing partition content but not its permissions.
-    setPermission(tableLoc, 1);
-    setPermission(partLoc, 1);
-    for (String child : listStatus(partLoc)) {
-      setPermission(child, 1);
-    }
-    ret = driver.run("LOAD DATA LOCAL INPATH '" + dataFilePath + "' OVERWRITE INTO TABLE " + tableName + " PARTITION (part1='1',part2='1')");
-    Assert.assertEquals(0,ret.getResponseCode());
-
-    Assert.assertTrue(listStatus(tableLoc).size() > 0);
-    for (String child : listStatus(partLoc)) {
-      verifyPermission(child, 1);
-    }
-  }
-
-  @Test
-  public void testLoad() throws Exception {
-    String tableName = "load";
-    String location = "/hdfsPath";
-    fs.copyFromLocalFile(dataFilePath, new Path(location));
-
-    //case 1: load data
-    CommandProcessorResponse ret = driver.run("CREATE TABLE " + tableName + " (key string, value string)");
-    Assert.assertEquals(0,ret.getResponseCode());
-    String tableLoc = warehouseDir + "/" + tableName;
-    assertExistence(warehouseDir + "/" + tableName);
-
-    //case1A: load data into non-partitioned table.
-    setPermission(warehouseDir + "/" + tableName);
-
-    ret = driver.run("load data inpath '" + location + "' into table " + tableName);
-    Assert.assertEquals(0,ret.getResponseCode());
-
-    Assert.assertTrue(listStatus(tableLoc).size() > 0);
-    for (String child : listStatus(tableLoc)) {
-      verifyPermission(child);
-    }
-
-    //case1B: load data overwrite into a non-partitioned table
-    setPermission(warehouseDir + "/" + tableName, 1);
-    for (String child : listStatus(tableLoc)) {
-      setPermission(child, 1);
-    }
-
-    fs.copyFromLocalFile(dataFilePath, new Path(location));
-    ret = driver.run("load data inpath '" + location + "' overwrite into table " + tableName);
-    Assert.assertEquals(0,ret.getResponseCode());
-
-    Assert.assertTrue(listStatus(tableLoc).size() > 0);
-    for (String child : listStatus(tableLoc)) {
-      verifyPermission(child, 1);
-    }
-
-    //case 2 is partitioned table.
-    tableName = "loadpartition";
-
-    ret = driver.run("CREATE TABLE " + tableName + " (key string, value string) partitioned by (part1 int, part2 int)");
-    Assert.assertEquals(0,ret.getResponseCode());
-    tableLoc = warehouseDir + "/" + tableName;
-    assertExistence(tableLoc);
-
-    //case 2A: load data into partitioned table.
-    setPermission(tableLoc);
-    fs.copyFromLocalFile(dataFilePath, new Path(location));
-    ret = driver.run("LOAD DATA INPATH '" + location + "' INTO TABLE " + tableName + " PARTITION (part1='1',part2='1')");
-    Assert.assertEquals(0,ret.getResponseCode());
-
-    String partLoc = warehouseDir + "/" + tableName + "/part1=1/part2=1";
-    Assert.assertTrue(listStatus(partLoc).size() > 0);
-    for (String child : listStatus(partLoc)) {
-      verifyPermission(child);
-    }
-
-    //case 2B: load data overwrite into a partitioned table; set the table/partition folder hierarchy to permission set 1.
-    //Load overwrite replaces the existing partition content but not its permissions.
-    setPermission(tableLoc, 1);
-    setPermission(partLoc, 1);
-    Assert.assertTrue(listStatus(partLoc).size() > 0);
-    for (String child : listStatus(partLoc)) {
-      setPermission(child, 1);
-    }
-
-    fs.copyFromLocalFile(dataFilePath, new Path(location));
-    ret = driver.run("LOAD DATA INPATH '" + location + "' OVERWRITE INTO TABLE " + tableName + " PARTITION (part1='1',part2='1')");
-    Assert.assertEquals(0,ret.getResponseCode());
-
-    Assert.assertTrue(listStatus(tableLoc).size() > 0);
-    for (String child : listStatus(partLoc)) {
-      verifyPermission(child, 1);
-    }
-  }
-
-  @Test
-  public void testCtas() throws Exception {
-    String testDb = "ctasdb";
-    String tableName = "createtable";
-    CommandProcessorResponse ret = driver.run("CREATE DATABASE " + testDb);
-    Assert.assertEquals(0,ret.getResponseCode());
-
-    assertExistence(warehouseDir + "/" + testDb + ".db");
-    setPermission(warehouseDir + "/" + testDb + ".db");
-    verifyPermission(warehouseDir + "/" + testDb + ".db");
-
-    ret = driver.run("USE " + testDb);
-    Assert.assertEquals(0,ret.getResponseCode());
-
-    ret = driver.run("create table " + tableName + " as select key,value from default.mysrc");
-    Assert.assertEquals(0,ret.getResponseCode());
-
-    assertExistence(warehouseDir + "/" + testDb + ".db/" + tableName);
-    verifyPermission(warehouseDir + "/" + testDb + ".db/" + tableName);
-
-    Assert.assertTrue(listStatus(warehouseDir + "/" + testDb + ".db/" + tableName).size() > 0);
-    for (String child : listStatus(warehouseDir + "/" + testDb + ".db/" + tableName)) {
-      verifyPermission(child);
-    }
-
-    ret = driver.run("USE default");
-    Assert.assertEquals(0,ret.getResponseCode());
-  }
-
-  @Test
-  public void testExim() throws Exception {
-
-    //export the table to external file.
-    String myLocation = warehouseDir + "/exim";
-    FileSystem fs = FileSystem.get(new URI(myLocation), conf);
-    fs.mkdirs(new Path(myLocation));
-    setPermission(myLocation);
-    myLocation = myLocation + "/temp";
-
-    CommandProcessorResponse ret = driver.run("export table mysrc to '" + myLocation + "'");
-    Assert.assertEquals(0,ret.getResponseCode());
-
-    //check if exported data has inherited the permissions.
-    assertExistence(myLocation);
-    verifyPermission(myLocation);
-
-    assertExistence(myLocation + "/part1=1/part2=1");
-    verifyPermission(myLocation + "/part1=1/part2=1");
-    Assert.assertTrue(listStatus(myLocation + "/part1=1/part2=1").size() > 0);
-    for (String child : listStatus(myLocation + "/part1=1/part2=1")) {
-      verifyPermission(child);
-    }
-
-    assertExistence(myLocation + "/part1=2/part2=2");
-    verifyPermission(myLocation + "/part1=2/part2=2");
-    Assert.assertTrue(listStatus(myLocation + "/part1=2/part2=2").size() > 0);
-    for (String child : listStatus(myLocation + "/part1=2/part2=2")) {
-      verifyPermission(child);
-    }
-
-    //import the table back into another database
-    String testDb = "eximdb";
-    ret = driver.run("CREATE DATABASE " + testDb);
-    Assert.assertEquals(0,ret.getResponseCode());
-
-    //use a different permission for this import location, to verify that it is really set (permIndex=1)
-    assertExistence(warehouseDir + "/" + testDb + ".db");
-    setPermission(warehouseDir + "/" + testDb + ".db", 1);
-
-    ret = driver.run("USE " + testDb);
-    Assert.assertEquals(0,ret.getResponseCode());
-
-    ret = driver.run("import from '" + myLocation + "'");
-    Assert.assertEquals(0,ret.getResponseCode());
-
-    //check permissions of imported, from the exported table
-    assertExistence(warehouseDir + "/" + testDb + ".db/mysrc");
-    verifyPermission(warehouseDir + "/" + testDb + ".db/mysrc", 1);
-
-    myLocation = warehouseDir + "/" + testDb + ".db/mysrc";
-    assertExistence(myLocation);
-    verifyPermission(myLocation, 1);
-
-    assertExistence(myLocation + "/part1=1/part2=1");
-    verifyPermission(myLocation + "/part1=1/part2=1", 1);
-    Assert.assertTrue(listStatus(myLocation + "/part1=1/part2=1").size() > 0);
-    for (String child : listStatus(myLocation + "/part1=1/part2=1")) {
-      verifyPermission(child, 1);
-    }
-
-    assertExistence(myLocation + "/part1=2/part2=2");
-    verifyPermission(myLocation + "/part1=2/part2=2", 1);
-    Assert.assertTrue(listStatus(myLocation + "/part1=2/part2=2").size() > 0);
-    for (String child : listStatus(myLocation + "/part1=2/part2=2")) {
-      verifyPermission(child, 1);
-    }
-  }
-
-  /**
- * Tests that the permissions on the table do not change after truncation
-   * @throws Exception
-   */
-  @Test
-  public void testTruncateTable() throws Exception {
-    String tableName = "truncatetable";
-    String partition = warehouseDir + "/" + tableName + "/part1=1";
-
-    CommandProcessorResponse ret = driver.run("CREATE TABLE " + tableName + " (key STRING, value STRING) PARTITIONED BY (part1 INT)");
-    Assert.assertEquals(0, ret.getResponseCode());
-
-    setPermission(warehouseDir + "/" + tableName);
-
-    ret = driver.run("insert into table " + tableName + " partition(part1='1') select key,value from mysrc where part1='1' and part2='1'");
-    Assert.assertEquals(0, ret.getResponseCode());
-
-    assertExistence(warehouseDir + "/" + tableName);
-
-    verifyPermission(warehouseDir + "/" + tableName);
-    verifyPermission(partition);
-
-    ret = driver.run("TRUNCATE TABLE " + tableName);
-    Assert.assertEquals(0, ret.getResponseCode());
-
-    assertExistence(warehouseDir + "/" + tableName);
-    verifyPermission(warehouseDir + "/" + tableName);
-
-    ret = driver.run("insert into table " + tableName + " partition(part1='1') select key,value from mysrc where part1='1' and part2='1'");
-    Assert.assertEquals(0, ret.getResponseCode());
-
-    verifyPermission(warehouseDir + "/" + tableName);
-
-    assertExistence(partition);
-    verifyPermission(partition);
-
-    // Also test the partition folder if the partition is truncated
-    ret = driver.run("TRUNCATE TABLE " + tableName + " partition(part1='1')");
-    Assert.assertEquals(0, ret.getResponseCode());
-
-    assertExistence(partition);
-    verifyPermission(partition);
-  }
-
-  private void setSinglePartition(String tableLoc, int index) throws Exception {
-    setPermission(tableLoc + "/part1=1", index);
-    setPermission(tableLoc + "/part1=2", index);
-  }
-
-  private void verifySinglePartition(String tableLoc, int index) throws Exception {
-    verifyPermission(tableLoc + "/part1=1", index);
-    verifyPermission(tableLoc + "/part1=2", index);
-
-    Assert.assertTrue(listStatus(tableLoc + "/part1=1").size() > 0);
-    for (String child : listStatus(tableLoc + "/part1=1")) {
-      verifyPermission(child, index);
-    }
-
-    Assert.assertTrue(listStatus(tableLoc + "/part1=2").size() > 0);
-    for (String child : listStatus(tableLoc + "/part1=2")) {
-      verifyPermission(child, index);
-    }
-  }
-
-  private void setDualPartitionTable(String baseTablePath, int index) throws Exception {
-    setPermission(baseTablePath, index);
-    setPermission(baseTablePath + "/part1=1", index);
-    setPermission(baseTablePath + "/part1=1/part2=1", index);
-
-    setPermission(baseTablePath + "/part1=2", index);
-    setPermission(baseTablePath + "/part1=2/part2=2", index);
-  }
-
-  private void verifyDualPartitionTable(String baseTablePath, int index) throws Exception {
-    verifyPermission(baseTablePath, index);
-    verifyPermission(baseTablePath + "/part1=1", index);
-    verifyPermission(baseTablePath + "/part1=1/part2=1", index);
-
-    verifyPermission(baseTablePath + "/part1=2", index);
-    verifyPermission(baseTablePath + "/part1=2/part2=2", index);
-
-    Assert.assertTrue(listStatus(baseTablePath + "/part1=1/part2=1").size() > 0);
-    for (String child : listStatus(baseTablePath + "/part1=1/part2=1")) {
-      verifyPermission(child, index);
-    }
-
-    Assert.assertTrue(listStatus(baseTablePath + "/part1=2/part2=2").size() > 0);
-    for (String child : listStatus(baseTablePath + "/part1=2/part2=2")) {
-      verifyPermission(child, index);
-    }
-  }
-
-  private void assertExistence(String locn) throws Exception {
-    Assert.assertTrue(fs.exists(new Path(locn)));
-  }
-
-  private List<String> listStatus(String locn) throws Exception {
-    List<String> results = new ArrayList<String>();
-    FileStatus[] listStatus = fs.listStatus(new Path(locn), hiddenFileFilter);
-    for (FileStatus status : listStatus) {
-      results.add(status.getPath().toString());
-    }
-    return results;
-  }
-}
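
For context on what the deleted base class exercised: with hive.warehouse.subdir.inherit.perms=true, Hive is expected to copy a parent directory's permission onto newly created children. A minimal sketch of that check against the plain Hadoop FileSystem API (the class name and paths here are hypothetical, not taken from the patch):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.fs.permission.FsPermission;

    public class InheritPermsSketch {
      public static void main(String[] args) throws Exception {
        FileSystem fs = FileSystem.get(new Configuration());
        Path parent = new Path("/warehouse/mydb.db");   // hypothetical location
        Path child = new Path(parent, "createtable");   // would be created by a DDL/DML statement
        fs.setPermission(parent, new FsPermission((short) 0777));
        // ... run the Hive statement that creates 'child' here ...
        FsPermission parentPerm = fs.getFileStatus(parent).getPermission();
        FsPermission childPerm = fs.getFileStatus(child).getPermission();
        if (!parentPerm.equals(childPerm)) {
          throw new AssertionError("child did not inherit " + parentPerm);
        }
      }
    }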

http://git-wip-us.apache.org/repos/asf/hive/blob/187eb760/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/TestFolderPermissions.java
----------------------------------------------------------------------
diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/TestFolderPermissions.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/TestFolderPermissions.java
deleted file mode 100644
index 6cc2d18..0000000
--- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/TestFolderPermissions.java
+++ /dev/null
@@ -1,52 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hive.ql.security;
-
-import junit.framework.Assert;
-
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.permission.FsPermission;
-import org.apache.hadoop.hive.conf.HiveConf;
-import org.junit.BeforeClass;
-import org.junit.Test;
-
-public class TestFolderPermissions extends FolderPermissionBase {
-
-  @BeforeClass
-  public static void setup() throws Exception {
-    conf = new HiveConf(TestFolderPermissions.class);
-    conf.setVar(HiveConf.ConfVars.HIVEMAPREDMODE, "nonstrict");
-    baseSetup();
-  }
-
-  public FsPermission[] expected = new FsPermission[] {
-     FsPermission.createImmutable((short) 0777),
-     FsPermission.createImmutable((short) 0766)
-  };
-
-  @Override
-  public void setPermission(String locn, int permIndex) throws Exception {
-    fs.setPermission(new Path(locn), expected[permIndex]);
-  }
-
-  @Override
-  public void verifyPermission(String locn, int permIndex) throws Exception {
-    FsPermission actual =  fs.getFileStatus(new Path(locn)).getPermission();
-    Assert.assertEquals(expected[permIndex], actual);
-  }
-}
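
A side note on the two expected permission sets above: FsPermission.createImmutable takes the mode as an octal short, and FsPermission's toString() renders it symbolically, so the pair can be eyeballed with a standalone two-liner (not part of the patch):

    import org.apache.hadoop.fs.permission.FsPermission;

    public class PermPrint {
      public static void main(String[] args) {
        System.out.println(FsPermission.createImmutable((short) 0777)); // rwxrwxrwx
        System.out.println(FsPermission.createImmutable((short) 0766)); // rwxrw-rw-
      }
    }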

http://git-wip-us.apache.org/repos/asf/hive/blob/187eb760/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/TestStorageBasedMetastoreAuthorizationDrops.java
----------------------------------------------------------------------
diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/TestStorageBasedMetastoreAuthorizationDrops.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/TestStorageBasedMetastoreAuthorizationDrops.java
deleted file mode 100644
index bb65ee7..0000000
--- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/TestStorageBasedMetastoreAuthorizationDrops.java
+++ /dev/null
@@ -1,205 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hive.ql.security;
-
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
-import org.apache.hadoop.hive.metastore.api.Database;
-import org.apache.hadoop.hive.metastore.api.Table;
-import org.apache.hadoop.hive.ql.processors.CommandProcessorResponse;
-import org.apache.hadoop.hive.shims.ShimLoader;
-import org.apache.hadoop.hive.shims.HadoopShims.MiniDFSShim;
-import org.apache.hadoop.hive.shims.Utils;
-import org.junit.Assert;
-import org.junit.Test;
-
-/**
- * Test cases focusing on drop table permission checks
- */
-public class TestStorageBasedMetastoreAuthorizationDrops extends StorageBasedMetastoreTestBase {
-
-  protected static MiniDFSShim dfs = null;
-
-  @Override
-  protected HiveConf createHiveConf() throws Exception {
-    // Hadoop FS ACLs do not work with LocalFileSystem, so set up MiniDFS.
-    HiveConf conf = super.createHiveConf();
-
-    String currentUserName = Utils.getUGI().getShortUserName();
-    conf.set("hadoop.proxyuser." + currentUserName + ".groups", "*");
-    conf.set("hadoop.proxyuser." + currentUserName + ".hosts", "*");
-    dfs = ShimLoader.getHadoopShims().getMiniDfs(conf, 4, true, null);
-    FileSystem fs = dfs.getFileSystem();
-
-    Path warehouseDir = new Path(new Path(fs.getUri()), "/warehouse");
-    fs.mkdirs(warehouseDir);
-    conf.setVar(HiveConf.ConfVars.METASTOREWAREHOUSE, warehouseDir.toString());
-    conf.setBoolVar(HiveConf.ConfVars.HIVE_WAREHOUSE_SUBDIR_INHERIT_PERMS, true);
-
-    // Set up scratch directory
-    Path scratchDir = new Path(new Path(fs.getUri()), "/scratchdir");
-    conf.setVar(HiveConf.ConfVars.SCRATCHDIR, scratchDir.toString());
-
-    return conf;
-  }
-
-  @Override
-  public void tearDown() throws Exception {
-    super.tearDown();
-
-    if (dfs != null) {
-      dfs.shutdown();
-      dfs = null;
-    }
-  }
-
-  @Test
-  public void testDropDatabase() throws Exception {
-    dropDatabaseByOtherUser("-rwxrwxrwx", 0);
-    dropDatabaseByOtherUser("-rwxrwxrwt", 1);
-  }
-
-  /**
- * Creates a db and tries to drop it as the 'other' user
-   * @param perm - permission for warehouse dir
-   * @param expectedRet - expected return code for drop by other user
-   * @throws Exception
-   */
-  public void dropDatabaseByOtherUser(String perm, int expectedRet) throws Exception {
-    String dbName = getTestDbName();
-    setPermissions(clientHiveConf.getVar(ConfVars.METASTOREWAREHOUSE), perm);
-
-    CommandProcessorResponse resp = driver.run("create database " + dbName);
-    Assert.assertEquals(0, resp.getResponseCode());
-    Database db = msc.getDatabase(dbName);
-    validateCreateDb(db, dbName);
-
-    InjectableDummyAuthenticator.injectMode(true);
-
-
-    resp = driver.run("drop database " + dbName);
-    Assert.assertEquals(expectedRet, resp.getResponseCode());
-
-  }
-
-  @Test
-  public void testDropTable() throws Exception {
-    dropTableByOtherUser("-rwxrwxrwx", 0);
-    dropTableByOtherUser("-rwxrwxrwt", 1);
-  }
-
-  /**
-   * @param perm dir permission for database dir
-   * @param expectedRet expected return code on drop table
-   * @throws Exception
-   */
-  public void dropTableByOtherUser(String perm, int expectedRet) throws Exception {
-    String dbName = getTestDbName();
-    String tblName = getTestTableName();
-    setPermissions(clientHiveConf.getVar(ConfVars.METASTOREWAREHOUSE), "-rwxrwxrwx");
-
-    CommandProcessorResponse resp = driver.run("create database " + dbName);
-    Assert.assertEquals(0, resp.getResponseCode());
-    Database db = msc.getDatabase(dbName);
-    validateCreateDb(db, dbName);
-
-    setPermissions(db.getLocationUri(), perm);
-
-    String dbDotTable = dbName + "." + tblName;
-    resp = driver.run("create table " + dbDotTable + "(i int)");
-    Assert.assertEquals(0, resp.getResponseCode());
-
-
-    InjectableDummyAuthenticator.injectMode(true);
-    resp = driver.run("drop table " + dbDotTable);
-    Assert.assertEquals(expectedRet, resp.getResponseCode());
-  }
-
-  /**
- * Drop view should not be blocked by SBA, since a view has no location to drop.
-   * @throws Exception
-   */
-  @Test
-  public void testDropView() throws Exception {
-    String dbName = getTestDbName();
-    String tblName = getTestTableName();
-    String viewName = "view" + tblName;
-    setPermissions(clientHiveConf.getVar(ConfVars.METASTOREWAREHOUSE), "-rwxrwxrwx");
-
-    CommandProcessorResponse resp = driver.run("create database " + dbName);
-    Assert.assertEquals(0, resp.getResponseCode());
-    Database db = msc.getDatabase(dbName);
-    validateCreateDb(db, dbName);
-
-    setPermissions(db.getLocationUri(), "-rwxrwxrwt");
-
-    String dbDotTable = dbName + "." + tblName;
-    resp = driver.run("create table " + dbDotTable + "(i int)");
-    Assert.assertEquals(0, resp.getResponseCode());
-
-    String dbDotView = dbName + "." + viewName;
-    resp = driver.run("create view " + dbDotView + " as select * from " +  dbDotTable);
-    Assert.assertEquals(0, resp.getResponseCode());
-
-    resp = driver.run("drop view " + dbDotView);
-    Assert.assertEquals(0, resp.getResponseCode());
-
-    resp = driver.run("drop table " + dbDotTable);
-    Assert.assertEquals(0, resp.getResponseCode());
-  }
-
-  @Test
-  public void testDropPartition() throws Exception {
-    dropPartitionByOtherUser("-rwxrwxrwx", 0);
-    dropPartitionByOtherUser("-rwxrwxrwt", 1);
-  }
-
-  /**
-   * @param perm permissions for table dir
-   * @param expectedRet expected return code
-   * @throws Exception
-   */
-  public void dropPartitionByOtherUser(String perm, int expectedRet) throws Exception {
-    String dbName = getTestDbName();
-    String tblName = getTestTableName();
-    setPermissions(clientHiveConf.getVar(ConfVars.METASTOREWAREHOUSE), "-rwxrwxrwx");
-
-    CommandProcessorResponse resp = driver.run("create database " + dbName);
-    Assert.assertEquals(0, resp.getResponseCode());
-    Database db = msc.getDatabase(dbName);
-    validateCreateDb(db, dbName);
-    setPermissions(db.getLocationUri(), "-rwxrwxrwx");
-
-    String dbDotTable = dbName + "." + tblName;
-    resp = driver.run("create table " + dbDotTable + "(i int) partitioned by (b string)");
-    Assert.assertEquals(0, resp.getResponseCode());
-    Table tab = msc.getTable(dbName, tblName);
-    setPermissions(tab.getSd().getLocation(), perm);
-
-    resp = driver.run("alter table " + dbDotTable + " add partition (b='2011')");
-    Assert.assertEquals(0, resp.getResponseCode());
-
-    InjectableDummyAuthenticator.injectMode(true);
-    resp = driver.run("alter table " + dbDotTable + " drop partition (b='2011')");
-    Assert.assertEquals(expectedRet, resp.getResponseCode());
-  }
-
-}
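
The two permission strings driving the expected return codes above differ only in the sticky bit: on HDFS, a sticky bit on a directory means only an entry's owner (or the superuser) may delete or rename entries inside it, so drops issued by the injected 'other' user fail. A standalone sketch of that distinction (assuming stock Hadoop semantics; not part of the patch):

    import org.apache.hadoop.fs.permission.FsPermission;

    public class StickyBitSketch {
      public static void main(String[] args) {
        FsPermission open = FsPermission.valueOf("-rwxrwxrwx");
        FsPermission sticky = FsPermission.valueOf("-rwxrwxrwt");
        System.out.println(open.getStickyBit());   // false -> other users can drop
        System.out.println(sticky.getStickyBit()); // true  -> drop by other user fails
      }
    }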

http://git-wip-us.apache.org/repos/asf/hive/blob/187eb760/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/TestStorageBasedMetastoreAuthorizationReads.java
----------------------------------------------------------------------
diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/TestStorageBasedMetastoreAuthorizationReads.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/TestStorageBasedMetastoreAuthorizationReads.java
deleted file mode 100644
index ea631d2..0000000
--- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/TestStorageBasedMetastoreAuthorizationReads.java
+++ /dev/null
@@ -1,127 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hive.ql.security;
-
-import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
-import org.apache.hadoop.hive.metastore.api.Database;
-import org.apache.hadoop.hive.metastore.api.Table;
-import org.apache.hadoop.hive.ql.CommandNeedRetryException;
-import org.apache.hadoop.hive.ql.Driver;
-import org.apache.hadoop.hive.ql.processors.CommandProcessorResponse;
-import org.junit.Assert;
-import org.junit.Test;
-
-/**
- * Test cases focusing on read table permission checks
- */
-public class TestStorageBasedMetastoreAuthorizationReads extends StorageBasedMetastoreTestBase {
-
-  @Test
-  public void testReadTableSuccess() throws Exception {
-    readTableByOtherUser("-rwxrwxrwx", true);
-  }
-
-  @Test
-  public void testReadTableSuccessWithReadOnly() throws Exception {
-    readTableByOtherUser("-r--r--r--", true);
-  }
-
-  @Test
-  public void testReadTableFailure() throws Exception {
-    readTableByOtherUser("-rwxrwx---", false);
-  }
-
-  /**
-   * @param perm dir permission for table dir
- * @param isSuccess whether the command is expected to succeed
-   * @throws Exception
-   */
-  private void readTableByOtherUser(String perm, boolean isSuccess) throws Exception {
-    String dbName = getTestDbName();
-    String tblName = getTestTableName();
-    setPermissions(clientHiveConf.getVar(ConfVars.METASTOREWAREHOUSE), "-rwxrwxrwx");
-
-    CommandProcessorResponse resp = driver.run("create database " + dbName);
-    Assert.assertEquals(0, resp.getResponseCode());
-    Database db = msc.getDatabase(dbName);
-    validateCreateDb(db, dbName);
-
-    setPermissions(db.getLocationUri(), "-rwxrwxrwx");
-
-    String dbDotTable = dbName + "." + tblName;
-    resp = driver.run("create table " + dbDotTable + "(i int) partitioned by (`date` string)");
-    Assert.assertEquals(0, resp.getResponseCode());
-    Table tab = msc.getTable(dbName, tblName);
-    setPermissions(tab.getSd().getLocation(), perm);
-
-    InjectableDummyAuthenticator.injectMode(true);
-
-    testCmd(driver, "DESCRIBE  " + dbDotTable, isSuccess);
-    testCmd(driver, "DESCRIBE EXTENDED  " + dbDotTable, isSuccess);
-    testCmd(driver, "SHOW PARTITIONS  " + dbDotTable, isSuccess);
-    testCmd(driver, "SHOW COLUMNS IN " + tblName + " IN " + dbName, isSuccess);
-    testCmd(driver, "use " + dbName, true);
-    testCmd(driver, "SHOW TABLE EXTENDED LIKE " + tblName, isSuccess);
-
-  }
-
-  @Test
-  public void testReadDbSuccess() throws Exception {
-    readDbByOtherUser("-rwxrwxrwx", true);
-  }
-
-  @Test
-  public void testReadDbFailure() throws Exception {
-    readDbByOtherUser("-rwxrwx---", false);
-  }
-
-
-  /**
-   * @param perm dir permission for database dir
- * @param isSuccess whether the command is expected to succeed
-   * @throws Exception
-   */
-  private void readDbByOtherUser(String perm, boolean isSuccess) throws Exception {
-    String dbName = getTestDbName();
-    setPermissions(clientHiveConf.getVar(ConfVars.METASTOREWAREHOUSE), perm);
-
-    CommandProcessorResponse resp = driver.run("create database " + dbName);
-    Assert.assertEquals(0, resp.getResponseCode());
-    Database db = msc.getDatabase(dbName);
-    validateCreateDb(db, dbName);
-    setPermissions(db.getLocationUri(), perm);
-
-    InjectableDummyAuthenticator.injectMode(true);
-
-    testCmd(driver, "DESCRIBE DATABASE " + dbName, isSuccess);
-    testCmd(driver, "DESCRIBE DATABASE EXTENDED " + dbName, isSuccess);
-    testCmd(driver, "SHOW TABLES IN " + dbName, isSuccess);
-    driver.run("use " + dbName);
-    testCmd(driver, "SHOW TABLES ", isSuccess);
-
-  }
-
-  private void testCmd(Driver driver, String cmd, boolean isSuccess)
-      throws CommandNeedRetryException {
-    CommandProcessorResponse resp = driver.run(cmd);
-    Assert.assertEquals(isSuccess, resp.getResponseCode() == 0);
-  }
-
-
-}
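
Similarly, the read cases above hinge on the 'other' triplet of the directory permission, since the injected dummy user is neither the owner nor in the owning group: "-r--r--r--" still grants READ to others, while "-rwxrwx---" does not. A standalone sketch (assuming stock Hadoop FsAction semantics; not part of the patch):

    import org.apache.hadoop.fs.permission.FsAction;
    import org.apache.hadoop.fs.permission.FsPermission;

    public class ReadCheckSketch {
      public static void main(String[] args) {
        FsPermission readOnly = FsPermission.valueOf("-r--r--r--");
        FsPermission groupOnly = FsPermission.valueOf("-rwxrwx---");
        System.out.println(readOnly.getOtherAction().implies(FsAction.READ));  // true  -> reads succeed
        System.out.println(groupOnly.getOtherAction().implies(FsAction.READ)); // false -> reads fail
      }
    }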

http://git-wip-us.apache.org/repos/asf/hive/blob/187eb760/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/txn/compactor/TestCompactor.java
----------------------------------------------------------------------
diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/txn/compactor/TestCompactor.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/txn/compactor/TestCompactor.java
index 66ed8ca..e0c05bd 100644
--- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/txn/compactor/TestCompactor.java
+++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/txn/compactor/TestCompactor.java
@@ -659,17 +659,17 @@ public class TestCompactor {
       Path resultFile = null;
       for (int i = 0; i < names.length; i++) {
         names[i] = stat[i].getPath().getName();
-        if (names[i].equals("delta_0000001_0000004")) {
+        if (names[i].equals("delta_0000003_0000006")) {
           resultFile = stat[i].getPath();
         }
       }
       Arrays.sort(names);
-      String[] expected = new String[]{"delta_0000001_0000002",
-          "delta_0000001_0000004", "delta_0000003_0000004", "delta_0000005_0000006"};
+      String[] expected = new String[]{"delta_0000003_0000004",
+          "delta_0000003_0000006", "delta_0000005_0000006", "delta_0000007_0000008"};
       if (!Arrays.deepEquals(expected, names)) {
         Assert.fail("Expected: " + Arrays.toString(expected) + ", found: " + Arrays.toString(names));
       }
-      checkExpectedTxnsPresent(null, new Path[]{resultFile},columnNamesProperty, columnTypesProperty,  0, 1L, 4L);
+      checkExpectedTxnsPresent(null, new Path[]{resultFile},columnNamesProperty, columnTypesProperty,  0, 3L, 6L);
 
     } finally {
       connection.close();
@@ -718,11 +718,11 @@ public class TestCompactor {
       FileStatus[] stat =
           fs.listStatus(new Path(table.getSd().getLocation()), AcidUtils.baseFileFilter);
       if (1 != stat.length) {
-        Assert.fail("Expecting 1 file \"base_0000004\" and found " + stat.length + " files " + Arrays.toString(stat));
+        Assert.fail("Expecting 1 file \"base_0000006\" and found " + stat.length + " files " + Arrays.toString(stat));
       }
       String name = stat[0].getPath().getName();
-      Assert.assertEquals(name, "base_0000004");
-      checkExpectedTxnsPresent(stat[0].getPath(), null, columnNamesProperty, columnTypesProperty, 0, 1L, 4L);
+      Assert.assertEquals(name, "base_0000006");
+      checkExpectedTxnsPresent(stat[0].getPath(), null, columnNamesProperty, columnTypesProperty, 0, 3L, 6L);
     } finally {
       connection.close();
     }
@@ -778,17 +778,17 @@ public class TestCompactor {
       Path resultDelta = null;
       for (int i = 0; i < names.length; i++) {
         names[i] = stat[i].getPath().getName();
-        if (names[i].equals("delta_0000001_0000004")) {
+        if (names[i].equals("delta_0000003_0000006")) {
           resultDelta = stat[i].getPath();
         }
       }
       Arrays.sort(names);
-      String[] expected = new String[]{"delta_0000001_0000002",
-          "delta_0000001_0000004", "delta_0000003_0000004"};
+      String[] expected = new String[]{"delta_0000003_0000004",
+          "delta_0000003_0000006", "delta_0000005_0000006"};
       if (!Arrays.deepEquals(expected, names)) {
         Assert.fail("Expected: " + Arrays.toString(expected) + ", found: " + Arrays.toString(names));
       }
-      checkExpectedTxnsPresent(null, new Path[]{resultDelta}, columnNamesProperty, columnTypesProperty, 0, 1L, 4L);
+      checkExpectedTxnsPresent(null, new Path[]{resultDelta}, columnNamesProperty, columnTypesProperty, 0, 3L, 6L);
     } finally {
       connection.close();
     }
@@ -844,13 +844,13 @@ public class TestCompactor {
         Assert.fail("majorCompactAfterAbort FileStatus[] stat " + Arrays.toString(stat));
       }
       if (1 != stat.length) {
-        Assert.fail("Expecting 1 file \"base_0000004\" and found " + stat.length + " files " + Arrays.toString(stat));
+        Assert.fail("Expecting 1 file \"base_0000006\" and found " + stat.length + " files " + Arrays.toString(stat));
       }
       String name = stat[0].getPath().getName();
-      if (!name.equals("base_0000004")) {
-        Assert.fail("majorCompactAfterAbort name " + name + " not equals to base_0000004");
+      if (!name.equals("base_0000006")) {
+        Assert.fail("majorCompactAfterAbort name " + name + " not equals to base_0000006");
       }
-      checkExpectedTxnsPresent(stat[0].getPath(), null, columnNamesProperty, columnTypesProperty, 0, 1L, 4L);
+      checkExpectedTxnsPresent(stat[0].getPath(), null, columnNamesProperty, columnTypesProperty, 0, 3L, 6L);
     } finally {
       connection.close();
     }
@@ -899,11 +899,11 @@ public class TestCompactor {
       FileStatus[] stat =
           fs.listStatus(new Path(table.getSd().getLocation()), AcidUtils.baseFileFilter);
       if (1 != stat.length) {
-        Assert.fail("Expecting 1 file \"base_0000004\" and found " + stat.length + " files " + Arrays.toString(stat));
+        Assert.fail("Expecting 1 file \"base_0000006\" and found " + stat.length + " files " + Arrays.toString(stat));
       }
       String name = stat[0].getPath().getName();
-      Assert.assertEquals(name, "base_0000004");
-      checkExpectedTxnsPresent(stat[0].getPath(), null, columnNamesProperty, columnTypesProperty, 0, 1L, 4L);
+      Assert.assertEquals(name, "base_0000006");
+      checkExpectedTxnsPresent(stat[0].getPath(), null, columnNamesProperty, columnTypesProperty, 0, 3L, 6L);
     } finally {
       connection.close();
     }
@@ -923,18 +923,18 @@ public class TestCompactor {
         " STORED AS ORC  TBLPROPERTIES ('transactional'='true',"
         + "'transactional_properties'='default')", driver);
 
-    // Insert some data -> this will generate only insert deltas and no delete deltas: delta_1_1
+    // Insert some data -> this will generate only insert deltas and no delete deltas: delta_3_3
     executeStatementOnDriver("INSERT INTO " + tblName +"(a,b) VALUES(1, 'foo')", driver);
 
-    // Insert some data -> this will again generate only insert deltas and no delete deltas: delta_2_2
+    // Insert some data -> this will again generate only insert deltas and no delete deltas: delta_4_4
     executeStatementOnDriver("INSERT INTO " + tblName +"(a,b) VALUES(2, 'bar')", driver);
 
-    // Delete some data -> this will generate only delete deltas and no insert deltas: delete_delta_3_3
+    // Delete some data -> this will generate only delete deltas and no insert deltas: delete_delta_5_5
     executeStatementOnDriver("DELETE FROM " + tblName +" WHERE a = 2", driver);
 
     // Now, compact -> Compaction produces a single range for both delta and delete delta
-    // That is, both delta and delete_deltas would be compacted into delta_1_3 and delete_delta_1_3
-    // even though there are only two delta_1_1, delta_2_2 and one delete_delta_3_3.
+    // That is, the deltas and delete deltas are each compacted into a single range:
+    // delta_3_5 and delete_delta_3_5, even though the inputs are two insert deltas
+    // (delta_3_3, delta_4_4) and one delete delta (delete_delta_5_5).
     TxnStore txnHandler = TxnUtils.getTxnStore(conf);
     txnHandler.compact(new CompactionRequest(dbName, tblName, CompactionType.MINOR));
     Worker t = new Worker();
@@ -957,16 +957,16 @@ public class TestCompactor {
     Path minorCompactedDelta = null;
     for (int i = 0; i < deltas.length; i++) {
       deltas[i] = stat[i].getPath().getName();
-      if (deltas[i].equals("delta_0000001_0000003")) {
+      if (deltas[i].equals("delta_0000003_0000005")) {
         minorCompactedDelta = stat[i].getPath();
       }
     }
     Arrays.sort(deltas);
-    String[] expectedDeltas = new String[]{"delta_0000001_0000001_0000", "delta_0000001_0000003", "delta_0000002_0000002_0000"};
+    String[] expectedDeltas = new String[]{"delta_0000003_0000003_0000", "delta_0000003_0000005", "delta_0000004_0000004_0000"};
     if (!Arrays.deepEquals(expectedDeltas, deltas)) {
       Assert.fail("Expected: " + Arrays.toString(expectedDeltas) + ", found: " + Arrays.toString(deltas));
     }
-    checkExpectedTxnsPresent(null, new Path[]{minorCompactedDelta}, columnNamesProperty, columnTypesProperty, 0, 1L, 2L);
+    checkExpectedTxnsPresent(null, new Path[]{minorCompactedDelta}, columnNamesProperty, columnTypesProperty, 0, 3L, 4L);
 
     // Verify that we have got correct set of delete_deltas.
     FileStatus[] deleteDeltaStat =
@@ -975,16 +975,16 @@ public class TestCompactor {
     Path minorCompactedDeleteDelta = null;
     for (int i = 0; i < deleteDeltas.length; i++) {
       deleteDeltas[i] = deleteDeltaStat[i].getPath().getName();
-      if (deleteDeltas[i].equals("delete_delta_0000001_0000003")) {
+      if (deleteDeltas[i].equals("delete_delta_0000003_0000005")) {
         minorCompactedDeleteDelta = deleteDeltaStat[i].getPath();
       }
     }
     Arrays.sort(deleteDeltas);
-    String[] expectedDeleteDeltas = new String[]{"delete_delta_0000001_0000003", "delete_delta_0000003_0000003_0000"};
+    String[] expectedDeleteDeltas = new String[]{"delete_delta_0000003_0000005", "delete_delta_0000005_0000005_0000"};
     if (!Arrays.deepEquals(expectedDeleteDeltas, deleteDeltas)) {
       Assert.fail("Expected: " + Arrays.toString(expectedDeleteDeltas) + ", found: " + Arrays.toString(deleteDeltas));
     }
-    checkExpectedTxnsPresent(null, new Path[]{minorCompactedDeleteDelta}, columnNamesProperty, columnTypesProperty, 0, 2L, 2L);
+    checkExpectedTxnsPresent(null, new Path[]{minorCompactedDeleteDelta}, columnNamesProperty, columnTypesProperty, 0, 4L, 4L);
   }
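
For reference, the trigger these compaction tests use: enqueue a request with the transaction handler and drive a single Worker cycle by hand rather than waiting on the background compactor thread. Condensed from the hunk above, with conf, dbName, and tblName assumed in scope:

    TxnStore txnHandler = TxnUtils.getTxnStore(conf);
    txnHandler.compact(new CompactionRequest(dbName, tblName, CompactionType.MINOR));
    Worker t = new Worker();
    // the test then initializes and runs the worker once, after which the
    // delta_x_y / delete_delta_x_y assertions above hold deterministically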
 
   @Test
@@ -1034,16 +1034,16 @@ public class TestCompactor {
     Path minorCompactedDelta = null;
     for (int i = 0; i < deltas.length; i++) {
       deltas[i] = stat[i].getPath().getName();
-      if (deltas[i].equals("delta_0000001_0000002")) {
+      if (deltas[i].equals("delta_0000003_0000004")) {
         minorCompactedDelta = stat[i].getPath();
       }
     }
     Arrays.sort(deltas);
-    String[] expectedDeltas = new String[]{"delta_0000001_0000001_0000", "delta_0000001_0000002", "delta_0000002_0000002_0000"};
+    String[] expectedDeltas = new String[]{"delta_0000003_0000003_0000", "delta_0000003_0000004", "delta_0000004_0000004_0000"};
     if (!Arrays.deepEquals(expectedDeltas, deltas)) {
       Assert.fail("Expected: " + Arrays.toString(expectedDeltas) + ", found: " + Arrays.toString(deltas));
     }
-    checkExpectedTxnsPresent(null, new Path[]{minorCompactedDelta}, columnNamesProperty, columnTypesProperty, 0, 1L, 2L);
+    checkExpectedTxnsPresent(null, new Path[]{minorCompactedDelta}, columnNamesProperty, columnTypesProperty, 0, 3L, 4L);
 
     // Verify that we have got correct set of delete_deltas.
     FileStatus[] deleteDeltaStat =
@@ -1052,12 +1052,12 @@ public class TestCompactor {
     Path minorCompactedDeleteDelta = null;
     for (int i = 0; i < deleteDeltas.length; i++) {
       deleteDeltas[i] = deleteDeltaStat[i].getPath().getName();
-      if (deleteDeltas[i].equals("delete_delta_0000001_0000002")) {
+      if (deleteDeltas[i].equals("delete_delta_0000003_0000004")) {
         minorCompactedDeleteDelta = deleteDeltaStat[i].getPath();
       }
     }
     Arrays.sort(deleteDeltas);
-    String[] expectedDeleteDeltas = new String[]{"delete_delta_0000001_0000002"};
+    String[] expectedDeleteDeltas = new String[]{"delete_delta_0000003_0000004"};
     if (!Arrays.deepEquals(expectedDeleteDeltas, deleteDeltas)) {
       Assert.fail("Expected: " + Arrays.toString(expectedDeleteDeltas) + ", found: " + Arrays.toString(deleteDeltas));
     }
@@ -1111,17 +1111,17 @@ public class TestCompactor {
       Path resultFile = null;
       for (int i = 0; i < names.length; i++) {
         names[i] = stat[i].getPath().getName();
-        if (names[i].equals("delta_0000001_0000004")) {
+        if (names[i].equals("delta_0000003_0000006")) {
           resultFile = stat[i].getPath();
         }
       }
       Arrays.sort(names);
-      String[] expected = new String[]{"delta_0000001_0000002",
-          "delta_0000001_0000004", "delta_0000003_0000004", "delta_0000005_0000006"};
+      String[] expected = new String[]{"delta_0000003_0000004",
+          "delta_0000003_0000006", "delta_0000005_0000006", "delta_0000007_0000008"};
       if (!Arrays.deepEquals(expected, names)) {
         Assert.fail("Expected: " + Arrays.toString(expected) + ", found: " + Arrays.toString(names));
       }
-      checkExpectedTxnsPresent(null, new Path[]{resultFile},columnNamesProperty, columnTypesProperty,  0, 1L, 4L);
+      checkExpectedTxnsPresent(null, new Path[]{resultFile}, columnNamesProperty, columnTypesProperty, 0, 3L, 6L);
 
       // Verify that we have got correct set of delete_deltas also
       FileStatus[] deleteDeltaStat =
@@ -1130,12 +1130,12 @@ public class TestCompactor {
       Path minorCompactedDeleteDelta = null;
       for (int i = 0; i < deleteDeltas.length; i++) {
         deleteDeltas[i] = deleteDeltaStat[i].getPath().getName();
-        if (deleteDeltas[i].equals("delete_delta_0000001_0000004")) {
+        if (deleteDeltas[i].equals("delete_delta_0000003_0000006")) {
           minorCompactedDeleteDelta = deleteDeltaStat[i].getPath();
         }
       }
       Arrays.sort(deleteDeltas);
-      String[] expectedDeleteDeltas = new String[]{"delete_delta_0000001_0000004"};
+      String[] expectedDeleteDeltas = new String[]{"delete_delta_0000003_0000006"};
       if (!Arrays.deepEquals(expectedDeleteDeltas, deleteDeltas)) {
         Assert.fail("Expected: " + Arrays.toString(expectedDeleteDeltas) + ", found: " + Arrays.toString(deleteDeltas));
       }
@@ -1332,6 +1332,16 @@ public class TestCompactor {
       public boolean isValidBase(long txnid) {
         return true;
       }
+
+      @Override
+      public boolean isTxnAborted(long txnid) {
+        return true;
+      }
+
+      @Override
+      public RangeResponse isTxnRangeAborted(long minTxnId, long maxTxnId) {
+        return RangeResponse.ALL;
+      }
     };
 
     OrcInputFormat aif = new OrcInputFormat();
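
The two overrides added to this stubbed transaction list make every transaction, and every transaction range, report as aborted (RangeResponse is the nested NONE/SOME/ALL enum visible in the override signature). A hedged illustration of what a reader does with that answer, assuming the stub is bound to a variable named txnList:

    // With the stub above, probing any txn range reports it entirely aborted,
    // so an ACID reader can skip the corresponding delta wholesale.
    if (txnList.isTxnRangeAborted(3L, 6L) == RangeResponse.ALL) {
      // skip this delta's rows entirely
    }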

http://git-wip-us.apache.org/repos/asf/hive/blob/187eb760/itests/hive-unit/src/test/java/org/apache/hive/beeline/TestBeeLineWithArgs.java
----------------------------------------------------------------------
diff --git a/itests/hive-unit/src/test/java/org/apache/hive/beeline/TestBeeLineWithArgs.java b/itests/hive-unit/src/test/java/org/apache/hive/beeline/TestBeeLineWithArgs.java
index 650c4b7..75f46ec 100644
--- a/itests/hive-unit/src/test/java/org/apache/hive/beeline/TestBeeLineWithArgs.java
+++ b/itests/hive-unit/src/test/java/org/apache/hive/beeline/TestBeeLineWithArgs.java
@@ -1069,4 +1069,17 @@ public class TestBeeLineWithArgs {
       this.shouldMatch = shouldMatch;
     }
   }
+
+  /**
+   * Test that Beeline can handle \\ characters within a string literal, whether they appear
+   * at the beginning, middle, or end of the literal.
+   */
+  @Test
+  public void testBackslashInLiteral() throws Throwable {
+    String SCRIPT_TEXT = "select 'hello\\\\', '\\\\hello', 'hel\\\\lo', '\\\\' as literal;";
+    final String EXPECTED_PATTERN = "hello\\\\\t\\\\hello\thel\\\\lo\t\\\\";
+    List<String> argList = getBaseArgs(miniHS2.getBaseJdbcURL());
+    argList.add("--outputformat=tsv2");
+    testScriptFile(SCRIPT_TEXT, argList, EXPECTED_PATTERN, true);
+  }
 }
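
Two escaping layers are in play in the new test: the Java compiler first halves the backslashes in SCRIPT_TEXT, and HiveQL then treats \\ inside a quoted literal as one escaped backslash. A standalone illustration (not part of the test):

    // Java source "\\\\" compiles to a two-character runtime string: \\
    String sql = "select '\\\\' as literal;";
    System.out.println(sql);   // prints: select '\\' as literal;
    // HiveQL unescapes '\\' to a single backslash in the selected value,
    // and the tsv2 output format prints that value as-is.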

http://git-wip-us.apache.org/repos/asf/hive/blob/187eb760/itests/hive-unit/src/test/java/org/apache/hive/beeline/TestSchemaTool.java
----------------------------------------------------------------------
diff --git a/itests/hive-unit/src/test/java/org/apache/hive/beeline/TestSchemaTool.java b/itests/hive-unit/src/test/java/org/apache/hive/beeline/TestSchemaTool.java
index 22630b9..604c234 100644
--- a/itests/hive-unit/src/test/java/org/apache/hive/beeline/TestSchemaTool.java
+++ b/itests/hive-unit/src/test/java/org/apache/hive/beeline/TestSchemaTool.java
@@ -36,6 +36,7 @@ import org.apache.commons.lang.StringUtils;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.metastore.HiveMetaException;
 import org.apache.hadoop.hive.metastore.MetaStoreSchemaInfo;
+import org.apache.hadoop.hive.shims.ShimLoader;
 import org.apache.hive.beeline.HiveSchemaHelper.NestedScriptParser;
 import org.apache.hive.beeline.HiveSchemaHelper.PostgresCommandParser;
 
@@ -57,6 +58,10 @@ public class TestSchemaTool extends TestCase {
     hiveConf = new HiveConf(this.getClass());
     schemaTool = new HiveSchemaTool(
         System.getProperty("test.tmp.dir", "target/tmp"), hiveConf, "derby");
+    schemaTool.setUserName(
+        schemaTool.getHiveConf().get(HiveConf.ConfVars.METASTORE_CONNECTION_USER_NAME.varname));
+    schemaTool.setPassWord(ShimLoader.getHadoopShims().getPassword(schemaTool.getHiveConf(),
+          HiveConf.ConfVars.METASTOREPWD.varname));
     System.setProperty("beeLine.system.exit", "true");
     errStream = System.err;
     outStream = System.out;
@@ -120,8 +125,8 @@ public class TestSchemaTool extends TestCase {
     boolean isValid = (boolean)schemaTool.validateSchemaTables(conn);
     assertTrue(isValid);
 
-    // upgrade to 2.2.0 schema and re-validate
-    schemaTool.doUpgrade("2.2.0");
+    // upgrade from 2.0.0 schema and re-validate
+    schemaTool.doUpgrade("2.0.0");
     isValid = (boolean)schemaTool.validateSchemaTables(conn);
     assertTrue(isValid);
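
The setUp addition resolves the metastore credentials explicitly: the user name straight from HiveConf, the password through the Hadoop shims, presumably so a Hadoop CredentialProvider can supply it instead of plain-text configuration. The pattern in isolation, with hiveConf in scope:

    String user = hiveConf.get(
        HiveConf.ConfVars.METASTORE_CONNECTION_USER_NAME.varname);
    String pass = ShimLoader.getHadoopShims().getPassword(hiveConf,
        HiveConf.ConfVars.METASTOREPWD.varname);
    schemaTool.setUserName(user);
    schemaTool.setPassWord(pass);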
 

http://git-wip-us.apache.org/repos/asf/hive/blob/187eb760/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcDriver2.java
----------------------------------------------------------------------
diff --git a/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcDriver2.java b/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcDriver2.java
index 4a82aa5..6e9223a 100644
--- a/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcDriver2.java
+++ b/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcDriver2.java
@@ -30,6 +30,7 @@ import org.apache.hadoop.hive.ql.exec.UDF;
 import org.apache.hadoop.hive.ql.processors.DfsProcessor;
 import org.apache.hive.common.util.HiveVersionInfo;
 import org.apache.hive.jdbc.Utils.JdbcConnectionParams;
+import org.apache.hive.service.cli.HiveSQLException;
 import org.apache.hive.service.cli.operation.ClassicTableTypeMapping;
 import org.apache.hive.service.cli.operation.ClassicTableTypeMapping.ClassicTableTypes;
 import org.apache.hive.service.cli.operation.HiveTableTypeMapping;
@@ -577,7 +578,7 @@ public class TestJdbcDriver2 {
 
   @Test
   public void testSetOnConnection() throws Exception {
-    Connection connection = getConnection("test?conf1=conf2;conf3=conf4#var1=var2;var3=var4");
+    Connection connection = getConnection(testDbName + "?conf1=conf2;conf3=conf4#var1=var2;var3=var4");
     try {
       verifyConfValue(connection, "conf1", "conf2");
       verifyConfValue(connection, "conf3", "conf4");
@@ -1083,6 +1084,45 @@ public class TestJdbcDriver2 {
   }
 
   @Test
+  public void testShowTablesInDb() throws SQLException {
+    Statement stmt = con.createStatement();
+    assertNotNull("Statement is null", stmt);
+
+    String tableNameInDbUnique = tableName + "_unique";
+    // create a table with a unique name in testDb
+    stmt.execute("drop table if exists " + testDbName + "." + tableNameInDbUnique);
+    stmt.execute("create table " + testDbName + "." + tableNameInDbUnique
+        + " (under_col int comment 'the under column', value string) comment '" + tableComment
+        + "'");
+
+    ResultSet res = stmt.executeQuery("show tables in " + testDbName);
+
+    boolean testTableExists = false;
+    while (res.next()) {
+      assertNotNull("table name is null in result set", res.getString(1));
+      if (tableNameInDbUnique.equalsIgnoreCase(res.getString(1))) {
+        testTableExists = true;
+      }
+    }
+    assertTrue("table name " + tableNameInDbUnique
+        + " not found in SHOW TABLES result set", testTableExists);
+    stmt.execute("drop table if exists " + testDbName + "." + tableNameInDbUnique);
+    stmt.close();
+  }
+
+  @Test
+  public void testInvalidShowTables() throws SQLException {
+    Statement stmt = con.createStatement();
+    assertNotNull("Statement is null", stmt);
+
+    // "show tables <dbname>" is invalid SHOW TABLES syntax; Hive does not
+    // return any tables in this case.
+    ResultSet res = stmt.executeQuery("show tables " + testDbName);
+    assertFalse(res.next());
+    stmt.close();
+  }
+
+  @Test
   public void testMetaDataGetTables() throws SQLException {
     getTablesTest(ImmutableSet.of(ClassicTableTypes.TABLE.toString()),
         ClassicTableTypes.VIEW.toString());
@@ -2883,4 +2923,10 @@ public class TestJdbcDriver2 {
     assertEquals(rowCount, dataFileRowCount);
     stmt.execute("drop table " + tblName);
   }
+
+  // Test that opening a JDBC connection to a non-existent database throws a HiveSQLException
+  @Test(expected = HiveSQLException.class)
+  public void testConnectInvalidDatabase() throws SQLException {
+    DriverManager.getConnection("jdbc:hive2:///databasedoesnotexist", "", "");
+  }
 }
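
For reference, the JDBC URL shapes these TestJdbcDriver2 changes exercise (values illustrative):

    // the path segment names the database; '?' carries HiveConf overrides and
    // '#' carries session variables, as testSetOnConnection now does per-db
    String withOverrides = "jdbc:hive2:///testdb?conf1=conf2;conf3=conf4#var1=var2";
    // connecting to a database that does not exist fails at connect time, which
    // testConnectInvalidDatabase pins down with @Test(expected = HiveSQLException.class)
    String invalidDb = "jdbc:hive2:///databasedoesnotexist";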

http://git-wip-us.apache.org/repos/asf/hive/blob/187eb760/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcWithMiniHS2.java
----------------------------------------------------------------------
diff --git a/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcWithMiniHS2.java b/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcWithMiniHS2.java
index afe23f8..fc2cb08 100644
--- a/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcWithMiniHS2.java
+++ b/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcWithMiniHS2.java
@@ -112,6 +112,14 @@ public class TestJdbcWithMiniHS2 {
     stmt.execute("drop database if exists " + testDbName + " cascade");
     stmt.execute("create database " + testDbName);
     stmt.close();
+
+    try {
+      openTestConnections();
+    } catch (Exception e) {
+      System.out.println("Unable to open default connections to MiniHS2: " + e);
+      throw e;
+    }
+
     // tables in test db
     createTestTables(conTestDb, testDbName);
   }
@@ -183,6 +191,7 @@ public class TestJdbcWithMiniHS2 {
     HiveConf conf = new HiveConf();
     startMiniHS2(conf);
     openDefaultConnections();
+    openTestConnections();
   }
 
   private static void startMiniHS2(HiveConf conf) throws Exception {
@@ -208,6 +217,9 @@ public class TestJdbcWithMiniHS2 {
 
   private static void openDefaultConnections() throws Exception {
     conDefault = getConnection();
+  }
+
+  private static void openTestConnections() throws Exception {
     conTestDb = getConnection(testDbName);
   }
 
@@ -966,6 +978,38 @@ public class TestJdbcWithMiniHS2 {
   }
 
   /**
+   * Test that the JDBC driver retries on NoHttpResponseException after the server closes an idle HTTP connection.
+   * @throws Exception
+   */
+  @Test
+  public void testHttpRetryOnServerIdleTimeout() throws Exception {
+    // Stop HiveServer2
+    stopMiniHS2();
+    HiveConf conf = new HiveConf();
+    conf.set("hive.server2.transport.mode", "http");
+    // Set server's idle timeout to a very low value
+    conf.set("hive.server2.thrift.http.max.idle.time", "5");
+    startMiniHS2(conf);
+    String userName = System.getProperty("user.name");
+    Connection conn = getConnection(miniHS2.getJdbcURL(testDbName), userName, "password");
+    Statement stmt = conn.createStatement();
+    stmt.execute("select from_unixtime(unix_timestamp())");
+    // Sleep for longer than the server's idle timeout, then execute another query
+    TimeUnit.SECONDS.sleep(10);
+    try {
+      stmt.execute("select from_unixtime(unix_timestamp())");
+    } catch (Exception e) {
+      fail("Not expecting exception: " + e);
+    } finally {
+      if (conn != null) {
+        conn.close();
+      }
+    }
+    // Restore original state
+    restoreMiniHS2AndConnections();
+  }
+
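
The retry test works by making the server drop idle HTTP connections almost immediately and then sleeping past that window, so the next execute lands on a stale connection; the assertion is that the driver's retry on NoHttpResponseException hides this from the caller. The relevant pieces, condensed from the test above:

    conf.set("hive.server2.transport.mode", "http");
    conf.set("hive.server2.thrift.http.max.idle.time", "5");  // very low idle timeout
    // ... start MiniHS2, open a connection, run one query, then:
    TimeUnit.SECONDS.sleep(10);               // outlive the server's idle window
    stmt.execute("select from_unixtime(unix_timestamp())");   // must succeed via retry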
+  /**
    * Tests that DataNucleus' NucleusContext.classLoaderResolverMap clears cached class objects
    * (& hence doesn't leak classloaders) on closing any session
    *