Posted to commits@hive.apache.org by br...@apache.org on 2015/01/19 18:28:54 UTC

svn commit: r1653062 [1/6] - in /hive/trunk: common/src/java/org/apache/hadoop/hive/common/ common/src/java/org/apache/hadoop/hive/conf/ data/scripts/ hbase-handler/src/java/org/apache/hadoop/hive/hbase/ hcatalog/core/src/main/java/org/apache/hive/hcat...

Author: brock
Date: Mon Jan 19 17:28:53 2015
New Revision: 1653062

URL: http://svn.apache.org/r1653062
Log:
HIVE-9264 - Merge encryption branch to trunk (Dong Chen, Ferdinand Xu, Sergio Pena, Chaoyu Tang, Brock Noland via Brock)

Added:
    hive/trunk/data/scripts/q_test_cleanup_for_encryption.sql
    hive/trunk/data/scripts/q_test_init_for_encryption.sql
    hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/processors/CryptoProcessor.java
    hive/trunk/ql/src/test/queries/clientpositive/encryption_insert_partition_dynamic.q
    hive/trunk/ql/src/test/queries/clientpositive/encryption_insert_partition_static.q
    hive/trunk/ql/src/test/queries/clientpositive/encryption_join_unencrypted_tbl.q
    hive/trunk/ql/src/test/queries/clientpositive/encryption_join_with_different_encryption_keys.q
    hive/trunk/ql/src/test/queries/clientpositive/encryption_load_data_to_encrypted_tables.q
    hive/trunk/ql/src/test/queries/clientpositive/encryption_select_read_only_encrypted_tbl.q
    hive/trunk/ql/src/test/queries/clientpositive/encryption_select_read_only_unencrypted_tbl.q
    hive/trunk/ql/src/test/results/clientpositive/encrypted/
    hive/trunk/ql/src/test/results/clientpositive/encrypted/encryption_insert_partition_dynamic.q.out
    hive/trunk/ql/src/test/results/clientpositive/encrypted/encryption_insert_partition_static.q.out
    hive/trunk/ql/src/test/results/clientpositive/encrypted/encryption_join_unencrypted_tbl.q.out
    hive/trunk/ql/src/test/results/clientpositive/encrypted/encryption_join_with_different_encryption_keys.q.out
    hive/trunk/ql/src/test/results/clientpositive/encrypted/encryption_load_data_to_encrypted_tables.q.out
    hive/trunk/ql/src/test/results/clientpositive/encrypted/encryption_select_read_only_encrypted_tbl.q.out
    hive/trunk/ql/src/test/results/clientpositive/encrypted/encryption_select_read_only_unencrypted_tbl.q.out
Modified:
    hive/trunk/common/src/java/org/apache/hadoop/hive/common/FileUtils.java
    hive/trunk/common/src/java/org/apache/hadoop/hive/common/HiveStatsUtils.java
    hive/trunk/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
    hive/trunk/hbase-handler/src/java/org/apache/hadoop/hive/hbase/HiveHFileOutputFormat.java
    hive/trunk/hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/FileOutputCommitterContainer.java
    hive/trunk/hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/FileOutputFormatContainer.java
    hive/trunk/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/FolderPermissionBase.java
    hive/trunk/itests/qtest/pom.xml
    hive/trunk/itests/src/test/resources/testconfiguration.properties
    hive/trunk/itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestUtil.java
    hive/trunk/itests/util/src/main/java/org/apache/hadoop/hive/ql/hooks/VerifyTableDirectoryIsEmptyHook.java
    hive/trunk/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SkewedInfo.java
    hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/Context.java
    hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/FetchOperator.java
    hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java
    hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java
    hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/index/HiveIndexResult.java
    hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/BucketizedHiveInputFormat.java
    hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/CombineHiveInputFormat.java
    hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/SymbolicInputFormat.java
    hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/SymlinkTextInputFormat.java
    hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
    hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveMetaStoreChecker.java
    hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/metadata/Partition.java
    hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/metadata/Table.java
    hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/AbstractBucketJoinProc.java
    hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/IndexUtils.java
    hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/ExportSemanticAnalyzer.java
    hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/ImportSemanticAnalyzer.java
    hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
    hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/processors/CommandProcessorFactory.java
    hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/processors/HiveCommand.java
    hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/session/SessionState.java
    hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Initiator.java
    hive/trunk/ql/src/test/org/apache/hadoop/hive/ql/io/TestSymlinkTextInputFormat.java
    hive/trunk/ql/src/test/org/apache/hadoop/hive/ql/processors/TestCommandProcessorFactory.java
    hive/trunk/ql/src/test/queries/clientpositive/smb_mapjoin_11.q
    hive/trunk/ql/src/test/results/clientnegative/fs_default_name2.q.out
    hive/trunk/ql/src/test/results/clientpositive/smb_mapjoin_11.q.out
    hive/trunk/ql/src/test/results/clientpositive/spark/smb_mapjoin_11.q.out
    hive/trunk/ql/src/test/templates/TestCliDriver.vm
    hive/trunk/shims/0.20S/src/main/java/org/apache/hadoop/hive/shims/Hadoop20SShims.java
    hive/trunk/shims/0.23/pom.xml
    hive/trunk/shims/0.23/src/main/java/org/apache/hadoop/hive/shims/Hadoop23Shims.java
    hive/trunk/shims/common/src/main/java/org/apache/hadoop/hive/shims/HadoopShims.java

Modified: hive/trunk/common/src/java/org/apache/hadoop/hive/common/FileUtils.java
URL: http://svn.apache.org/viewvc/hive/trunk/common/src/java/org/apache/hadoop/hive/common/FileUtils.java?rev=1653062&r1=1653061&r2=1653062&view=diff
==============================================================================
--- hive/trunk/common/src/java/org/apache/hadoop/hive/common/FileUtils.java (original)
+++ hive/trunk/common/src/java/org/apache/hadoop/hive/common/FileUtils.java Mon Jan 19 17:28:53 2015
@@ -53,17 +53,19 @@ import org.apache.hadoop.util.Shell;
 public final class FileUtils {
   private static final Log LOG = LogFactory.getLog(FileUtils.class.getName());
 
-  /**
-   * Accept all paths.
-   */
-  private static class AcceptAllPathFilter implements PathFilter {
-    @Override
-    public boolean accept(Path path) {
-      return true;
+  public static final PathFilter HIDDEN_FILES_PATH_FILTER = new PathFilter() {
+    public boolean accept(Path p) {
+      String name = p.getName();
+      return !name.startsWith("_") && !name.startsWith(".");
+    }
+  };
+
+  public static final PathFilter STAGING_DIR_PATH_FILTER = new PathFilter() {
+    public boolean accept(Path p) {
+      String name = p.getName();
+      return !name.startsWith(".");
     }
-  }
-
-  private static final PathFilter allPathFilter = new AcceptAllPathFilter();
+  };
 
   /**
    * Variant of Path.makeQualified that qualifies the input path against the default file system
@@ -319,14 +321,7 @@ public final class FileUtils {
       List<FileStatus> results) throws IOException {
 
     if (fileStatus.isDir()) {
-      for (FileStatus stat : fs.listStatus(fileStatus.getPath(), new PathFilter() {
-
-        @Override
-        public boolean accept(Path p) {
-          String name = p.getName();
-          return !name.startsWith("_") && !name.startsWith(".");
-        }
-      })) {
+      for (FileStatus stat : fs.listStatus(fileStatus.getPath(), HIDDEN_FILES_PATH_FILTER)) {
         listStatusRecursively(fs, stat, results);
       }
     } else {
@@ -366,7 +361,6 @@ public final class FileUtils {
    *             check will be performed within a doAs() block to use the access privileges
    *             of this user. In this case the user must be configured to impersonate other
    *             users, otherwise this check will fail with error.
-   * @param groups  List of groups for the user
    * @throws IOException
    * @throws AccessControlException
    * @throws InterruptedException
@@ -547,10 +541,25 @@ public final class FileUtils {
     boolean deleteSource,
     boolean overwrite,
     HiveConf conf) throws IOException {
-    boolean copied = FileUtil.copy(srcFS, src, dstFS, dst, deleteSource, overwrite, conf);
+
+    HadoopShims shims = ShimLoader.getHadoopShims();
+    boolean copied;
+
+    /* Run distcp if source file/dir is too big */
+    if (srcFS.getUri().getScheme().equals("hdfs") &&
+        srcFS.getFileStatus(src).getLen() > conf.getLongVar(HiveConf.ConfVars.HIVE_EXEC_COPYFILE_MAXSIZE)) {
+      LOG.info("Source is " + srcFS.getFileStatus(src).getLen() + " bytes. (MAX: " + conf.getLongVar(HiveConf.ConfVars.HIVE_EXEC_COPYFILE_MAXSIZE) + ")");
+      LOG.info("Launch distributed copy (distcp) job.");
+      copied = shims.runDistCp(src, dst, conf);
+      if (copied && deleteSource) {
+        srcFS.delete(src, true);
+      }
+    } else {
+      copied = FileUtil.copy(srcFS, src, dstFS, dst, deleteSource, overwrite, conf);
+    }
+
     boolean inheritPerms = conf.getBoolVar(HiveConf.ConfVars.HIVE_WAREHOUSE_SUBDIR_INHERIT_PERMS);
     if (copied && inheritPerms) {
-      HadoopShims shims = ShimLoader.getHadoopShims();
       HdfsFileStatus fullFileStatus = shims.getFullFileStatus(conf, dstFS, dst);
       try {
         shims.setFullFileStatus(conf, fullFileStatus, dstFS, dst);
@@ -571,7 +580,7 @@ public final class FileUtils {
    * @throws IOException
    */
   public static boolean trashFilesUnderDir(FileSystem fs, Path f, Configuration conf) throws FileNotFoundException, IOException {
-    FileStatus[] statuses = fs.listStatus(f, allPathFilter);
+    FileStatus[] statuses = fs.listStatus(f, HIDDEN_FILES_PATH_FILTER);
     boolean result = true;
     for (FileStatus status : statuses) {
       result = result & moveToTrash(fs, status.getPath(), conf);
@@ -603,6 +612,25 @@ public final class FileUtils {
     return result;
   }
 
+  /**
+   * Check if first path is a subdirectory of second path.
+   * Both paths must belong to the same filesystem.
+   *
+   * @param p1 first path
+   * @param p2 second path
+   * @param fs FileSystem, both paths must belong to the same filesystem
+   * @return true if the first path is a subdirectory of the second
+   */
+  public static boolean isSubDir(Path p1, Path p2, FileSystem fs) {
+    String path1 = fs.makeQualified(p1).toString();
+    String path2 = fs.makeQualified(p2).toString();
+    if (path1.startsWith(path2)) {
+      return true;
+    }
+
+    return false;
+  }
+
   public static boolean renameWithPerms(FileSystem fs, Path sourcePath,
                                Path destPath, boolean inheritPerms,
                                Configuration conf) throws IOException {

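For context on the two filters added above: HIDDEN_FILES_PATH_FILTER mirrors the anonymous filter it replaces (rejecting "_"- and "."-prefixed names), while STAGING_DIR_PATH_FILTER rejects only "."-prefixed names such as the new ${hive.exec.stagingdir} default ".hive-staging". A minimal sketch of their behavior (the demo class name and sample paths are illustrative, not part of the patch):

    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hive.common.FileUtils;

    public class PathFilterDemo {
      public static void main(String[] args) {
        // Hidden-files filter: rejects both "_" and "." prefixes.
        System.out.println(FileUtils.HIDDEN_FILES_PATH_FILTER.accept(new Path("/wh/t/000000_0")));      // true
        System.out.println(FileUtils.HIDDEN_FILES_PATH_FILTER.accept(new Path("/wh/t/_SUCCESS")));      // false
        System.out.println(FileUtils.HIDDEN_FILES_PATH_FILTER.accept(new Path("/wh/t/.hive-staging"))); // false

        // Staging-dir filter: rejects only "." prefixes, so "_"-prefixed
        // directories (e.g. HBase column family dirs) still pass.
        System.out.println(FileUtils.STAGING_DIR_PATH_FILTER.accept(new Path("/out/_cf")));             // true
        System.out.println(FileUtils.STAGING_DIR_PATH_FILTER.accept(new Path("/out/.hive-staging")));   // false
      }
    }
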
Modified: hive/trunk/common/src/java/org/apache/hadoop/hive/common/HiveStatsUtils.java
URL: http://svn.apache.org/viewvc/hive/trunk/common/src/java/org/apache/hadoop/hive/common/HiveStatsUtils.java?rev=1653062&r1=1653061&r2=1653062&view=diff
==============================================================================
--- hive/trunk/common/src/java/org/apache/hadoop/hive/common/HiveStatsUtils.java (original)
+++ hive/trunk/common/src/java/org/apache/hadoop/hive/common/HiveStatsUtils.java Mon Jan 19 17:28:53 2015
@@ -70,7 +70,7 @@ public class HiveStatsUtils {
       sb.append(Path.SEPARATOR).append("*");
     }
     Path pathPattern = new Path(path, sb.toString());
-    return fs.globStatus(pathPattern);
+    return fs.globStatus(pathPattern, FileUtils.HIDDEN_FILES_PATH_FILTER);
   }
 
 }

Modified: hive/trunk/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
URL: http://svn.apache.org/viewvc/hive/trunk/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java?rev=1653062&r1=1653061&r2=1653062&view=diff
==============================================================================
--- hive/trunk/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java (original)
+++ hive/trunk/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java Mon Jan 19 17:28:53 2015
@@ -215,6 +215,10 @@ public class HiveConf extends Configurat
     PLAN_SERIALIZATION("hive.plan.serialization.format", "kryo",
         "Query plan format serialization between client and task nodes. \n" +
         "Two supported values are : kryo and javaXML. Kryo is default."),
+    STAGINGDIR("hive.exec.stagingdir", ".hive-staging",
+        "Directory name that will be created inside table locations in order to support HDFS encryption. " +
+        "This is replaces ${hive.exec.scratchdir} for query results with the exception of read-only tables. " +
+        "In all cases ${hive.exec.scratchdir} is still used for other temporary files, such as job plans."),
     SCRATCHDIR("hive.exec.scratchdir", "/tmp/hive",
         "HDFS root scratch dir for Hive jobs which gets created with write all (733) permission. " +
         "For each connecting user, an HDFS scratch dir: ${hive.exec.scratchdir}/<username> is created, " +
@@ -742,6 +746,10 @@ public class HiveConf extends Configurat
         "cardinality (4 in the example above), is more than this value, a new MR job is added under the\n" +
         "assumption that the original group by will reduce the data size."),
 
+    // Max filesize used to do a single copy (after that, distcp is used)
+    HIVE_EXEC_COPYFILE_MAXSIZE("hive.exec.copyfile.maxsize", 32L * 1024 * 1024 /*32M*/,
+        "Maximum file size (in Mb) that Hive uses to do single HDFS copies between directories." +
+        "Distributed copies (distcp) will be used instead for bigger files so that copies can be done faster."),
 
     // for hive udtf operator
     HIVEUDTFAUTOPROGRESS("hive.udtf.auto.progress", false,

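The copy path in FileUtils.copy (earlier in this patch) reads the new size limit via conf.getLongVar, and the staging directory name is read like any other string ConfVar. A minimal usage sketch, assuming the defaults added above (the demo class name is illustrative):

    import org.apache.hadoop.hive.conf.HiveConf;

    public class NewConfDemo {
      public static void main(String[] args) {
        HiveConf conf = new HiveConf();
        // Staging dir created inside table locations (default ".hive-staging").
        String stagingDir = conf.getVar(HiveConf.ConfVars.STAGINGDIR);
        // Single-copy size cap in bytes (default 32 * 1024 * 1024); larger
        // HDFS sources are copied with distcp instead.
        long maxCopySize = conf.getLongVar(HiveConf.ConfVars.HIVE_EXEC_COPYFILE_MAXSIZE);
        System.out.println(stagingDir + " / " + maxCopySize);
      }
    }
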
Added: hive/trunk/data/scripts/q_test_cleanup_for_encryption.sql
URL: http://svn.apache.org/viewvc/hive/trunk/data/scripts/q_test_cleanup_for_encryption.sql?rev=1653062&view=auto
==============================================================================
--- hive/trunk/data/scripts/q_test_cleanup_for_encryption.sql (added)
+++ hive/trunk/data/scripts/q_test_cleanup_for_encryption.sql Mon Jan 19 17:28:53 2015
@@ -0,0 +1 @@
+DROP TABLE IF EXISTS src;

Added: hive/trunk/data/scripts/q_test_init_for_encryption.sql
URL: http://svn.apache.org/viewvc/hive/trunk/data/scripts/q_test_init_for_encryption.sql?rev=1653062&view=auto
==============================================================================
--- hive/trunk/data/scripts/q_test_init_for_encryption.sql (added)
+++ hive/trunk/data/scripts/q_test_init_for_encryption.sql Mon Jan 19 17:28:53 2015
@@ -0,0 +1,5 @@
+DROP TABLE IF EXISTS src;
+
+CREATE TABLE src(key STRING COMMENT 'default', value STRING COMMENT 'default') STORED AS TEXTFILE;
+
+LOAD DATA LOCAL INPATH "${hiveconf:test.data.dir}/kv1.txt" OVERWRITE INTO TABLE src;

Modified: hive/trunk/hbase-handler/src/java/org/apache/hadoop/hive/hbase/HiveHFileOutputFormat.java
URL: http://svn.apache.org/viewvc/hive/trunk/hbase-handler/src/java/org/apache/hadoop/hive/hbase/HiveHFileOutputFormat.java?rev=1653062&r1=1653061&r2=1653062&view=diff
==============================================================================
--- hive/trunk/hbase-handler/src/java/org/apache/hadoop/hive/hbase/HiveHFileOutputFormat.java (original)
+++ hive/trunk/hbase-handler/src/java/org/apache/hadoop/hive/hbase/HiveHFileOutputFormat.java Mon Jan 19 17:28:53 2015
@@ -41,6 +41,7 @@ import org.apache.hadoop.hbase.KeyValueU
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
 import org.apache.hadoop.hbase.mapreduce.HFileOutputFormat;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hive.common.FileUtils;
 import org.apache.hadoop.hive.ql.exec.FileSinkOperator.RecordWriter;
 import org.apache.hadoop.hive.ql.io.HiveOutputFormat;
 import org.apache.hadoop.hive.shims.ShimLoader;
@@ -149,7 +150,7 @@ public class HiveHFileOutputFormat exten
           fs.mkdirs(columnFamilyPath);
           Path srcDir = outputdir;
           for (;;) {
-            FileStatus [] files = fs.listStatus(srcDir);
+            FileStatus [] files = fs.listStatus(srcDir, FileUtils.STAGING_DIR_PATH_FILTER);
             if ((files == null) || (files.length == 0)) {
               throw new IOException("No family directories found in " + srcDir);
             }
@@ -161,7 +162,7 @@ public class HiveHFileOutputFormat exten
               break;
             }
           }
-          for (FileStatus regionFile : fs.listStatus(srcDir)) {
+          for (FileStatus regionFile : fs.listStatus(srcDir, FileUtils.STAGING_DIR_PATH_FILTER)) {
             fs.rename(
               regionFile.getPath(),
               new Path(

Modified: hive/trunk/hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/FileOutputCommitterContainer.java
URL: http://svn.apache.org/viewvc/hive/trunk/hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/FileOutputCommitterContainer.java?rev=1653062&r1=1653061&r2=1653062&view=diff
==============================================================================
--- hive/trunk/hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/FileOutputCommitterContainer.java (original)
+++ hive/trunk/hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/FileOutputCommitterContainer.java Mon Jan 19 17:28:53 2015
@@ -704,7 +704,7 @@ class FileOutputCommitterContainer exten
 
       //      LOG.info("Searching for "+dynPathSpec);
       Path pathPattern = new Path(dynPathSpec);
-      FileStatus[] status = fs.globStatus(pathPattern);
+      FileStatus[] status = fs.globStatus(pathPattern, FileUtils.HIDDEN_FILES_PATH_FILTER);
 
       partitionsDiscoveredByPath = new LinkedHashMap<String, Map<String, String>>();
       contextDiscoveredByPath = new LinkedHashMap<String, JobContext>();

Modified: hive/trunk/hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/FileOutputFormatContainer.java
URL: http://svn.apache.org/viewvc/hive/trunk/hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/FileOutputFormatContainer.java?rev=1653062&r1=1653061&r2=1653062&view=diff
==============================================================================
--- hive/trunk/hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/FileOutputFormatContainer.java (original)
+++ hive/trunk/hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/FileOutputFormatContainer.java Mon Jan 19 17:28:53 2015
@@ -20,9 +20,7 @@
 package org.apache.hive.hcatalog.mapreduce;
 
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.PathFilter;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.metastore.MetaStoreUtils;
 import org.apache.hadoop.hive.ql.metadata.HiveStorageHandler;

Modified: hive/trunk/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/FolderPermissionBase.java
URL: http://svn.apache.org/viewvc/hive/trunk/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/FolderPermissionBase.java?rev=1653062&r1=1653061&r2=1653062&view=diff
==============================================================================
--- hive/trunk/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/FolderPermissionBase.java (original)
+++ hive/trunk/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/FolderPermissionBase.java Mon Jan 19 17:28:53 2015
@@ -205,169 +205,218 @@ public abstract class FolderPermissionBa
 
 
   @Test
-  public void testStaticPartition() throws Exception {
-    String tableName = "staticpart";
-    CommandProcessorResponse ret = driver.run("CREATE TABLE " + tableName + " (key string, value string) partitioned by (part1 string, part2 string)");
-    Assert.assertEquals(0,ret.getResponseCode());
+  public void testInsertNonPartTable() throws Exception {
+    //case 1 is non-partitioned table.
+    String tableName = "nonpart";
+
+    CommandProcessorResponse ret = driver.run("CREATE TABLE " + tableName + " (key string, value string)");
+    Assert.assertEquals(0, ret.getResponseCode());
 
+    String tableLoc = warehouseDir + "/" + tableName;
     assertExistence(warehouseDir + "/" + tableName);
+
+    //case1A: insert into non-partitioned table.
     setPermission(warehouseDir + "/" + tableName);
+    ret = driver.run("insert into table " + tableName + " select key,value from mysrc");
+    Assert.assertEquals(0, ret.getResponseCode());
 
-    ret = driver.run("insert into table " + tableName + " partition(part1='1', part2='1') select key,value from mysrc where part1='1' and part2='1'");
-    Assert.assertEquals(0,ret.getResponseCode());
+    verifyPermission(warehouseDir + "/" + tableName);
+    Assert.assertTrue(listStatus(tableLoc).size() > 0);
+    for (String child : listStatus(tableLoc)) {
+      verifyPermission(child);
+    }
 
-    verifyPermission(warehouseDir + "/" + tableName + "/part1=1");
-    verifyPermission(warehouseDir + "/" + tableName + "/part1=1/part2=1");
+    //case1B: insert overwrite non-partitioned-table
+    setPermission(warehouseDir + "/" + tableName, 1);
+    ret = driver.run("insert overwrite table " + tableName + " select key,value from mysrc");
+    Assert.assertEquals(0, ret.getResponseCode());
 
-    Assert.assertTrue(listStatus(warehouseDir + "/" + tableName + "/part1=1/part2=1").size() > 0);
-    for (String child : listStatus(warehouseDir + "/" + tableName + "/part1=1/part2=1")) {
-      verifyPermission(child);
+    verifyPermission(warehouseDir + "/" + tableName, 1);
+    Assert.assertTrue(listStatus(tableLoc).size() > 0);
+    for (String child : listStatus(tableLoc)) {
+      verifyPermission(child, 1);
     }
   }
 
   @Test
-  public void testAlterPartition() throws Exception {
-    String tableName = "alterpart";
-    CommandProcessorResponse ret = driver.run("CREATE TABLE " + tableName + " (key string, value string) partitioned by (part1 int, part2 int, part3 int)");
-    Assert.assertEquals(0,ret.getResponseCode());
+  public void testInsertStaticSinglePartition() throws Exception {
+    String tableName = "singlestaticpart";
+    CommandProcessorResponse ret = driver.run("CREATE TABLE " + tableName + " (key string, value string) partitioned by (part1 string)");
+    Assert.assertEquals(0, ret.getResponseCode());
 
     assertExistence(warehouseDir + "/" + tableName);
     setPermission(warehouseDir + "/" + tableName);
 
-    ret = driver.run("insert into table " + tableName + " partition(part1='1',part2='1',part3='1') select key,value from mysrc");
-    Assert.assertEquals(0,ret.getResponseCode());
+    //insert into test
+    ret = driver.run("insert into table " + tableName + " partition(part1='1') select key,value from mysrc where part1='1' and part2='1'");
+    Assert.assertEquals(0, ret.getResponseCode());
 
-    assertExistence(warehouseDir + "/" + tableName);
-    setPermission(warehouseDir + "/" + tableName, 1);
+    verifyPermission(warehouseDir + "/" + tableName);
+    verifyPermission(warehouseDir + "/" + tableName + "/part1=1");
 
-    //alter partition
-    ret = driver.run("alter table " + tableName + " partition (part1='1',part2='1',part3='1') rename to partition (part1='2',part2='2',part3='2')");
-    Assert.assertEquals(0,ret.getResponseCode());
+    Assert.assertTrue(listStatus(warehouseDir + "/" + tableName + "/part1=1").size() > 0);
+    for (String child : listStatus(warehouseDir + "/" + tableName + "/part1=1")) {
+      verifyPermission(child);
+    }
 
-    verifyPermission(warehouseDir + "/" + tableName + "/part1=2", 1);
-    verifyPermission(warehouseDir + "/" + tableName + "/part1=2/part2=2", 1);
-    verifyPermission(warehouseDir + "/" + tableName + "/part1=2/part2=2/part3=2", 1);
+    //insert overwrite test
+    setPermission(warehouseDir + "/" + tableName, 1);
+    ret = driver.run("insert overwrite table " + tableName + " partition(part1='1') select key,value from mysrc where part1='1' and part2='1'");
+    Assert.assertEquals(0, ret.getResponseCode());
 
-    Assert.assertTrue(listStatus(warehouseDir + "/" + tableName + "/part1=2/part2=2/part3=2").size() > 0);
-    for (String child : listStatus(warehouseDir + "/" + tableName + "/part1=2/part2=2/part3=2")) {
+    verifyPermission(warehouseDir + "/" + tableName, 1);
+    verifyPermission(warehouseDir + "/" + tableName + "/part1=1", 1);
+
+    Assert.assertTrue(listStatus(warehouseDir + "/" + tableName + "/part1=1").size() > 0);
+    for (String child : listStatus(warehouseDir + "/" + tableName + "/part1=1")) {
       verifyPermission(child, 1);
     }
   }
 
-
   @Test
-  public void testDynamicPartitions() throws Exception {
-    String tableName = "dynamicpart";
-
+  public void testInsertStaticDualPartition() throws Exception {
+    String tableName = "dualstaticpart";
     CommandProcessorResponse ret = driver.run("CREATE TABLE " + tableName + " (key string, value string) partitioned by (part1 string, part2 string)");
-    Assert.assertEquals(0,ret.getResponseCode());
+    Assert.assertEquals(0, ret.getResponseCode());
 
     assertExistence(warehouseDir + "/" + tableName);
     setPermission(warehouseDir + "/" + tableName);
 
-    ret = driver.run("insert into table " + tableName + " partition (part1,part2) select key,value,part1,part2 from mysrc");
-    Assert.assertEquals(0,ret.getResponseCode());
+    //insert into test
+    ret = driver.run("insert into table " + tableName + " partition(part1='1', part2='1') select key,value from mysrc where part1='1' and part2='1'");
+    Assert.assertEquals(0, ret.getResponseCode());
 
+    verifyPermission(warehouseDir + "/" + tableName);
     verifyPermission(warehouseDir + "/" + tableName + "/part1=1");
     verifyPermission(warehouseDir + "/" + tableName + "/part1=1/part2=1");
 
-    verifyPermission(warehouseDir + "/" + tableName + "/part1=2");
-    verifyPermission(warehouseDir + "/" + tableName + "/part1=2/part2=2");
-
     Assert.assertTrue(listStatus(warehouseDir + "/" + tableName + "/part1=1/part2=1").size() > 0);
     for (String child : listStatus(warehouseDir + "/" + tableName + "/part1=1/part2=1")) {
       verifyPermission(child);
     }
 
-    Assert.assertTrue(listStatus(warehouseDir + "/" + tableName + "/part1=2/part2=2").size() > 0);
-    for (String child : listStatus(warehouseDir + "/" + tableName + "/part1=2/part2=2")) {
-      verifyPermission(child);
+    //insert overwrite test
+    setPermission(warehouseDir + "/" + tableName, 1);
+    ret = driver.run("insert overwrite table " + tableName + " partition(part1='1', part2='1') select key,value from mysrc where part1='1' and part2='1'");
+    Assert.assertEquals(0, ret.getResponseCode());
+
+    verifyPermission(warehouseDir + "/" + tableName, 1);
+    verifyPermission(warehouseDir + "/" + tableName + "/part1=1", 1);
+    verifyPermission(warehouseDir + "/" + tableName + "/part1=1/part2=1", 1);
+
+    Assert.assertTrue(listStatus(warehouseDir + "/" + tableName + "/part1=1/part2=1").size() > 0);
+    for (String child : listStatus(warehouseDir + "/" + tableName + "/part1=1/part2=1")) {
+      verifyPermission(child, 1);
     }
   }
 
   @Test
-  public void testExternalTable() throws Exception {
-    String tableName = "externaltable";
+  public void testInsertDualDynamicPartitions() throws Exception {
+    String tableName = "dualdynamicpart";
 
-    String myLocation = warehouseDir + "/myfolder";
-    FileSystem fs = FileSystem.get(new URI(myLocation), conf);
-    fs.mkdirs(new Path(myLocation));
-    setPermission(myLocation);
+    CommandProcessorResponse ret = driver.run("CREATE TABLE " + tableName + " (key string, value string) partitioned by (part1 string, part2 string)");
+    Assert.assertEquals(0, ret.getResponseCode());
+    assertExistence(warehouseDir + "/" + tableName);
 
-    CommandProcessorResponse ret = driver.run("CREATE TABLE " + tableName + " (key string, value string) LOCATION '" + myLocation + "'");
-    Assert.assertEquals(0,ret.getResponseCode());
+    //Insert into test, with permission set 0.
+    setPermission(warehouseDir + "/" + tableName, 0);
+    ret = driver.run("insert into table " + tableName + " partition (part1,part2) select key,value,part1,part2 from mysrc");
+    Assert.assertEquals(0, ret.getResponseCode());
 
-    ret = driver.run("insert into table " + tableName + " select key,value from mysrc");
-    Assert.assertEquals(0,ret.getResponseCode());
+    verifyDualPartitionTable(warehouseDir + "/" + tableName, 0);
 
-    Assert.assertTrue(listStatus(myLocation).size() > 0);
-    for (String child : listStatus(myLocation)) {
-      verifyPermission(child);
-    }
+    //Insert overwrite test, with permission set 1.
+    setPermission(warehouseDir + "/" + tableName, 1);
+    ret = driver.run("insert overwrite table " + tableName + " partition (part1,part2) select key,value,part1,part2 from mysrc");
+    Assert.assertEquals(0, ret.getResponseCode());
+
+    verifyDualPartitionTable(warehouseDir + "/" + tableName, 1);
   }
 
   @Test
-  public void testInsert() throws Exception {
-    //case 1 is non-partitioned table.
-    String tableName = "insert";
+  public void testInsertSingleDynamicPartition() throws Exception {
+    String tableName = "singledynamicpart";
 
-    CommandProcessorResponse ret = driver.run("CREATE TABLE " + tableName + " (key string, value string)");
+    CommandProcessorResponse ret = driver.run("CREATE TABLE " + tableName + " (key string, value string) partitioned by (part1 string)");
     Assert.assertEquals(0,ret.getResponseCode());
-
     String tableLoc = warehouseDir + "/" + tableName;
-    assertExistence(warehouseDir + "/" + tableName);
+    assertExistence(tableLoc);
 
-    //case1A: insert into non-partitioned table.
+    //Insert into test, with permission set 0.
+    setPermission(tableLoc, 0);
+    ret = driver.run("insert into table " + tableName + " partition (part1) select key,value,part1 from mysrc");
+    Assert.assertEquals(0,ret.getResponseCode());
+    verifySinglePartition(tableLoc, 0);
+
+    //Insert overwrite test, with permission set 1.
+    setPermission(tableLoc, 1);
+    ret = driver.run("insert overwrite table " + tableName + " partition (part1) select key,value,part1 from mysrc");
+    Assert.assertEquals(0,ret.getResponseCode());
+    verifySinglePartition(tableLoc, 1);
+
+    //delete and re-insert using insert overwrite.  There are different code paths for insert vs. insert overwrite on new tables.
+    ret = driver.run("DROP TABLE " + tableName);
+    Assert.assertEquals(0, ret.getResponseCode());
+    ret = driver.run("CREATE TABLE " + tableName + " (key string, value string) partitioned by (part1 string)");
+    Assert.assertEquals(0, ret.getResponseCode());
+
+    assertExistence(warehouseDir + "/" + tableName);
     setPermission(warehouseDir + "/" + tableName);
-    ret = driver.run("insert into table " + tableName + " select key,value from mysrc");
+
+    ret = driver.run("insert overwrite table " + tableName + " partition (part1) select key,value,part1 from mysrc");
+    Assert.assertEquals(0, ret.getResponseCode());
+
+    verifySinglePartition(tableLoc, 0);
+  }
+
+  @Test
+  public void testAlterPartition() throws Exception {
+    String tableName = "alterpart";
+    CommandProcessorResponse ret = driver.run("CREATE TABLE " + tableName + " (key string, value string) partitioned by (part1 int, part2 int, part3 int)");
     Assert.assertEquals(0,ret.getResponseCode());
 
-    Assert.assertTrue(listStatus(tableLoc).size() > 0);
-    for (String child : listStatus(tableLoc)) {
-      verifyPermission(child);
-    }
+    assertExistence(warehouseDir + "/" + tableName);
+    setPermission(warehouseDir + "/" + tableName);
 
-    //case1B: insert overwrite non-partitioned-table
+    ret = driver.run("insert into table " + tableName + " partition(part1='1',part2='1',part3='1') select key,value from mysrc");
+    Assert.assertEquals(0,ret.getResponseCode());
+
+    assertExistence(warehouseDir + "/" + tableName);
     setPermission(warehouseDir + "/" + tableName, 1);
-    ret = driver.run("insert overwrite table " + tableName + " select key,value from mysrc");
+
+    //alter partition
+    ret = driver.run("alter table " + tableName + " partition (part1='1',part2='1',part3='1') rename to partition (part1='2',part2='2',part3='2')");
     Assert.assertEquals(0,ret.getResponseCode());
 
-    Assert.assertTrue(listStatus(tableLoc).size() > 0);
-    for (String child : listStatus(tableLoc)) {
+    verifyPermission(warehouseDir + "/" + tableName + "/part1=2", 1);
+    verifyPermission(warehouseDir + "/" + tableName + "/part1=2/part2=2", 1);
+    verifyPermission(warehouseDir + "/" + tableName + "/part1=2/part2=2/part3=2", 1);
+
+    Assert.assertTrue(listStatus(warehouseDir + "/" + tableName + "/part1=2/part2=2/part3=2").size() > 0);
+    for (String child : listStatus(warehouseDir + "/" + tableName + "/part1=2/part2=2/part3=2")) {
       verifyPermission(child, 1);
     }
+  }
 
-    //case 2 is partitioned table.
-    tableName = "insertpartition";
+  @Test
+  public void testExternalTable() throws Exception {
+    String tableName = "externaltable";
 
-    ret = driver.run("CREATE TABLE " + tableName + " (key string, value string) partitioned by (part1 int, part2 int)");
-    Assert.assertEquals(0,ret.getResponseCode());
+    String myLocation = warehouseDir + "/myfolder";
+    FileSystem fs = FileSystem.get(new URI(myLocation), conf);
+    fs.mkdirs(new Path(myLocation));
+    setPermission(myLocation);
 
-    ret = driver.run("insert overwrite table " + tableName + " partition(part1='1',part2='1') select key,value from mysrc");
+    CommandProcessorResponse ret = driver.run("CREATE TABLE " + tableName + " (key string, value string) LOCATION '" + myLocation + "'");
     Assert.assertEquals(0,ret.getResponseCode());
 
-    String partLoc = warehouseDir + "/" + tableName + "/part1=1/part2=1";
-    assertExistence(partLoc);
-
-    //case 2A: insert into partitioned table.
-    setPermission(partLoc);
-    ret = driver.run("insert overwrite table " + tableName + " partition(part1='1',part2='1') select key,value from mysrc");
+    ret = driver.run("insert into table " + tableName + " select key,value from mysrc");
     Assert.assertEquals(0,ret.getResponseCode());
 
-    Assert.assertTrue(listStatus(partLoc).size() > 0);
-    for (String child : listStatus(partLoc)) {
+    Assert.assertTrue(listStatus(myLocation).size() > 0);
+    for (String child : listStatus(myLocation)) {
       verifyPermission(child);
     }
-
-    //case 2B: insert into non-partitioned table.
-    setPermission(partLoc, 1);
-    ret = driver.run("insert overwrite table " + tableName + " partition(part1='1',part2='1') select key,value from mysrc");
-    Assert.assertEquals(0,ret.getResponseCode());
-
-    Assert.assertTrue(listStatus(tableLoc).size() > 0);
-    for (String child : listStatus(partLoc)) {
-      verifyPermission(child, 1);
-    }
   }
 
   @Test
@@ -422,7 +471,7 @@ public abstract class FolderPermissionBa
     }
 
     //case 2B: insert data overwrite into non-partitioned table.
-    setPermission(partLoc, 1);
+    setPermission(tableLoc, 1);
     ret = driver.run("LOAD DATA LOCAL INPATH '" + dataFilePath + "' OVERWRITE INTO TABLE " + tableName + " PARTITION (part1='1',part2='1')");
     Assert.assertEquals(0,ret.getResponseCode());
 
@@ -487,7 +536,7 @@ public abstract class FolderPermissionBa
     }
 
     //case 2B: insert data overwrite into non-partitioned table.
-    setPermission(partLoc, 1);
+    setPermission(tableLoc, 1);
     fs.copyFromLocalFile(dataFilePath, new Path(location));
     ret = driver.run("LOAD DATA INPATH '" + location + "' OVERWRITE INTO TABLE " + tableName + " PARTITION (part1='1',part2='1')");
     Assert.assertEquals(0,ret.getResponseCode());
@@ -596,6 +645,40 @@ public abstract class FolderPermissionBa
     }
   }
 
+  private void verifySinglePartition(String tableLoc, int index) throws Exception {
+    verifyPermission(tableLoc + "/part1=1", index);
+    verifyPermission(tableLoc + "/part1=2", index);
+
+    Assert.assertTrue(listStatus(tableLoc + "/part1=1").size() > 0);
+    for (String child : listStatus(tableLoc + "/part1=1")) {
+      verifyPermission(child, index);
+    }
+
+    Assert.assertTrue(listStatus(tableLoc + "/part1=2").size() > 0);
+    for (String child : listStatus(tableLoc + "/part1=2")) {
+      verifyPermission(child, index);
+    }
+  }
+
+  private void verifyDualPartitionTable(String baseTablePath, int index) throws Exception {
+    verifyPermission(baseTablePath, index);
+    verifyPermission(baseTablePath + "/part1=1", index);
+    verifyPermission(baseTablePath + "/part1=1/part2=1", index);
+
+    verifyPermission(baseTablePath + "/part1=2", index);
+    verifyPermission(baseTablePath + "/part1=2/part2=2", index);
+
+    Assert.assertTrue(listStatus(baseTablePath + "/part1=1/part2=1").size() > 0);
+    for (String child : listStatus(baseTablePath + "/part1=1/part2=1")) {
+      verifyPermission(child, index);
+    }
+
+    Assert.assertTrue(listStatus(baseTablePath + "/part1=2/part2=2").size() > 0);
+    for (String child : listStatus(baseTablePath + "/part1=2/part2=2")) {
+      verifyPermission(child, index);
+    }
+  }
+
   private void assertExistence(String locn) throws Exception {
     Assert.assertTrue(fs.exists(new Path(locn)));
   }

Modified: hive/trunk/itests/qtest/pom.xml
URL: http://svn.apache.org/viewvc/hive/trunk/itests/qtest/pom.xml?rev=1653062&r1=1653061&r2=1653062&view=diff
==============================================================================
--- hive/trunk/itests/qtest/pom.xml (original)
+++ hive/trunk/itests/qtest/pom.xml Mon Jan 19 17:28:53 2015
@@ -435,7 +435,7 @@
                   templatePath="${basedir}/${hive.path.to.root}/ql/src/test/templates/" template="TestCliDriver.vm"
                   queryDirectory="${basedir}/${hive.path.to.root}/ql/src/test/queries/clientpositive/"
                   queryFile="${qfile}"
-                  excludeQueryFile="${minimr.query.files},${minitez.query.files}"
+                  excludeQueryFile="${minimr.query.files},${minitez.query.files},${encrypted.query.files}"
                   queryFileRegex="${qfile_regex}"
                   clusterMode="${clustermode}"
                   runDisabled="${run_disabled}"
@@ -516,6 +516,24 @@
                               hadoopVersion="${active.hadoop.version}"
                               initScript="q_test_init.sql"
                               cleanupScript="q_test_cleanup.sql"/>
+
+                    <qtestgen hiveRootDirectory="${basedir}/${hive.path.to.root}/"
+                              outputDirectory="${project.build.directory}/generated-test-sources/java/org/apache/hadoop/hive/cli/"
+                              templatePath="${basedir}/${hive.path.to.root}/ql/src/test/templates/" template="TestCliDriver.vm"
+                              queryDirectory="${basedir}/${hive.path.to.root}/ql/src/test/queries/clientpositive/"
+                              queryFile="${qfile}"
+                              includeQueryFile="${encrypted.query.files}"
+                              queryFileRegex="${qfile_regex}"
+                              clusterMode="encrypted"
+                              runDisabled="${run_disabled}"
+                              hiveConfDir="${basedir}/${hive.path.to.root}/data/conf"
+                              resultsDirectory="${basedir}/${hive.path.to.root}/ql/src/test/results/clientpositive/encrypted"
+                              className="TestEncryptedHDFSCliDriver"
+                              logFile="${project.build.directory}/testencryptedhdfsclidrivergen.log"
+                              logDirectory="${project.build.directory}/qfile-results/clientpositive/"
+                              hadoopVersion="${active.hadoop.version}"
+                              initScript="q_test_init_for_encryption.sql"
+                              cleanupScript="q_test_cleanup_for_encryption.sql"/>
                   </then>
                   <else>
                   </else>

Modified: hive/trunk/itests/src/test/resources/testconfiguration.properties
URL: http://svn.apache.org/viewvc/hive/trunk/itests/src/test/resources/testconfiguration.properties?rev=1653062&r1=1653061&r2=1653062&view=diff
==============================================================================
--- hive/trunk/itests/src/test/resources/testconfiguration.properties (original)
+++ hive/trunk/itests/src/test/resources/testconfiguration.properties Mon Jan 19 17:28:53 2015
@@ -298,6 +298,14 @@ minitez.query.files=bucket_map_join_tez1
   vectorized_dynamic_partition_pruning.q,\
   tez_multi_union.q
 
+encrypted.query.files=encryption_join_unencrypted_tbl.q,\
+  encryption_insert_partition_static.q,\
+  encryption_insert_partition_dynamic.q,\
+  encryption_join_with_different_encryption_keys.q,\
+  encryption_select_read_only_encrypted_tbl.q,\
+  encryption_select_read_only_unencrypted_tbl.q,\
+  encryption_load_data_to_encrypted_tables.q
+
 beeline.positive.exclude=add_part_exist.q,\
   alter1.q,\
   alter2.q,\

Modified: hive/trunk/itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestUtil.java
URL: http://svn.apache.org/viewvc/hive/trunk/itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestUtil.java?rev=1653062&r1=1653061&r2=1653062&view=diff
==============================================================================
--- hive/trunk/itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestUtil.java (original)
+++ hive/trunk/itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestUtil.java Mon Jan 19 17:28:53 2015
@@ -38,8 +38,9 @@ import java.io.OutputStreamWriter;
 import java.io.PrintStream;
 import java.io.Serializable;
 import java.io.StringWriter;
-import java.lang.System;
+import java.lang.RuntimeException;
 import java.net.URL;
+import java.sql.SQLException;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collection;
@@ -55,6 +56,7 @@ import java.util.regex.Pattern;
 
 import junit.framework.Assert;
 
+import org.apache.commons.lang.StringUtils;
 import org.apache.commons.io.FileUtils;
 import org.apache.commons.io.IOUtils;
 import org.apache.commons.logging.Log;
@@ -86,6 +88,10 @@ import org.apache.hadoop.hive.ql.parse.P
 import org.apache.hadoop.hive.ql.parse.ParseException;
 import org.apache.hadoop.hive.ql.parse.SemanticAnalyzer;
 import org.apache.hadoop.hive.ql.parse.SemanticException;
+import org.apache.hadoop.hive.ql.processors.CommandProcessor;
+import org.apache.hadoop.hive.ql.processors.CommandProcessorFactory;
+import org.apache.hadoop.hive.ql.processors.CommandProcessorResponse;
+import org.apache.hadoop.hive.ql.processors.HiveCommand;
 import org.apache.hadoop.hive.ql.session.SessionState;
 import org.apache.hadoop.hive.shims.HadoopShims;
 import org.apache.hadoop.hive.shims.ShimLoader;
@@ -105,10 +111,16 @@ import com.google.common.collect.Immutab
 public class QTestUtil {
 
   public static final String UTF_8 = "UTF-8";
+
+  // security property names
+  private static final String SECURITY_KEY_PROVIDER_URI_NAME = "dfs.encryption.key.provider.uri";
+  private static final String CRLF = System.getProperty("line.separator");
+
   private static final Log LOG = LogFactory.getLog("QTestUtil");
   private static final String QTEST_LEAVE_FILES = "QTEST_LEAVE_FILES";
   private final String defaultInitScript = "q_test_init.sql";
   private final String defaultCleanupScript = "q_test_cleanup.sql";
+  private final String[] testOnlyCommands = new String[]{"crypto"};
 
   private String testWarehouse;
   private final String testFiles;
@@ -133,6 +145,8 @@ public class QTestUtil {
   private CliDriver cliDriver;
   private HadoopShims.MiniMrShim mr = null;
   private HadoopShims.MiniDFSShim dfs = null;
+  private HadoopShims.HdfsEncryptionShim hes = null;
+  private boolean miniMr = false;
   private String hadoopVer = null;
   private QTestSetup setup = null;
   private boolean isSessionStateStarted = false;
@@ -257,7 +271,7 @@ public class QTestUtil {
 
     // Plug verifying metastore in for testing.
     conf.setVar(HiveConf.ConfVars.METASTORE_RAW_STORE_IMPL,
-        "org.apache.hadoop.hive.metastore.VerifyingObjectStore");
+      "org.apache.hadoop.hive.metastore.VerifyingObjectStore");
 
     if (mr != null) {
       assert dfs != null;
@@ -284,6 +298,7 @@ public class QTestUtil {
     mr,
     tez,
     spark,
+    encrypted,
     none;
 
     public static MiniClusterType valueForString(String type) {
@@ -293,6 +308,8 @@ public class QTestUtil {
         return tez;
       } else if (type.equals("spark")) {
         return spark;
+      } else if (type.equals("encrypted")) {
+        return encrypted;
       } else {
         return none;
       }
@@ -305,6 +322,15 @@ public class QTestUtil {
     this(outDir, logDir, clusterType, null, hadoopVer, initScript, cleanupScript);
   }
 
+  private String getKeyProviderURI() {
+    // Use the target directory if it is not specified
+    String HIVE_ROOT = QTestUtil.ensurePathEndsInSlash(System.getProperty("hive.root"));
+    String keyDir = HIVE_ROOT + "ql/target/";
+
+    // put the jks file in the current test path only for test purposes
+    return "jceks://file" + new Path(keyDir, "test.jks").toUri();
+  }
+
   public QTestUtil(String outDir, String logDir, MiniClusterType clusterType,
       String confDir, String hadoopVer, String initScript, String cleanupScript)
     throws Exception {
@@ -329,8 +355,25 @@ public class QTestUtil {
     int numberOfDataNodes = 4;
 
     if (clusterType != MiniClusterType.none && clusterType != MiniClusterType.spark) {
-      dfs = shims.getMiniDfs(conf, numberOfDataNodes, true, null);
-      FileSystem fs = dfs.getFileSystem();
+      FileSystem fs = null;
+
+      if (clusterType == MiniClusterType.encrypted) {
+        // Set the security key provider so that the MiniDFS cluster is initialized
+        // with encryption
+        conf.set(SECURITY_KEY_PROVIDER_URI_NAME, getKeyProviderURI());
+
+        dfs = shims.getMiniDfs(conf, numberOfDataNodes, true, null);
+        fs = dfs.getFileSystem();
+
+        // set up the java key provider for encrypted hdfs cluster
+        hes = shims.createHdfsEncryptionShim(fs, conf);
+
+        LOG.info("key provider is initialized");
+      } else {
+        dfs = shims.getMiniDfs(conf, numberOfDataNodes, true, null);
+        fs = dfs.getFileSystem();
+      }
+
       String uriString = WindowsPathUtil.getHdfsUriString(fs.getUri().toString());
       if (clusterType == MiniClusterType.tez) {
         mr = shims.getMiniTezCluster(conf, 4, uriString, 1);
@@ -346,7 +389,6 @@ public class QTestUtil {
     if (dataDir == null) {
       dataDir = new File(".").getAbsolutePath() + "/data/files";
     }
-
     testFiles = dataDir;
 
     // Use the current directory if it is not specified
@@ -371,7 +413,10 @@ public class QTestUtil {
   }
 
   public void shutdown() throws Exception {
-    cleanUp();
+    if (System.getenv(QTEST_LEAVE_FILES) == null) {
+      cleanUp();
+    }
+
     setup.tearDown();
     if (mr != null) {
       mr.shutdown();
@@ -578,6 +623,19 @@ public class QTestUtil {
     setup.postTest(conf);
   }
 
+  public void clearKeysCreatedInTests() {
+    if (hes == null) {
+      return;
+    }
+    try {
+      for (String keyAlias : hes.getKeys()) {
+        hes.deleteKey(keyAlias);
+      }
+    } catch (IOException e) {
+      LOG.error("Fail to clean the keys created in test due to the error", e);
+    }
+  }
+
   /**
    * Clear out any side effects of running tests
    */
@@ -648,12 +706,17 @@ public class QTestUtil {
     }
 
     clearTablesCreatedDuringTests();
+    clearKeysCreatedInTests();
+
+    if (clusterType != MiniClusterType.encrypted) {
+      // allocate and initialize a new conf since a test can
+      // modify conf by using 'set' commands
+      conf = new HiveConf (Driver.class);
+      initConf();
+      // renew the metastore since the cluster type is unencrypted
+      db = Hive.get(conf);  // propagate new conf to meta store
+    }
 
-    // allocate and initialize a new conf since a test can
-    // modify conf by using 'set' commands
-    conf = new HiveConf (Driver.class);
-    initConf();
-    db = Hive.get(conf);  // propagate new conf to meta store
     setup.preTest(conf);
   }
 
@@ -666,6 +729,7 @@ public class QTestUtil {
     }
 
     clearTablesCreatedDuringTests();
+    clearKeysCreatedInTests();
 
     SessionState.get().getConf().setBoolean("hive.test.shutdown.phase", true);
 
@@ -866,23 +930,131 @@ public class QTestUtil {
     try {
       return drv.run(qMap.get(tname)).getResponseCode();
     } catch (CommandNeedRetryException e) {
-      // TODO Auto-generated catch block
+      LOG.error("driver failed to run the command: " + tname + " due to the exception: ", e);
       e.printStackTrace();
       return -1;
     }
   }
 
-  private static final String CRLF = System.getProperty("line.separator");
   public int executeClient(String tname1, String tname2) {
-    String commands = getCommands(tname1) + CRLF + getCommands(tname2);
-    return cliDriver.processLine(commands);
+    String commands = getCommand(tname1) + CRLF + getCommand(tname2);
+    return executeClientInternal(commands);
   }
 
   public int executeClient(String tname) {
-    return cliDriver.processLine(getCommands(tname));
+    return executeClientInternal(getCommand(tname));
   }
 
-  private String getCommands(String tname) {
+  private int executeClientInternal(String commands) {
+    String [] cmds = commands.split(";");
+    int rc = 0;
+
+    String command = "";
+    for (String oneCmd : cmds) {
+      if (StringUtils.endsWith(oneCmd, "\\")) {
+        command += StringUtils.chop(oneCmd) + "\\;";
+        continue;
+      } else {
+        if (isHiveCommand(oneCmd)) {
+          command = oneCmd;
+        } else {
+          command += oneCmd;
+        }
+      }
+      if (StringUtils.isBlank(command)) {
+        continue;
+      }
+
+      if (isCommandUsedForTesting(command)) {
+        rc = executeTestCommand(command);
+      } else {
+        rc = cliDriver.processLine(command);
+      }
+
+      if (rc != 0) {
+        break;
+      }
+      command = "";
+    }
+
+    return rc;
+  }
+
+  private boolean isHiveCommand(String command) {
+    String[] cmd = command.trim().split("\\s+");
+    if (HiveCommand.find(cmd) != null) {
+      return true;
+    } else if (HiveCommand.find(cmd, HiveCommand.ONLY_FOR_TESTING) != null) {
+      return true;
+    } else {
+      return false;
+    }
+  }
+
+  private int executeTestCommand(final String command) {
+    String commandName = command.trim().split("\\s+")[0];
+    String commandArgs = command.trim().substring(commandName.length());
+
+    if (commandArgs.endsWith(";")) {
+      commandArgs = StringUtils.chop(commandArgs);
+    }
+
+    //replace ${hiveconf:hive.metastore.warehouse.dir} with the actual dir, if it exists.
+    //we only want the absolute path, so strip the scheme and authority, such as hdfs://localhost:57145
+    String wareHouseDir = SessionState.get().getConf().getVar(ConfVars.METASTOREWAREHOUSE)
+        .replaceAll("^[a-zA-Z]+://.*?:\\d+", "");
+    commandArgs = commandArgs.replaceAll("\\$\\{hiveconf:hive\\.metastore\\.warehouse\\.dir\\}",
+      wareHouseDir);
+
+    enableTestOnlyCmd(SessionState.get().getConf());
+
+    try {
+      CommandProcessor proc = getTestCommand(commandName);
+      if (proc != null) {
+        CommandProcessorResponse response = proc.run(commandArgs.trim());
+
+        int rc = response.getResponseCode();
+        if (rc != 0) {
+          SessionState.get().out.println(response);
+        }
+
+        return rc;
+      } else {
+        throw new RuntimeException("Could not get CommandProcessor for command: " + commandName);
+      }
+    } catch (Exception e) {
+      throw new RuntimeException("Could not execute test command: " + e.getMessage());
+    }
+  }
+
+  private CommandProcessor getTestCommand(final String commandName) throws SQLException {
+    HiveCommand testCommand = HiveCommand.find(new String[]{commandName}, HiveCommand.ONLY_FOR_TESTING);
+
+    if (testCommand == null) {
+      return null;
+    }
+
+    return CommandProcessorFactory
+      .getForHiveCommandInternal(new String[]{commandName}, SessionState.get().getConf(),
+        testCommand.isOnlyForTesting());
+  }
+
+  private void enableTestOnlyCmd(HiveConf conf){
+    StringBuilder securityCMDs = new StringBuilder(conf.getVar(HiveConf.ConfVars.HIVE_SECURITY_COMMAND_WHITELIST));
+    for(String c : testOnlyCommands){
+      securityCMDs.append(",");
+      securityCMDs.append(c);
+    }
+    conf.set(HiveConf.ConfVars.HIVE_SECURITY_COMMAND_WHITELIST.toString(), securityCMDs.toString());
+  }
+
+  private boolean isCommandUsedForTesting(final String command) {
+    String commandName = command.trim().split("\\s+")[0];
+    HiveCommand testCommand = HiveCommand.find(new String[]{commandName}, HiveCommand.ONLY_FOR_TESTING);
+    return testCommand != null;
+  }
+
+  private String getCommand(String tname) {
     String commands = qMap.get(tname);
     StringBuilder newCommands = new StringBuilder(commands.length());
     int lastMatchEnd = 0;
@@ -897,6 +1069,11 @@ public class QTestUtil {
     return commands;
   }
 
+  private boolean isComment(final String line) {
+    String lineTrimmed = line.trim();
+    return lineTrimmed.startsWith("#") || lineTrimmed.startsWith("--");
+  }
+
   public boolean shouldBeSkipped(String tname) {
     return qSkipSet.contains(tname);
   }
@@ -921,7 +1098,7 @@ public class QTestUtil {
 
     // Move all data from dest4_sequencefile to dest4
     drv
-        .run("FROM dest4_sequencefile INSERT OVERWRITE TABLE dest4 SELECT dest4_sequencefile.*");
+      .run("FROM dest4_sequencefile INSERT OVERWRITE TABLE dest4 SELECT dest4_sequencefile.*");
 
     // Drop dest4_sequencefile
     db.dropTable(MetaStoreUtils.DEFAULT_DATABASE_NAME, "dest4_sequencefile",
@@ -1173,6 +1350,7 @@ public class QTestUtil {
 
   private void maskPatterns(Pattern[] patterns, String fname) throws Exception {
     String maskPattern = "#### A masked pattern was here ####";
+    String partialMaskPattern = "#### A PARTIAL masked pattern was here ####";
 
     String line;
     BufferedReader in;
@@ -1186,9 +1364,24 @@ public class QTestUtil {
     out = new BufferedWriter(new OutputStreamWriter(new FileOutputStream(file), "UTF-8"));
 
     boolean lastWasMasked = false;
+    boolean partialMaskWasMatched = false;
+    Matcher matcher;
     while (null != (line = in.readLine())) {
-      for (Pattern pattern : patterns) {
-        line = pattern.matcher(line).replaceAll(maskPattern);
+      if (clusterType == MiniClusterType.encrypted) {
+        for (Pattern pattern : partialReservedPlanMask) {
+          matcher = pattern.matcher(line);
+          if (matcher.find()) {
+            line = partialMaskPattern + " " + matcher.group(0);
+            partialMaskWasMatched = true;
+            break;
+          }
+        }
+      }
+
+      if (!partialMaskWasMatched) {
+        for (Pattern pattern : patterns) {
+          line = pattern.matcher(line).replaceAll(maskPattern);
+        }
       }
 
       if (line.equals(maskPattern)) {
@@ -1202,6 +1395,7 @@ public class QTestUtil {
         out.write(line);
         out.write("\n");
         lastWasMasked = false;
+        partialMaskWasMatched = false;
       }
     }
 
@@ -1241,7 +1435,13 @@ public class QTestUtil {
       ".*DagName:.*",
       ".*Input:.*/data/files/.*",
       ".*Output:.*/data/files/.*",
-      ".*total number of created files now is.*"
+      ".*total number of created files now is.*",
+      ".*.hive-staging.*"
+  });
+
+  private final Pattern[] partialReservedPlanMask = toPattern(new String[] {
+      "data/warehouse/(.*?/)+\\.hive-staging"  // the directory might be db/table/partition
+      //TODO: add more expected test result here
   });
 
   public int checkCliDriverResults(String tname) throws Exception {
@@ -1640,8 +1840,10 @@ public class QTestUtil {
   }
 
   public static void outputTestFailureHelpMessage() {
-    System.err.println("See ./ql/target/tmp/log/hive.log or ./itests/qtest/target/tmp/log/hive.log, "
-        + "or check ./ql/target/surefire-reports or ./itests/qtest/target/surefire-reports/ for specific test cases logs.");
+    System.err.println(
+      "See ./ql/target/tmp/log/hive.log or ./itests/qtest/target/tmp/log/hive.log, or check " +
+        "./ql/target/surefire-reports or ./itests/qtest/target/surefire-reports/ for specific " +
+        "test case logs.");
     System.err.flush();
   }
 

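A note on the partial-mask flow added above: on the encrypted mini cluster the staging
directories are created under the warehouse path itself, so the usual full-line mask would
erase which database/table/partition was being staged. The loop therefore keeps the matched
"data/warehouse/.../.hive-staging" fragment in the golden file. A minimal, self-contained
sketch of that matching step (class, method, and host names here are illustrative, not part
of the patch):

    import java.util.regex.Matcher;
    import java.util.regex.Pattern;

    public class PartialMaskSketch {
      private static final String PARTIAL_MASK = "#### A PARTIAL masked pattern was here ####";
      private static final Pattern STAGING =
          Pattern.compile("data/warehouse/(.*?/)+\\.hive-staging");

      /** Replaces a staging-path line with the partial mask plus the matched fragment. */
      static String maskLine(String line) {
        Matcher m = STAGING.matcher(line);
        if (m.find()) {
          return PARTIAL_MASK + " " + m.group(0);
        }
        return line;  // unmatched lines fall through to the full mask patterns
      }

      public static void main(String[] args) {
        System.out.println(maskLine(
            "Output: hdfs://nn:8020/data/warehouse/db/tbl/.hive-staging_hive_1/-ext-10000"));
        // prints: #### A PARTIAL masked pattern was here #### data/warehouse/db/tbl/.hive-staging
      }
    }
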
Modified: hive/trunk/itests/util/src/main/java/org/apache/hadoop/hive/ql/hooks/VerifyTableDirectoryIsEmptyHook.java
URL: http://svn.apache.org/viewvc/hive/trunk/itests/util/src/main/java/org/apache/hadoop/hive/ql/hooks/VerifyTableDirectoryIsEmptyHook.java?rev=1653062&r1=1653061&r2=1653062&view=diff
==============================================================================
--- hive/trunk/itests/util/src/main/java/org/apache/hadoop/hive/ql/hooks/VerifyTableDirectoryIsEmptyHook.java (original)
+++ hive/trunk/itests/util/src/main/java/org/apache/hadoop/hive/ql/hooks/VerifyTableDirectoryIsEmptyHook.java Mon Jan 19 17:28:53 2015
@@ -17,8 +17,10 @@
  */
 package org.apache.hadoop.hive.ql.hooks;
 
+import java.util.Arrays;
 import java.io.IOException;
 
+import org.apache.hadoop.hive.common.FileUtils;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hive.ql.session.SessionState;
@@ -30,7 +32,7 @@ public class VerifyTableDirectoryIsEmpty
     for (WriteEntity output : hookContext.getOutputs()) {
       Path tableLocation = new Path(output.getTable().getDataLocation().toString());
       FileSystem fs = tableLocation.getFileSystem(SessionState.get().getConf());
-      assert(fs.listStatus(tableLocation).length == 0);
+      assert(fs.listStatus(tableLocation, FileUtils.HIDDEN_FILES_PATH_FILTER).length == 0);
     }
   }
 }

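The hook fix above is one instance of a pattern repeated throughout this commit: every
listStatus() that feeds a correctness check or query input now passes
FileUtils.HIDDEN_FILES_PATH_FILTER, because staging data now lives inside the table
directory and must not be mistaken for table contents. Judging by the anonymous PathFilter
this commit removes from BucketizedHiveInputFormat further down, the shared constant
presumably has this shape (a sketch, not the actual FileUtils source):

    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.fs.PathFilter;

    public final class HiddenFilesFilterSketch {
      // Rejects names starting with "_" or ".", e.g. _SUCCESS markers and
      // .hive-staging_* directories.
      public static final PathFilter HIDDEN_FILES_PATH_FILTER = new PathFilter() {
        @Override
        public boolean accept(Path p) {
          String name = p.getName();
          return !name.startsWith("_") && !name.startsWith(".");
        }
      };
    }
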
Modified: hive/trunk/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SkewedInfo.java
URL: http://svn.apache.org/viewvc/hive/trunk/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SkewedInfo.java?rev=1653062&r1=1653061&r2=1653062&view=diff
==============================================================================
--- hive/trunk/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SkewedInfo.java (original)
+++ hive/trunk/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SkewedInfo.java Mon Jan 19 17:28:53 2015
@@ -184,7 +184,7 @@ public class SkewedInfo implements org.a
 
         __this__skewedColValueLocationMaps.put(__this__skewedColValueLocationMaps_copy_key, __this__skewedColValueLocationMaps_copy_value);
       }
-      this.skewedColValueLocationMaps = __this__skewedColValueLocationMaps;
+        this.skewedColValueLocationMaps = __this__skewedColValueLocationMaps;
     }
   }
 

Modified: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/Context.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/Context.java?rev=1653062&r1=1653061&r2=1653062&view=diff
==============================================================================
--- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/Context.java (original)
+++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/Context.java Mon Jan 19 17:28:53 2015
@@ -90,7 +90,7 @@ public class Context {
   protected int tryCount = 0;
   private TokenRewriteStream tokenRewriteStream;
 
-  String executionId;
+  private String executionId;
 
   // List of Locks for this query
   protected List<HiveLock> hiveLocks;
@@ -112,6 +112,8 @@ public class Context {
   private final Map<WriteEntity, List<HiveLockObj>> outputLockObjects =
       new HashMap<WriteEntity, List<HiveLockObj>>();
 
+  private final String stagingDir;
+
   public Context(Configuration conf) throws IOException {
     this(conf, generateExecutionId());
   }
@@ -129,6 +131,7 @@ public class Context {
     nonLocalScratchPath = new Path(SessionState.getHDFSSessionPath(conf), executionId);
     localScratchDir = new Path(SessionState.getLocalSessionPath(conf), executionId).toUri().getPath();
     scratchDirPermission = HiveConf.getVar(conf, HiveConf.ConfVars.SCRATCHDIRPERMISSION);
+    stagingDir = HiveConf.getVar(conf, HiveConf.ConfVars.STAGINGDIR);
   }
 
 
@@ -188,6 +191,65 @@ public class Context {
   }
 
   /**
+   * Gets a temporary staging directory related to a path.
+   * If the path already contains a staging directory component, returns that staging directory;
+   * otherwise derives a new staging path and, if requested, creates the directory.
+   *
+   * @param inputPath path the staging directory should be derived from
+   * @param mkdir if true, create the staging directory on the file system
+   * @return A temporary staging path.
+   */
+  private Path getStagingDir(Path inputPath, boolean mkdir) {
+    final URI inputPathUri = inputPath.toUri();
+    final String inputPathName = inputPathUri.getPath();
+    final String fileSystem = inputPathUri.getScheme() + ":" + inputPathUri.getAuthority();
+    final FileSystem fs;
+
+    try {
+      fs = inputPath.getFileSystem(conf);
+    } catch (IOException e) {
+      throw new IllegalStateException("Error getting FileSystem for " + inputPath + ": " + e, e);
+    }
+
+    String stagingPathName;
+    if (inputPathName.indexOf(stagingDir) == -1) {
+      stagingPathName = new Path(inputPathName, stagingDir).toString();
+    } else {
+      stagingPathName = inputPathName.substring(0, inputPathName.indexOf(stagingDir) + stagingDir.length());
+    }
+
+    final String key = fileSystem + "-" + stagingPathName + "-" + TaskRunner.getTaskRunnerID();
+
+    Path dir = fsScratchDirs.get(key);
+    if (dir == null) {
+      // Append task-specific info to stagingPathName instead of creating a sub-directory.
+      // This way we don't have to worry about deleting stagingPathName separately at the
+      // end of query execution.
+      dir = fs.makeQualified(new Path(stagingPathName + "_" + this.executionId + "-" + TaskRunner.getTaskRunnerID()));
+
+      LOG.debug("Created staging dir = " + dir + " for path = " + inputPath);
+
+      if (mkdir) {
+        try {
+          if (!FileUtils.mkdir(fs, dir, true, conf)) {
+            throw new IllegalStateException("Cannot create staging directory '" + dir.toString() + "'");
+          }
+
+          if (isHDFSCleanup) {
+            fs.deleteOnExit(dir);
+          }
+        } catch (IOException e) {
+          throw new RuntimeException("Cannot create staging directory '" + dir.toString() + "': " + e.getMessage(), e);
+        }
+      }
+
+      fsScratchDirs.put(key, dir);
+    }
+
+    return dir;
+  }
+
+  /**
    * Get a tmp directory on specified URI
    *
    * @param scheme Scheme of the target FS
@@ -274,14 +336,13 @@ public class Context {
   }
 
   private Path getExternalScratchDir(URI extURI) {
-    return getScratchDir(extURI.getScheme(), extURI.getAuthority(),
-        !explain, nonLocalScratchPath.toUri().getPath());
+    return getStagingDir(new Path(extURI.getScheme(), extURI.getAuthority(), extURI.getPath()), !explain);
   }
 
   /**
    * Remove any created scratch directories.
    */
-  private void removeScratchDir() {
+  public void removeScratchDir() {
     for (Map.Entry<String, Path> entry : fsScratchDirs.entrySet()) {
       try {
         Path p = entry.getValue();
@@ -313,6 +374,10 @@ public class Context {
         (uriStr.indexOf(MR_PREFIX) != -1);
   }
 
+  public Path getMRTmpPath(URI uri) {
+    return new Path(getStagingDir(new Path(uri), !explain), MR_PREFIX + nextPathId());
+  }
+
   /**
    * Get a path to store map-reduce intermediate data in.
    *
@@ -333,10 +398,9 @@ public class Context {
   }
 
   /**
-   * Get a path to store tmp data destined for external URI.
+   * Get a path to store tmp data destined for external Path.
    *
-   * @param extURI
-   *          external URI to which the tmp data has to be eventually moved
+   * @param path external Path to which the tmp data has to be eventually moved
    * @return next available tmp path on the file system corresponding extURI
    */
   public Path getExternalTmpPath(Path path) {
@@ -357,9 +421,7 @@ public class Context {
    * path within /tmp
    */
   public Path getExtTmpPathRelTo(Path path) {
-    URI uri = path.toUri();
-    return new Path (getScratchDir(uri.getScheme(), uri.getAuthority(), !explain,
-        uri.getPath() + Path.SEPARATOR + "_" + this.executionId), EXT_PREFIX + nextPathId());
+    return new Path(getStagingDir(path, !explain), EXT_PREFIX + nextPathId());
   }
 
   /**
@@ -437,7 +499,7 @@ public class Context {
         resFs = resDir.getFileSystem(conf);
         FileStatus status = resFs.getFileStatus(resDir);
         assert status.isDir();
-        FileStatus[] resDirFS = resFs.globStatus(new Path(resDir + "/*"));
+        FileStatus[] resDirFS = resFs.globStatus(new Path(resDir + "/*"), FileUtils.HIDDEN_FILES_PATH_FILTER);
         resDirPaths = new Path[resDirFS.length];
         int pos = 0;
         for (FileStatus resFS : resDirFS) {

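The new getStagingDir() above is the heart of this change: temporary data is now staged
next to its final destination instead of in a shared scratch directory, so the final
rename never has to cross an HDFS encryption zone boundary (HDFS rejects renames between
zones). The directory name is the configured staging prefix (HiveConf.ConfVars.STAGINGDIR)
suffixed with the execution and task-runner ids, so no nested sub-directory needs separate
cleanup. A small sketch of the naming logic, with literal ids standing in for executionId
and TaskRunner.getTaskRunnerID():

    import org.apache.hadoop.fs.Path;

    public class StagingPathSketch {
      /** Reuse an existing staging segment in the path, otherwise append the prefix. */
      static String stagingPathName(String inputPathName, String stagingDir) {
        int idx = inputPathName.indexOf(stagingDir);
        if (idx == -1) {
          return new Path(inputPathName, stagingDir).toString();
        }
        return inputPathName.substring(0, idx + stagingDir.length());
      }

      public static void main(String[] args) {
        // ".hive-staging" mirrors the prefix seen in the test masks; the real value
        // comes from the STAGINGDIR configuration. 42 and 7 are stand-in ids.
        String base = stagingPathName("/user/hive/warehouse/t", ".hive-staging");
        System.out.println(base + "_" + 42 + "-" + 7);
        // prints: /user/hive/warehouse/t/.hive-staging_42-7
      }
    }
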
Modified: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/FetchOperator.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/FetchOperator.java?rev=1653062&r1=1653061&r2=1653062&view=diff
==============================================================================
--- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/FetchOperator.java (original)
+++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/FetchOperator.java Mon Jan 19 17:28:53 2015
@@ -637,10 +637,10 @@ public class FetchOperator implements Se
     boolean recursive = HiveConf.getBoolVar(job, HiveConf.ConfVars.HADOOPMAPREDINPUTDIRRECURSIVE);
     // If this is in acid format always read it recursively regardless of what the jobconf says.
     if (!recursive && !AcidUtils.isAcid(p, job)) {
-      return fs.listStatus(p);
+      return fs.listStatus(p, FileUtils.HIDDEN_FILES_PATH_FILTER);
     }
     List<FileStatus> results = new ArrayList<FileStatus>();
-    for (FileStatus stat : fs.listStatus(p)) {
+    for (FileStatus stat : fs.listStatus(p, FileUtils.HIDDEN_FILES_PATH_FILTER)) {
       FileUtils.listStatusRecursively(fs, stat, results);
     }
     return results.toArray(new FileStatus[results.size()]);

Modified: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java?rev=1653062&r1=1653061&r2=1653062&view=diff
==============================================================================
--- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java (original)
+++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java Mon Jan 19 17:28:53 2015
@@ -24,6 +24,7 @@ import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.LocalFileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hive.common.FileUtils;
 import org.apache.hadoop.hive.common.HiveStatsUtils;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.metastore.MetaStoreUtils;
@@ -58,6 +59,8 @@ import org.apache.hadoop.hive.ql.plan.Ma
 import org.apache.hadoop.hive.ql.plan.MoveWork;
 import org.apache.hadoop.hive.ql.plan.api.StageType;
 import org.apache.hadoop.hive.ql.session.SessionState;
+import org.apache.hadoop.hive.shims.HadoopShims;
+import org.apache.hadoop.hive.shims.ShimLoader;
 import org.apache.hadoop.util.StringUtils;
 
 import java.io.IOException;
@@ -99,7 +102,7 @@ public class MoveTask extends Task<MoveW
         if (HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVE_INSERT_INTO_MULTILEVEL_DIRS)) {
           deletePath = createTargetPath(targetPath, fs);
         }
-        if (!Hive.renameFile(conf, sourcePath, targetPath, fs, true, false)) {
+        if (!Hive.moveFile(conf, sourcePath, targetPath, fs, true, false)) {
           try {
             if (deletePath != null) {
               fs.delete(deletePath, true);
@@ -158,8 +161,14 @@ public class MoveTask extends Task<MoveW
         actualPath = actualPath.getParent();
       }
       fs.mkdirs(mkDirPath);
+      HadoopShims shims = ShimLoader.getHadoopShims();
       if (HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVE_WAREHOUSE_SUBDIR_INHERIT_PERMS)) {
-        fs.setPermission(mkDirPath, fs.getFileStatus(actualPath).getPermission());
+        try {
+          HadoopShims.HdfsFileStatus status = shims.getFullFileStatus(conf, fs, actualPath);
+          shims.setFullFileStatus(conf, status, fs, actualPath);
+        } catch (Exception e) {
+          LOG.warn("Error setting permissions or group of " + actualPath, e);
+        }
       }
     }
     return deletePath;
@@ -259,7 +268,7 @@ public class MoveTask extends Task<MoveW
             dirs = srcFs.globStatus(tbd.getSourcePath());
             files = new ArrayList<FileStatus>();
             for (int i = 0; (dirs != null && i < dirs.length); i++) {
-              files.addAll(Arrays.asList(srcFs.listStatus(dirs[i].getPath())));
+              files.addAll(Arrays.asList(srcFs.listStatus(dirs[i].getPath(), FileUtils.HIDDEN_FILES_PATH_FILTER)));
               // We only check one file, so exit the loop when we have at least
               // one.
               if (files.size() > 0) {

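The renameFile -> moveFile switch above matters for the same reason: a bare
FileSystem.rename() fails when source and destination sit in different encryption zones,
so a move must be able to fall back to copy-plus-delete. Hive.moveFile itself is not part
of this hunk; the following is only a sketch of that general pattern, using Hadoop's
FileUtil.copy:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.FileUtil;
    import org.apache.hadoop.fs.Path;

    public class MoveSketch {
      /** Try a cheap rename first; fall back to copy + delete when rename is refused. */
      static boolean move(Configuration conf, FileSystem fs, Path src, Path dst)
          throws Exception {
        try {
          if (fs.rename(src, dst)) {
            return true;
          }
        } catch (Exception e) {
          // e.g. renames across encryption zones are rejected; fall through to copy
        }
        // deleteSource=true makes the copy behave like a move
        return FileUtil.copy(fs, src, fs, dst, true, conf);
      }
    }
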
Modified: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java?rev=1653062&r1=1653061&r2=1653062&view=diff
==============================================================================
--- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java (original)
+++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java Mon Jan 19 17:28:53 2015
@@ -94,6 +94,7 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.PathFilter;
 import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.hive.common.FileUtils;
 import org.apache.hadoop.hive.common.HiveInterruptCallback;
 import org.apache.hadoop.hive.common.HiveInterruptUtils;
 import org.apache.hadoop.hive.common.HiveStatsUtils;
@@ -1801,7 +1802,7 @@ public final class Utilities {
    */
   public static FileStatus[] listStatusIfExists(Path path, FileSystem fs) throws IOException {
     try {
-      return fs.listStatus(path);
+      return fs.listStatus(path, FileUtils.HIDDEN_FILES_PATH_FILTER);
     } catch (FileNotFoundException e) {
       // FS in hadoop 2.0 throws FNF instead of returning null
       return null;
@@ -2637,7 +2638,7 @@ public final class Utilities {
     FileSystem inpFs = dirPath.getFileSystem(job);
 
     if (inpFs.exists(dirPath)) {
-      FileStatus[] fStats = inpFs.listStatus(dirPath);
+      FileStatus[] fStats = inpFs.listStatus(dirPath, FileUtils.HIDDEN_FILES_PATH_FILTER);
       if (fStats.length > 0) {
         return false;
       }

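Both Utilities hunks show why the hidden-files filter is not cosmetic. listStatusIfExists()
feeds input-path resolution (and still returns null for a missing path, since Hadoop 2
throws FileNotFoundException where Hadoop 1 returned null), and the second hunk is an
emptiness test: without the filter, an in-flight .hive-staging_* directory inside the
destination would make a logically empty directory look non-empty. A sketch of that
emptiness check under the new convention (helper name is illustrative):

    import java.io.IOException;

    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hive.common.FileUtils;

    public class EmptyDirSketch {
      /** True when dirPath holds no visible files; hidden staging dirs are ignored. */
      static boolean isEmptyIgnoringHidden(FileSystem fs, Path dirPath) throws IOException {
        if (!fs.exists(dirPath)) {
          return true;
        }
        return fs.listStatus(dirPath, FileUtils.HIDDEN_FILES_PATH_FILTER).length == 0;
      }
    }
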
Modified: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/index/HiveIndexResult.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/index/HiveIndexResult.java?rev=1653062&r1=1653061&r2=1653062&view=diff
==============================================================================
--- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/index/HiveIndexResult.java (original)
+++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/index/HiveIndexResult.java Mon Jan 19 17:28:53 2015
@@ -31,6 +31,7 @@ import org.apache.hadoop.fs.FSDataInputS
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hive.common.FileUtils;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.ql.metadata.HiveException;
 import org.apache.hadoop.hive.serde2.columnar.BytesRefWritable;
@@ -98,7 +99,7 @@ public class HiveIndexResult {
         FileSystem fs = indexFilePath.getFileSystem(conf);
         FileStatus indexStat = fs.getFileStatus(indexFilePath);
         if (indexStat.isDir()) {
-          FileStatus[] fss = fs.listStatus(indexFilePath);
+          FileStatus[] fss = fs.listStatus(indexFilePath, FileUtils.HIDDEN_FILES_PATH_FILTER);
           for (FileStatus f : fss) {
             paths.add(f.getPath());
           }

Modified: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/BucketizedHiveInputFormat.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/BucketizedHiveInputFormat.java?rev=1653062&r1=1653061&r2=1653062&view=diff
==============================================================================
--- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/BucketizedHiveInputFormat.java (original)
+++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/BucketizedHiveInputFormat.java Mon Jan 19 17:28:53 2015
@@ -27,7 +27,6 @@ import org.apache.commons.logging.LogFac
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.PathFilter;
 import org.apache.hadoop.hive.common.FileUtils;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.ql.plan.PartitionDesc;
@@ -92,14 +91,7 @@ public class BucketizedHiveInputFormat<K
     List<IOException> errors = new ArrayList<IOException>();
 
     FileSystem fs = dir.getFileSystem(job);
-    FileStatus[] matches = fs.globStatus(dir, new PathFilter() {
-
-      @Override
-      public boolean accept(Path p) {
-        String name = p.getName();
-        return !name.startsWith("_") && !name.startsWith(".");
-      }
-    });
+    FileStatus[] matches = fs.globStatus(dir, FileUtils.HIDDEN_FILES_PATH_FILTER);
     if (matches == null) {
       errors.add(new IOException("Input path does not exist: " + dir));
     } else if (matches.length == 0) {
@@ -113,7 +105,8 @@ public class BucketizedHiveInputFormat<K
     if (!errors.isEmpty()) {
       throw new InvalidInputException(errors);
     }
-    LOG.info("Total input paths to process : " + result.size());
+    LOG.debug("Matches for " + dir + ": " + result);
+    LOG.info("Total input paths to process : " + result.size() + " from dir " + dir);
     return result.toArray(new FileStatus[result.size()]);
 
   }

Modified: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/CombineHiveInputFormat.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/CombineHiveInputFormat.java?rev=1653062&r1=1653061&r2=1653062&view=diff
==============================================================================
--- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/CombineHiveInputFormat.java (original)
+++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/CombineHiveInputFormat.java Mon Jan 19 17:28:53 2015
@@ -42,6 +42,7 @@ import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.PathFilter;
+import org.apache.hadoop.hive.common.FileUtils;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.ql.exec.Operator;
 import org.apache.hadoop.hive.ql.exec.Utilities;
@@ -397,7 +398,7 @@ public class CombineHiveInputFormat<K ex
 
           while (dirs.peek() != null) {
             Path tstPath = dirs.remove();
-            FileStatus[] fStatus = inpFs.listStatus(tstPath);
+            FileStatus[] fStatus = inpFs.listStatus(tstPath, FileUtils.HIDDEN_FILES_PATH_FILTER);
             for (int idx = 0; idx < fStatus.length; idx++) {
               if (fStatus[idx].isDir()) {
                 dirs.offer(fStatus[idx].getPath());

Modified: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/SymbolicInputFormat.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/SymbolicInputFormat.java?rev=1653062&r1=1653061&r2=1653062&view=diff
==============================================================================
--- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/SymbolicInputFormat.java (original)
+++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/SymbolicInputFormat.java Mon Jan 19 17:28:53 2015
@@ -29,6 +29,7 @@ import java.util.Map;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hive.common.FileUtils;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.ql.plan.MapredWork;
 import org.apache.hadoop.hive.ql.plan.PartitionDesc;
@@ -58,7 +59,7 @@ public class SymbolicInputFormat impleme
         if (!fStatus.isDir()) {
           symlinks = new FileStatus[] { fStatus };
         } else {
-          symlinks = fileSystem.listStatus(symlinkDir);
+          symlinks = fileSystem.listStatus(symlinkDir, FileUtils.HIDDEN_FILES_PATH_FILTER);
         }
         toRemovePaths.add(path);
         ArrayList<String> aliases = pathToAliases.remove(path);

Modified: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/SymlinkTextInputFormat.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/SymlinkTextInputFormat.java?rev=1653062&r1=1653061&r2=1653062&view=diff
==============================================================================
--- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/SymlinkTextInputFormat.java (original)
+++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/SymlinkTextInputFormat.java Mon Jan 19 17:28:53 2015
@@ -23,19 +23,15 @@ import java.io.DataOutput;
 import java.io.IOException;
 import java.io.InputStreamReader;
 import java.util.ArrayList;
-import java.util.HashMap;
 import java.util.List;
-import java.util.Map;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.ContentSummary;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.common.FileUtils;
 import org.apache.hadoop.hive.io.HiveIOExceptionHandlerUtil;
-import org.apache.hadoop.hive.ql.plan.MapredWork;
-import org.apache.hadoop.hive.ql.plan.PartitionDesc;
 import org.apache.hadoop.io.LongWritable;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.mapred.FileInputFormat;
@@ -192,7 +188,7 @@ public class SymlinkTextInputFormat exte
       List<Path> targetPaths, List<Path> symlinkPaths) throws IOException {
     for (Path symlinkDir : symlinksDirs) {
       FileSystem fileSystem = symlinkDir.getFileSystem(conf);
-      FileStatus[] symlinks = fileSystem.listStatus(symlinkDir);
+      FileStatus[] symlinks = fileSystem.listStatus(symlinkDir, FileUtils.HIDDEN_FILES_PATH_FILTER);
 
       // Read paths from each symlink file.
       for (FileStatus symlink : symlinks) {