Posted to commits@hive.apache.org by br...@apache.org on 2014/11/17 23:36:48 UTC

svn commit: r1640247 [1/2] - in /hive/branches/HIVE-8065: common/src/java/org/apache/hadoop/hive/common/ common/src/java/org/apache/hadoop/hive/conf/ hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/ itests/util/src/main/java/org/apache/h...

Author: brock
Date: Mon Nov 17 22:36:47 2014
New Revision: 1640247

URL: http://svn.apache.org/r1640247
Log:
HIVE-8750 - Commit initial encryption work (Sergio Pena via Brock)

Modified:
    hive/branches/HIVE-8065/common/src/java/org/apache/hadoop/hive/common/FileUtils.java
    hive/branches/HIVE-8065/common/src/java/org/apache/hadoop/hive/common/HiveStatsUtils.java
    hive/branches/HIVE-8065/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
    hive/branches/HIVE-8065/hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/FileOutputCommitterContainer.java
    hive/branches/HIVE-8065/hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/FileOutputFormatContainer.java
    hive/branches/HIVE-8065/itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestUtil.java
    hive/branches/HIVE-8065/itests/util/src/main/java/org/apache/hadoop/hive/ql/hooks/VerifyTableDirectoryIsEmptyHook.java
    hive/branches/HIVE-8065/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SkewedInfo.java
    hive/branches/HIVE-8065/ql/src/java/org/apache/hadoop/hive/ql/Context.java
    hive/branches/HIVE-8065/ql/src/java/org/apache/hadoop/hive/ql/exec/FetchOperator.java
    hive/branches/HIVE-8065/ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java
    hive/branches/HIVE-8065/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java
    hive/branches/HIVE-8065/ql/src/java/org/apache/hadoop/hive/ql/index/HiveIndexResult.java
    hive/branches/HIVE-8065/ql/src/java/org/apache/hadoop/hive/ql/io/BucketizedHiveInputFormat.java
    hive/branches/HIVE-8065/ql/src/java/org/apache/hadoop/hive/ql/io/CombineHiveInputFormat.java
    hive/branches/HIVE-8065/ql/src/java/org/apache/hadoop/hive/ql/io/SymbolicInputFormat.java
    hive/branches/HIVE-8065/ql/src/java/org/apache/hadoop/hive/ql/io/SymlinkTextInputFormat.java
    hive/branches/HIVE-8065/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
    hive/branches/HIVE-8065/ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveMetaStoreChecker.java
    hive/branches/HIVE-8065/ql/src/java/org/apache/hadoop/hive/ql/metadata/Partition.java
    hive/branches/HIVE-8065/ql/src/java/org/apache/hadoop/hive/ql/metadata/Table.java
    hive/branches/HIVE-8065/ql/src/java/org/apache/hadoop/hive/ql/optimizer/AbstractBucketJoinProc.java
    hive/branches/HIVE-8065/ql/src/java/org/apache/hadoop/hive/ql/optimizer/IndexUtils.java
    hive/branches/HIVE-8065/ql/src/java/org/apache/hadoop/hive/ql/parse/ExportSemanticAnalyzer.java
    hive/branches/HIVE-8065/ql/src/java/org/apache/hadoop/hive/ql/parse/ImportSemanticAnalyzer.java
    hive/branches/HIVE-8065/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
    hive/branches/HIVE-8065/ql/src/java/org/apache/hadoop/hive/ql/session/SessionState.java
    hive/branches/HIVE-8065/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Initiator.java
    hive/branches/HIVE-8065/ql/src/test/org/apache/hadoop/hive/ql/io/TestSymlinkTextInputFormat.java
    hive/branches/HIVE-8065/ql/src/test/queries/clientpositive/smb_mapjoin_11.q
    hive/branches/HIVE-8065/ql/src/test/results/clientnegative/fs_default_name2.q.out
    hive/branches/HIVE-8065/shims/0.20/src/main/java/org/apache/hadoop/hive/shims/Hadoop20Shims.java
    hive/branches/HIVE-8065/shims/0.20S/src/main/java/org/apache/hadoop/hive/shims/Hadoop20SShims.java
    hive/branches/HIVE-8065/shims/0.23/pom.xml
    hive/branches/HIVE-8065/shims/0.23/src/main/java/org/apache/hadoop/hive/shims/Hadoop23Shims.java
    hive/branches/HIVE-8065/shims/common/src/main/java/org/apache/hadoop/hive/shims/HadoopShims.java

Modified: hive/branches/HIVE-8065/common/src/java/org/apache/hadoop/hive/common/FileUtils.java
URL: http://svn.apache.org/viewvc/hive/branches/HIVE-8065/common/src/java/org/apache/hadoop/hive/common/FileUtils.java?rev=1640247&r1=1640246&r2=1640247&view=diff
==============================================================================
--- hive/branches/HIVE-8065/common/src/java/org/apache/hadoop/hive/common/FileUtils.java (original)
+++ hive/branches/HIVE-8065/common/src/java/org/apache/hadoop/hive/common/FileUtils.java Mon Nov 17 22:36:47 2014
@@ -51,17 +51,13 @@ import org.apache.hadoop.util.Shell;
 public final class FileUtils {
   private static final Log LOG = LogFactory.getLog(FileUtils.class.getName());
 
-  /**
-   * Accept all paths.
-   */
-  private static class AcceptAllPathFilter implements PathFilter {
-    @Override
-    public boolean accept(Path path) {
-      return true;
+  public static final PathFilter HIDDEN_FILES_PATH_FILTER = new PathFilter() {
+    public boolean accept(Path p) {
+      String name = p.getName();
+      return !name.startsWith("_") && !name.startsWith(".");
     }
-  }
+  };
 
-  private static final PathFilter allPathFilter = new AcceptAllPathFilter();
 
   /**
    * Variant of Path.makeQualified that qualifies the input path against the default file system
@@ -317,14 +313,7 @@ public final class FileUtils {
       List<FileStatus> results) throws IOException {
 
     if (fileStatus.isDir()) {
-      for (FileStatus stat : fs.listStatus(fileStatus.getPath(), new PathFilter() {
-
-        @Override
-        public boolean accept(Path p) {
-          String name = p.getName();
-          return !name.startsWith("_") && !name.startsWith(".");
-        }
-      })) {
+      for (FileStatus stat : fs.listStatus(fileStatus.getPath(), HIDDEN_FILES_PATH_FILTER)) {
         listStatusRecursively(fs, stat, results);
       }
     } else {
@@ -364,7 +353,6 @@ public final class FileUtils {
    *             check will be performed within a doAs() block to use the access privileges
    *             of this user. In this case the user must be configured to impersonate other
    *             users, otherwise this check will fail with error.
-   * @param groups  List of groups for the user
    * @throws IOException
    * @throws AccessControlException
    * @throws InterruptedException
@@ -544,10 +532,24 @@ public final class FileUtils {
     boolean deleteSource,
     boolean overwrite,
     HiveConf conf) throws IOException {
-    boolean copied = FileUtil.copy(srcFS, src, dstFS, dst, deleteSource, overwrite, conf);
+
+    HadoopShims shims = ShimLoader.getHadoopShims();
+    boolean copied;
+
+    /* Run distcp if source file/dir is too big */
+    if (srcFS.getFileStatus(src).getLen() > conf.getLongVar(HiveConf.ConfVars.HIVE_EXEC_COPYFILE_MAXSIZE)) {
+      LOG.info("Source is " + srcFS.getFileStatus(src).getLen() + " bytes. (MAX: " + conf.getLongVar(HiveConf.ConfVars.HIVE_EXEC_COPYFILE_MAXSIZE) + ")");
+      LOG.info("Launch distributed copy (distcp) job.");
+      copied = shims.runDistCp(src, dst, conf);
+      if (copied && deleteSource) {
+        srcFS.delete(src, true);
+      }
+    } else {
+      copied = FileUtil.copy(srcFS, src, dstFS, dst, deleteSource, overwrite, conf);
+    }
+
     boolean inheritPerms = conf.getBoolVar(HiveConf.ConfVars.HIVE_WAREHOUSE_SUBDIR_INHERIT_PERMS);
     if (copied && inheritPerms) {
-      HadoopShims shims = ShimLoader.getHadoopShims();
       HdfsFileStatus fullFileStatus = shims.getFullFileStatus(conf, dstFS, dst);
       try {
         shims.setFullFileStatus(conf, fullFileStatus, dstFS, dst);
@@ -568,7 +570,7 @@ public final class FileUtils {
    * @throws IOException
    */
   public static boolean trashFilesUnderDir(FileSystem fs, Path f, Configuration conf) throws FileNotFoundException, IOException {
-    FileStatus[] statuses = fs.listStatus(f, allPathFilter);
+    FileStatus[] statuses = fs.listStatus(f, HIDDEN_FILES_PATH_FILTER);
     boolean result = true;
     for (FileStatus status : statuses) {
       result = result & moveToTrash(fs, status.getPath(), conf);
@@ -600,6 +602,25 @@ public final class FileUtils {
     return result;
   }
 
+  /**
+   * Check if first path is a subdirectory of second path.
+   * Both paths must belong to the same filesystem.
+   *
+   * @param p1 first path
+   * @param p2 second path
+   * @param fs FileSystem, both paths must belong to the same filesystem
+   * @return true if p1 is a subdirectory of p2, false otherwise
+   */
+  public static boolean isSubDir(Path p1, Path p2, FileSystem fs) {
+    // Append a trailing separator so that e.g. /dir/foo2 is not treated as a child of /dir/foo.
+    String path1 = fs.makeQualified(p1).toString() + Path.SEPARATOR;
+    String path2 = fs.makeQualified(p2).toString() + Path.SEPARATOR;
+    if (path1.startsWith(path2)) {
+      return true;
+    }
+    return false;
+  }
+
   public static boolean renameWithPerms(FileSystem fs, Path sourcePath,
                                Path destPath, boolean inheritPerms,
                                Configuration conf) throws IOException {

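In essence, the updated FileUtils.copy now chooses between a plain FileUtil.copy and a distributed copy (distcp) job based on the size of the source, deleting the source itself when distcp is used with deleteSource. A minimal standalone sketch of that decision follows; the class name, the inlined threshold constant, and the runDistCp stub are illustrative stand-ins for the shim call, not part of the patch:

    import java.io.IOException;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.FileUtil;
    import org.apache.hadoop.fs.Path;

    public class CopyDecisionSketch {
      // Mirrors the new hive.exec.copyfile.maxsize default: 32MB, expressed in bytes.
      private static final long COPYFILE_MAXSIZE = 32L * 1024 * 1024;

      static boolean copy(FileSystem srcFs, Path src, FileSystem dstFs, Path dst,
          boolean deleteSource, boolean overwrite, Configuration conf) throws IOException {
        boolean copied;
        if (srcFs.getFileStatus(src).getLen() > COPYFILE_MAXSIZE) {
          // Large source: launch a distributed copy job instead of a single-threaded copy.
          copied = runDistCp(src, dst, conf);
          if (copied && deleteSource) {
            srcFs.delete(src, true); // distcp does not delete the source itself
          }
        } else {
          copied = FileUtil.copy(srcFs, src, dstFs, dst, deleteSource, overwrite, conf);
        }
        return copied;
      }

      // Illustrative stub standing in for HadoopShims.runDistCp in the patch.
      private static boolean runDistCp(Path src, Path dst, Configuration conf) {
        throw new UnsupportedOperationException("stub for the shim's distcp call");
      }
    }
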
Modified: hive/branches/HIVE-8065/common/src/java/org/apache/hadoop/hive/common/HiveStatsUtils.java
URL: http://svn.apache.org/viewvc/hive/branches/HIVE-8065/common/src/java/org/apache/hadoop/hive/common/HiveStatsUtils.java?rev=1640247&r1=1640246&r2=1640247&view=diff
==============================================================================
--- hive/branches/HIVE-8065/common/src/java/org/apache/hadoop/hive/common/HiveStatsUtils.java (original)
+++ hive/branches/HIVE-8065/common/src/java/org/apache/hadoop/hive/common/HiveStatsUtils.java Mon Nov 17 22:36:47 2014
@@ -70,7 +70,7 @@ public class HiveStatsUtils {
       sb.append(Path.SEPARATOR).append("*");
     }
     Path pathPattern = new Path(path, sb.toString());
-    return fs.globStatus(pathPattern);
+    return fs.globStatus(pathPattern, FileUtils.HIDDEN_FILES_PATH_FILTER);
   }
 
 }

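The HIDDEN_FILES_PATH_FILTER introduced in FileUtils above is what most of the remaining hunks thread through listStatus/globStatus calls, so that staging directories (.hive-staging...) and markers such as _SUCCESS are no longer picked up as data. A tiny usage sketch, with illustrative names:

    import java.io.IOException;

    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hive.common.FileUtils;

    public class FilterUsageSketch {
      // Lists only "visible" children of dir: names starting with '_' or '.' are skipped.
      static FileStatus[] visibleChildren(FileSystem fs, Path dir) throws IOException {
        return fs.listStatus(dir, FileUtils.HIDDEN_FILES_PATH_FILTER);
      }
    }
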
Modified: hive/branches/HIVE-8065/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
URL: http://svn.apache.org/viewvc/hive/branches/HIVE-8065/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java?rev=1640247&r1=1640246&r2=1640247&view=diff
==============================================================================
--- hive/branches/HIVE-8065/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java (original)
+++ hive/branches/HIVE-8065/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java Mon Nov 17 22:36:47 2014
@@ -205,6 +205,10 @@ public class HiveConf extends Configurat
     PLAN_SERIALIZATION("hive.plan.serialization.format", "kryo",
         "Query plan format serialization between client and task nodes. \n" +
         "Two supported values are : kryo and javaXML. Kryo is default."),
+    STAGINGDIR("hive.exec.stagingdir", ".hive-staging",
+        "Directory name that will be created inside table locations in order to support HDFS encryption." +
+        "This is the replacement of ${hive.exec.scratchdir} for all Hive statements that deals with +" +
+        "HDFS encryption zones. ${hive.exec.scratchdir} is still used for other temporary, such as job plans."),
     SCRATCHDIR("hive.exec.scratchdir", "/tmp/hive",
         "HDFS root scratch dir for Hive jobs which gets created with write all (733) permission. " +
         "For each connecting user, an HDFS scratch dir: ${hive.exec.scratchdir}/<username> is created, " +
@@ -726,6 +730,10 @@ public class HiveConf extends Configurat
         "cardinality (4 in the example above), is more than this value, a new MR job is added under the\n" +
         "assumption that the original group by will reduce the data size."),
 
+    // Max file size for a single copy (beyond that, distcp is used)
+    HIVE_EXEC_COPYFILE_MAXSIZE("hive.exec.copyfile.maxsize", 32L * 1024 * 1024 /*32M*/,
+        "Maximum file size (in bytes) that Hive uses to do single HDFS copies between directories. " +
+        "Distributed copies (distcp) will be used instead for larger files so that copies can be done faster."),
 
     // for hive udtf operator
     HIVEUDTFAUTOPROGRESS("hive.udtf.auto.progress", false,

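Both new variables are read through the usual HiveConf accessors; a short hedged example, assuming this patched HiveConf is on the classpath:

    import org.apache.hadoop.hive.conf.HiveConf;

    public class ConfSketch {
      public static void main(String[] args) {
        HiveConf conf = new HiveConf();
        // ".hive-staging" unless overridden; created inside table locations.
        String stagingDir = HiveConf.getVar(conf, HiveConf.ConfVars.STAGINGDIR);
        // Threshold in bytes above which FileUtils.copy switches to distcp.
        long maxCopySize = conf.getLongVar(HiveConf.ConfVars.HIVE_EXEC_COPYFILE_MAXSIZE);
        System.out.println(stagingDir + " / " + maxCopySize);
      }
    }
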
Modified: hive/branches/HIVE-8065/hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/FileOutputCommitterContainer.java
URL: http://svn.apache.org/viewvc/hive/branches/HIVE-8065/hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/FileOutputCommitterContainer.java?rev=1640247&r1=1640246&r2=1640247&view=diff
==============================================================================
--- hive/branches/HIVE-8065/hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/FileOutputCommitterContainer.java (original)
+++ hive/branches/HIVE-8065/hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/FileOutputCommitterContainer.java Mon Nov 17 22:36:47 2014
@@ -696,7 +696,7 @@ class FileOutputCommitterContainer exten
 
       //      LOG.info("Searching for "+dynPathSpec);
       Path pathPattern = new Path(dynPathSpec);
-      FileStatus[] status = fs.globStatus(pathPattern);
+      FileStatus[] status = fs.globStatus(pathPattern, FileUtils.HIDDEN_FILES_PATH_FILTER);
 
       partitionsDiscoveredByPath = new LinkedHashMap<String, Map<String, String>>();
       contextDiscoveredByPath = new LinkedHashMap<String, JobContext>();

Modified: hive/branches/HIVE-8065/hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/FileOutputFormatContainer.java
URL: http://svn.apache.org/viewvc/hive/branches/HIVE-8065/hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/FileOutputFormatContainer.java?rev=1640247&r1=1640246&r2=1640247&view=diff
==============================================================================
--- hive/branches/HIVE-8065/hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/FileOutputFormatContainer.java (original)
+++ hive/branches/HIVE-8065/hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/FileOutputFormatContainer.java Mon Nov 17 22:36:47 2014
@@ -20,9 +20,7 @@
 package org.apache.hive.hcatalog.mapreduce;
 
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.PathFilter;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.metastore.MetaStoreUtils;
 import org.apache.hadoop.hive.ql.metadata.HiveStorageHandler;

Modified: hive/branches/HIVE-8065/itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestUtil.java
URL: http://svn.apache.org/viewvc/hive/branches/HIVE-8065/itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestUtil.java?rev=1640247&r1=1640246&r2=1640247&view=diff
==============================================================================
--- hive/branches/HIVE-8065/itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestUtil.java (original)
+++ hive/branches/HIVE-8065/itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestUtil.java Mon Nov 17 22:36:47 2014
@@ -365,7 +365,10 @@ public class QTestUtil {
   }
 
   public void shutdown() throws Exception {
-    cleanUp();
+    if (System.getenv(QTEST_LEAVE_FILES) == null) {
+      cleanUp();
+    }
+
     setup.tearDown();
     if (mr != null) {
       mr.shutdown();
@@ -1178,7 +1181,8 @@ public class QTestUtil {
       ".*DagName:.*",
       ".*Input:.*/data/files/.*",
       ".*Output:.*/data/files/.*",
-      ".*total number of created files now is.*"
+      ".*total number of created files now is.*",
+      ".*.hive-staging.*"
   });
 
   public int checkCliDriverResults(String tname) throws Exception {

Modified: hive/branches/HIVE-8065/itests/util/src/main/java/org/apache/hadoop/hive/ql/hooks/VerifyTableDirectoryIsEmptyHook.java
URL: http://svn.apache.org/viewvc/hive/branches/HIVE-8065/itests/util/src/main/java/org/apache/hadoop/hive/ql/hooks/VerifyTableDirectoryIsEmptyHook.java?rev=1640247&r1=1640246&r2=1640247&view=diff
==============================================================================
--- hive/branches/HIVE-8065/itests/util/src/main/java/org/apache/hadoop/hive/ql/hooks/VerifyTableDirectoryIsEmptyHook.java (original)
+++ hive/branches/HIVE-8065/itests/util/src/main/java/org/apache/hadoop/hive/ql/hooks/VerifyTableDirectoryIsEmptyHook.java Mon Nov 17 22:36:47 2014
@@ -17,8 +17,10 @@
  */
 package org.apache.hadoop.hive.ql.hooks;
 
+import java.util.Arrays;
 import java.io.IOException;
 
+import org.apache.hadoop.hive.common.FileUtils;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hive.ql.session.SessionState;
@@ -30,7 +32,7 @@ public class VerifyTableDirectoryIsEmpty
     for (WriteEntity output : hookContext.getOutputs()) {
       Path tableLocation = new Path(output.getTable().getDataLocation().toString());
       FileSystem fs = tableLocation.getFileSystem(SessionState.get().getConf());
-      assert(fs.listStatus(tableLocation).length == 0);
+      assert(fs.listStatus(tableLocation, FileUtils.HIDDEN_FILES_PATH_FILTER).length == 0);
     }
   }
 }

Modified: hive/branches/HIVE-8065/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SkewedInfo.java
URL: http://svn.apache.org/viewvc/hive/branches/HIVE-8065/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SkewedInfo.java?rev=1640247&r1=1640246&r2=1640247&view=diff
==============================================================================
--- hive/branches/HIVE-8065/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SkewedInfo.java (original)
+++ hive/branches/HIVE-8065/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SkewedInfo.java Mon Nov 17 22:36:47 2014
@@ -184,7 +184,7 @@ public class SkewedInfo implements org.a
 
         __this__skewedColValueLocationMaps.put(__this__skewedColValueLocationMaps_copy_key, __this__skewedColValueLocationMaps_copy_value);
       }
-      this.skewedColValueLocationMaps = __this__skewedColValueLocationMaps;
+        this.skewedColValueLocationMaps = __this__skewedColValueLocationMaps;
     }
   }
 

Modified: hive/branches/HIVE-8065/ql/src/java/org/apache/hadoop/hive/ql/Context.java
URL: http://svn.apache.org/viewvc/hive/branches/HIVE-8065/ql/src/java/org/apache/hadoop/hive/ql/Context.java?rev=1640247&r1=1640246&r2=1640247&view=diff
==============================================================================
--- hive/branches/HIVE-8065/ql/src/java/org/apache/hadoop/hive/ql/Context.java (original)
+++ hive/branches/HIVE-8065/ql/src/java/org/apache/hadoop/hive/ql/Context.java Mon Nov 17 22:36:47 2014
@@ -90,7 +90,7 @@ public class Context {
   protected int tryCount = 0;
   private TokenRewriteStream tokenRewriteStream;
 
-  String executionId;
+  private String executionId;
 
   // List of Locks for this query
   protected List<HiveLock> hiveLocks;
@@ -112,6 +112,8 @@ public class Context {
   private final Map<WriteEntity, List<HiveLockObj>> outputLockObjects =
       new HashMap<WriteEntity, List<HiveLockObj>>();
 
+  private final String stagingDir;
+
   public Context(Configuration conf) throws IOException {
     this(conf, generateExecutionId());
   }
@@ -129,6 +131,7 @@ public class Context {
     nonLocalScratchPath = new Path(SessionState.getHDFSSessionPath(conf), executionId);
     localScratchDir = new Path(SessionState.getLocalSessionPath(conf), executionId).toUri().getPath();
     scratchDirPermission = HiveConf.getVar(conf, HiveConf.ConfVars.SCRATCHDIRPERMISSION);
+    stagingDir = HiveConf.getVar(conf, HiveConf.ConfVars.STAGINGDIR);
   }
 
 
@@ -188,6 +191,65 @@ public class Context {
   }
 
   /**
+   * Gets a temporary staging directory related to a path.
+   * If the path already contains a staging directory portion, that staging directory is reused;
+   * otherwise a new one is derived from the path (and created on the file system if requested).
+   *
+   * @param inputPath path to which the staging directory should be related
+   * @param mkdir if true, create the staging directory on the file system
+   * @return the staging directory path
+   */
+  private Path getStagingDir(Path inputPath, boolean mkdir) {
+    final URI inputPathUri = inputPath.toUri();
+    final String inputPathName = inputPathUri.getPath();
+    final String fileSystem = inputPathUri.getScheme() + ":" + inputPathUri.getAuthority();
+    final FileSystem fs;
+
+    try {
+      fs = inputPath.getFileSystem(conf);
+    } catch (IOException e) {
+      throw new IllegalStateException("Error getting FileSystem for " + inputPath + ": "+ e, e);
+    }
+
+    String stagingPathName;
+    if (inputPathName.indexOf(stagingDir) == -1) {
+      stagingPathName = new Path(inputPathName, stagingDir).toString();
+    } else {
+      stagingPathName = inputPathName.substring(0, inputPathName.indexOf(stagingDir) + stagingDir.length());
+    }
+
+    final String key = fileSystem + "-" + stagingPathName + "-" + TaskRunner.getTaskRunnerID();
+
+    Path dir = fsScratchDirs.get(key);
+    if (dir == null) {
+      // Append task specific info to stagingPathName, instead of creating a sub-directory.
+      // This way we don't have to worry about deleting the stagingPathName separately at
+      // end of query execution.
+      dir = fs.makeQualified(new Path(stagingPathName + "_" + this.executionId + "-" + TaskRunner.getTaskRunnerID()));
+
+      LOG.debug("Created staging dir = " + dir + " for path = " + inputPath);
+
+      if (mkdir) {
+        try {
+          if (!FileUtils.mkdir(fs, dir, true, conf)) {
+            throw new IllegalStateException("Cannot create staging directory  '" + dir.toString() + "'");
+          }
+
+          if (isHDFSCleanup) {
+            fs.deleteOnExit(dir);
+          }
+        } catch (IOException e) {
+          throw new RuntimeException("Cannot create staging directory '" + dir.toString() + "': " + e.getMessage(), e);
+        }
+      }
+
+      fsScratchDirs.put(key, dir);
+    }
+
+    return dir;
+  }
+
+  /**
    * Get a tmp directory on specified URI
    *
    * @param scheme Scheme of the target FS
@@ -274,14 +336,13 @@ public class Context {
   }
 
   private Path getExternalScratchDir(URI extURI) {
-    return getScratchDir(extURI.getScheme(), extURI.getAuthority(),
-        !explain, nonLocalScratchPath.toUri().getPath());
+    return getStagingDir(new Path(extURI.getScheme(), extURI.getAuthority(), extURI.getPath()), !explain);
   }
 
   /**
    * Remove any created scratch directories.
    */
-  private void removeScratchDir() {
+  public void removeScratchDir() {
     for (Map.Entry<String, Path> entry : fsScratchDirs.entrySet()) {
       try {
         Path p = entry.getValue();
@@ -313,6 +374,10 @@ public class Context {
         (uriStr.indexOf(MR_PREFIX) != -1);
   }
 
+  public Path getMRTmpPath(URI uri) {
+    return new Path(getStagingDir(new Path(uri), !explain), MR_PREFIX + nextPathId());
+  }
+
   /**
    * Get a path to store map-reduce intermediate data in.
    *
@@ -333,10 +398,9 @@ public class Context {
   }
 
   /**
-   * Get a path to store tmp data destined for external URI.
+   * Get a path to store tmp data destined for external Path.
    *
-   * @param extURI
-   *          external URI to which the tmp data has to be eventually moved
+   * @param path external Path to which the tmp data has to be eventually moved
    * @return next available tmp path on the file system corresponding extURI
    */
   public Path getExternalTmpPath(Path path) {
@@ -357,9 +421,7 @@ public class Context {
    * path within /tmp
    */
   public Path getExtTmpPathRelTo(Path path) {
-    URI uri = path.toUri();
-    return new Path (getScratchDir(uri.getScheme(), uri.getAuthority(), !explain,
-        uri.getPath() + Path.SEPARATOR + "_" + this.executionId), EXT_PREFIX + nextPathId());
+    return new Path(getStagingDir(path, !explain), EXT_PREFIX + nextPathId());
   }
 
   /**
@@ -437,7 +499,7 @@ public class Context {
         resFs = resDir.getFileSystem(conf);
         FileStatus status = resFs.getFileStatus(resDir);
         assert status.isDir();
-        FileStatus[] resDirFS = resFs.globStatus(new Path(resDir + "/*"));
+        FileStatus[] resDirFS = resFs.globStatus(new Path(resDir + "/*"), FileUtils.HIDDEN_FILES_PATH_FILTER);
         resDirPaths = new Path[resDirFS.length];
         int pos = 0;
         for (FileStatus resFS : resDirFS) {

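The core of getStagingDir is the derivation of the staging path name from the input path; a self-contained sketch of just that string manipulation (the values in main are illustrative):

    import org.apache.hadoop.fs.Path;

    public class StagingPathSketch {
      // Derives the staging path name exactly as getStagingDir does above.
      static String stagingPathName(String inputPathName, String stagingDir) {
        int idx = inputPathName.indexOf(stagingDir);
        if (idx == -1) {
          // No staging component yet: nest one directly under the input path.
          return new Path(inputPathName, stagingDir).toString();
        }
        // Already at or below a staging directory: truncate back to it and reuse it.
        return inputPathName.substring(0, idx + stagingDir.length());
      }

      public static void main(String[] args) {
        String staging = ".hive-staging";
        // -> /warehouse/t/.hive-staging
        System.out.println(stagingPathName("/warehouse/t", staging));
        // -> /warehouse/t/.hive-staging
        System.out.println(stagingPathName("/warehouse/t/.hive-staging_hive_1/-ext-10000", staging));
      }
    }
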
Modified: hive/branches/HIVE-8065/ql/src/java/org/apache/hadoop/hive/ql/exec/FetchOperator.java
URL: http://svn.apache.org/viewvc/hive/branches/HIVE-8065/ql/src/java/org/apache/hadoop/hive/ql/exec/FetchOperator.java?rev=1640247&r1=1640246&r2=1640247&view=diff
==============================================================================
--- hive/branches/HIVE-8065/ql/src/java/org/apache/hadoop/hive/ql/exec/FetchOperator.java (original)
+++ hive/branches/HIVE-8065/ql/src/java/org/apache/hadoop/hive/ql/exec/FetchOperator.java Mon Nov 17 22:36:47 2014
@@ -780,10 +780,10 @@ public class FetchOperator implements Se
     boolean recursive = HiveConf.getBoolVar(job, HiveConf.ConfVars.HADOOPMAPREDINPUTDIRRECURSIVE);
     // If this is in acid format always read it recursively regardless of what the jobconf says.
     if (!recursive && !AcidUtils.isAcid(p, job)) {
-      return fs.listStatus(p);
+      return fs.listStatus(p, FileUtils.HIDDEN_FILES_PATH_FILTER);
     }
     List<FileStatus> results = new ArrayList<FileStatus>();
-    for (FileStatus stat : fs.listStatus(p)) {
+    for (FileStatus stat : fs.listStatus(p, FileUtils.HIDDEN_FILES_PATH_FILTER)) {
       FileUtils.listStatusRecursively(fs, stat, results);
     }
     return results.toArray(new FileStatus[results.size()]);

Modified: hive/branches/HIVE-8065/ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java
URL: http://svn.apache.org/viewvc/hive/branches/HIVE-8065/ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java?rev=1640247&r1=1640246&r2=1640247&view=diff
==============================================================================
--- hive/branches/HIVE-8065/ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java (original)
+++ hive/branches/HIVE-8065/ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java Mon Nov 17 22:36:47 2014
@@ -24,6 +24,7 @@ import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.LocalFileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hive.common.FileUtils;
 import org.apache.hadoop.hive.common.HiveStatsUtils;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.metastore.MetaStoreUtils;
@@ -99,7 +100,7 @@ public class MoveTask extends Task<MoveW
         if (HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVE_INSERT_INTO_MULTILEVEL_DIRS)) {
           deletePath = createTargetPath(targetPath, fs);
         }
-        if (!Hive.renameFile(conf, sourcePath, targetPath, fs, true, false)) {
+        if (!Hive.moveFile(conf, sourcePath, targetPath, fs, true, false)) {
           try {
             if (deletePath != null) {
               fs.delete(deletePath, true);
@@ -259,7 +260,7 @@ public class MoveTask extends Task<MoveW
             dirs = srcFs.globStatus(tbd.getSourcePath());
             files = new ArrayList<FileStatus>();
             for (int i = 0; (dirs != null && i < dirs.length); i++) {
-              files.addAll(Arrays.asList(srcFs.listStatus(dirs[i].getPath())));
+              files.addAll(Arrays.asList(srcFs.listStatus(dirs[i].getPath(), FileUtils.HIDDEN_FILES_PATH_FILTER)));
               // We only check one file, so exit the loop when we have at least
               // one.
               if (files.size() > 0) {

Modified: hive/branches/HIVE-8065/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java
URL: http://svn.apache.org/viewvc/hive/branches/HIVE-8065/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java?rev=1640247&r1=1640246&r2=1640247&view=diff
==============================================================================
--- hive/branches/HIVE-8065/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java (original)
+++ hive/branches/HIVE-8065/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java Mon Nov 17 22:36:47 2014
@@ -94,6 +94,7 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.PathFilter;
 import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.hive.common.FileUtils;
 import org.apache.hadoop.hive.common.HiveInterruptCallback;
 import org.apache.hadoop.hive.common.HiveInterruptUtils;
 import org.apache.hadoop.hive.common.HiveStatsUtils;
@@ -1776,7 +1777,7 @@ public final class Utilities {
    */
   public static FileStatus[] listStatusIfExists(Path path, FileSystem fs) throws IOException {
     try {
-      return fs.listStatus(path);
+      return fs.listStatus(path, FileUtils.HIDDEN_FILES_PATH_FILTER);
     } catch (FileNotFoundException e) {
       // FS in hadoop 2.0 throws FNF instead of returning null
       return null;
@@ -2612,7 +2613,7 @@ public final class Utilities {
     FileSystem inpFs = dirPath.getFileSystem(job);
 
     if (inpFs.exists(dirPath)) {
-      FileStatus[] fStats = inpFs.listStatus(dirPath);
+      FileStatus[] fStats = inpFs.listStatus(dirPath, FileUtils.HIDDEN_FILES_PATH_FILTER);
       if (fStats.length > 0) {
         return false;
       }

Modified: hive/branches/HIVE-8065/ql/src/java/org/apache/hadoop/hive/ql/index/HiveIndexResult.java
URL: http://svn.apache.org/viewvc/hive/branches/HIVE-8065/ql/src/java/org/apache/hadoop/hive/ql/index/HiveIndexResult.java?rev=1640247&r1=1640246&r2=1640247&view=diff
==============================================================================
--- hive/branches/HIVE-8065/ql/src/java/org/apache/hadoop/hive/ql/index/HiveIndexResult.java (original)
+++ hive/branches/HIVE-8065/ql/src/java/org/apache/hadoop/hive/ql/index/HiveIndexResult.java Mon Nov 17 22:36:47 2014
@@ -31,6 +31,7 @@ import org.apache.hadoop.fs.FSDataInputS
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hive.common.FileUtils;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.ql.metadata.HiveException;
 import org.apache.hadoop.hive.serde2.columnar.BytesRefWritable;
@@ -98,7 +99,7 @@ public class HiveIndexResult {
         FileSystem fs = indexFilePath.getFileSystem(conf);
         FileStatus indexStat = fs.getFileStatus(indexFilePath);
         if (indexStat.isDir()) {
-          FileStatus[] fss = fs.listStatus(indexFilePath);
+          FileStatus[] fss = fs.listStatus(indexFilePath, FileUtils.HIDDEN_FILES_PATH_FILTER);
           for (FileStatus f : fss) {
             paths.add(f.getPath());
           }

Modified: hive/branches/HIVE-8065/ql/src/java/org/apache/hadoop/hive/ql/io/BucketizedHiveInputFormat.java
URL: http://svn.apache.org/viewvc/hive/branches/HIVE-8065/ql/src/java/org/apache/hadoop/hive/ql/io/BucketizedHiveInputFormat.java?rev=1640247&r1=1640246&r2=1640247&view=diff
==============================================================================
--- hive/branches/HIVE-8065/ql/src/java/org/apache/hadoop/hive/ql/io/BucketizedHiveInputFormat.java (original)
+++ hive/branches/HIVE-8065/ql/src/java/org/apache/hadoop/hive/ql/io/BucketizedHiveInputFormat.java Mon Nov 17 22:36:47 2014
@@ -27,7 +27,6 @@ import org.apache.commons.logging.LogFac
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.PathFilter;
 import org.apache.hadoop.hive.common.FileUtils;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.ql.plan.PartitionDesc;
@@ -92,14 +91,7 @@ public class BucketizedHiveInputFormat<K
     List<IOException> errors = new ArrayList<IOException>();
 
     FileSystem fs = dir.getFileSystem(job);
-    FileStatus[] matches = fs.globStatus(dir, new PathFilter() {
-
-      @Override
-      public boolean accept(Path p) {
-        String name = p.getName();
-        return !name.startsWith("_") && !name.startsWith(".");
-      }
-    });
+    FileStatus[] matches = fs.globStatus(dir, FileUtils.HIDDEN_FILES_PATH_FILTER);
     if (matches == null) {
       errors.add(new IOException("Input path does not exist: " + dir));
     } else if (matches.length == 0) {
@@ -113,7 +105,8 @@ public class BucketizedHiveInputFormat<K
     if (!errors.isEmpty()) {
       throw new InvalidInputException(errors);
     }
-    LOG.info("Total input paths to process : " + result.size());
+    LOG.debug("Matches for " + dir + ": " + result);
+    LOG.info("Total input paths to process : " + result.size() + " from dir " + dir);
     return result.toArray(new FileStatus[result.size()]);
 
   }

Modified: hive/branches/HIVE-8065/ql/src/java/org/apache/hadoop/hive/ql/io/CombineHiveInputFormat.java
URL: http://svn.apache.org/viewvc/hive/branches/HIVE-8065/ql/src/java/org/apache/hadoop/hive/ql/io/CombineHiveInputFormat.java?rev=1640247&r1=1640246&r2=1640247&view=diff
==============================================================================
--- hive/branches/HIVE-8065/ql/src/java/org/apache/hadoop/hive/ql/io/CombineHiveInputFormat.java (original)
+++ hive/branches/HIVE-8065/ql/src/java/org/apache/hadoop/hive/ql/io/CombineHiveInputFormat.java Mon Nov 17 22:36:47 2014
@@ -38,6 +38,7 @@ import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.PathFilter;
+import org.apache.hadoop.hive.common.FileUtils;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.ql.exec.Operator;
 import org.apache.hadoop.hive.ql.exec.Utilities;
@@ -344,7 +345,7 @@ public class CombineHiveInputFormat<K ex
 
           while (dirs.peek() != null) {
             Path tstPath = dirs.remove();
-            FileStatus[] fStatus = inpFs.listStatus(tstPath);
+            FileStatus[] fStatus = inpFs.listStatus(tstPath, FileUtils.HIDDEN_FILES_PATH_FILTER);
             for (int idx = 0; idx < fStatus.length; idx++) {
               if (fStatus[idx].isDir()) {
                 dirs.offer(fStatus[idx].getPath());

Modified: hive/branches/HIVE-8065/ql/src/java/org/apache/hadoop/hive/ql/io/SymbolicInputFormat.java
URL: http://svn.apache.org/viewvc/hive/branches/HIVE-8065/ql/src/java/org/apache/hadoop/hive/ql/io/SymbolicInputFormat.java?rev=1640247&r1=1640246&r2=1640247&view=diff
==============================================================================
--- hive/branches/HIVE-8065/ql/src/java/org/apache/hadoop/hive/ql/io/SymbolicInputFormat.java (original)
+++ hive/branches/HIVE-8065/ql/src/java/org/apache/hadoop/hive/ql/io/SymbolicInputFormat.java Mon Nov 17 22:36:47 2014
@@ -29,6 +29,7 @@ import java.util.Map;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hive.common.FileUtils;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.ql.plan.MapredWork;
 import org.apache.hadoop.hive.ql.plan.PartitionDesc;
@@ -58,7 +59,7 @@ public class SymbolicInputFormat impleme
         if (!fStatus.isDir()) {
           symlinks = new FileStatus[] { fStatus };
         } else {
-          symlinks = fileSystem.listStatus(symlinkDir);
+          symlinks = fileSystem.listStatus(symlinkDir, FileUtils.HIDDEN_FILES_PATH_FILTER);
         }
         toRemovePaths.add(path);
         ArrayList<String> aliases = pathToAliases.remove(path);

Modified: hive/branches/HIVE-8065/ql/src/java/org/apache/hadoop/hive/ql/io/SymlinkTextInputFormat.java
URL: http://svn.apache.org/viewvc/hive/branches/HIVE-8065/ql/src/java/org/apache/hadoop/hive/ql/io/SymlinkTextInputFormat.java?rev=1640247&r1=1640246&r2=1640247&view=diff
==============================================================================
--- hive/branches/HIVE-8065/ql/src/java/org/apache/hadoop/hive/ql/io/SymlinkTextInputFormat.java (original)
+++ hive/branches/HIVE-8065/ql/src/java/org/apache/hadoop/hive/ql/io/SymlinkTextInputFormat.java Mon Nov 17 22:36:47 2014
@@ -23,19 +23,15 @@ import java.io.DataOutput;
 import java.io.IOException;
 import java.io.InputStreamReader;
 import java.util.ArrayList;
-import java.util.HashMap;
 import java.util.List;
-import java.util.Map;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.ContentSummary;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.common.FileUtils;
 import org.apache.hadoop.hive.io.HiveIOExceptionHandlerUtil;
-import org.apache.hadoop.hive.ql.plan.MapredWork;
-import org.apache.hadoop.hive.ql.plan.PartitionDesc;
 import org.apache.hadoop.io.LongWritable;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.mapred.FileInputFormat;
@@ -192,7 +188,7 @@ public class SymlinkTextInputFormat exte
       List<Path> targetPaths, List<Path> symlinkPaths) throws IOException {
     for (Path symlinkDir : symlinksDirs) {
       FileSystem fileSystem = symlinkDir.getFileSystem(conf);
-      FileStatus[] symlinks = fileSystem.listStatus(symlinkDir);
+      FileStatus[] symlinks = fileSystem.listStatus(symlinkDir, FileUtils.HIDDEN_FILES_PATH_FILTER);
 
       // Read paths from each symlink file.
       for (FileStatus symlink : symlinks) {

Modified: hive/branches/HIVE-8065/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
URL: http://svn.apache.org/viewvc/hive/branches/HIVE-8065/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java?rev=1640247&r1=1640246&r2=1640247&view=diff
==============================================================================
--- hive/branches/HIVE-8065/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java (original)
+++ hive/branches/HIVE-8065/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java Mon Nov 17 22:36:47 2014
@@ -48,6 +48,7 @@ import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.PathFilter;
+import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hive.common.FileUtils;
 import org.apache.hadoop.hive.common.HiveStatsUtils;
 import org.apache.hadoop.hive.common.ObjectPair;
@@ -1402,7 +1403,7 @@ private void walkDirTree(FileStatus fSta
   }
 
   /* dfs. */
-  FileStatus[] children = fSys.listStatus(fSta.getPath());
+  FileStatus[] children = fSys.listStatus(fSta.getPath(), FileUtils.HIDDEN_FILES_PATH_FILTER);
   if (children != null) {
     for (FileStatus child : children) {
       walkDirTree(child, fSys, skewedColValueLocationMaps, newPartPath, skewedInfo);
@@ -2241,13 +2242,7 @@ private void constructOneLBLocationMap(F
       for (FileStatus src : srcs) {
         FileStatus[] items;
         if (src.isDir()) {
-          items = srcFs.listStatus(src.getPath(), new PathFilter() {
-            @Override
-            public boolean accept(Path p) {
-              String name = p.getName();
-              return !name.startsWith("_") && !name.startsWith(".");
-            }
-          });
+          items = srcFs.listStatus(src.getPath(), FileUtils.HIDDEN_FILES_PATH_FILTER);
           Arrays.sort(items);
         } else {
           items = new FileStatus[] {src};
@@ -2267,9 +2262,10 @@ private void constructOneLBLocationMap(F
           }
 
           if (!conf.getBoolVar(HiveConf.ConfVars.HIVE_HADOOP_SUPPORTS_SUBDIRECTORIES) &&
+            !HiveConf.getVar(conf, HiveConf.ConfVars.STAGINGDIR).equals(itemSource.getName()) &&
             item.isDir()) {
             throw new HiveException("checkPaths: " + src.getPath()
-                + " has nested directory" + itemSource);
+                + " has nested directory " + itemSource);
           }
           // Strip off the file type, if any so we don't make:
           // 000000_0.gz -> 000000_0.gz_copy_1
@@ -2324,7 +2320,7 @@ private void constructOneLBLocationMap(F
   //method is called. when the replace value is true, this method works a little different
   //from mv command if the destf is a directory, it replaces the destf instead of moving under
   //the destf. in this case, the replaced destf still preserves the original destf's permission
-  public static boolean renameFile(HiveConf conf, Path srcf, Path destf,
+  public static boolean moveFile(HiveConf conf, Path srcf, Path destf,
       FileSystem fs, boolean replace, boolean isSrcLocal) throws HiveException {
     boolean success = false;
 
@@ -2333,17 +2329,25 @@ private void constructOneLBLocationMap(F
         HiveConf.ConfVars.HIVE_WAREHOUSE_SUBDIR_INHERIT_PERMS);
     HadoopShims shims = ShimLoader.getHadoopShims();
     HadoopShims.HdfsFileStatus destStatus = null;
+    HadoopShims.HdfsEncryptionShim hdfsEncryptionShim = SessionState.get().getHdfsEncryptionShim();
 
+    // If source path is a subdirectory of the destination path:
+    //   ex: INSERT OVERWRITE DIRECTORY 'target/warehouse/dest4.out' SELECT src.value WHERE src.key >= 300;
+    //   where the staging directory is a subdirectory of the destination directory
+    // (1) Do not delete the dest dir before doing the move operation.
+    // (2) It is assumed that the subdir and the dir are in the same encryption zone.
+    // (3) Move individual files from the src dir to the dest dir.
+    boolean destIsSubDir = FileUtils.isSubDir(srcf, destf, fs);
     try {
       if (inheritPerms || replace) {
         try{
-          destStatus = shims.getFullFileStatus(conf, fs, destf);
+          destStatus = shims.getFullFileStatus(conf, fs, destf.getParent());
           //if destf is an existing directory:
           //if replace is true, delete followed by rename(mv) is equivalent to replace
           //if replace is false, rename (mv) actually move the src under dest dir
           //if destf is an existing file, rename is actually a replace, and do not need
           // to delete the file first
-          if (replace && destStatus.getFileStatus().isDir()) {
+          if (replace && !destIsSubDir) {
             fs.delete(destf, true);
           }
         } catch (FileNotFoundException ignore) {
@@ -2355,14 +2359,39 @@ private void constructOneLBLocationMap(F
       }
       if (!isSrcLocal) {
         // For NOT local src file, rename the file
-        success = fs.rename(srcf, destf);
+        if (hdfsEncryptionShim != null && (hdfsEncryptionShim.isPathEncrypted(srcf) || hdfsEncryptionShim.isPathEncrypted(destf))
+            && !hdfsEncryptionShim.arePathsOnSameEncryptionZone(srcf, destf))
+        {
+          LOG.info("Copying source " + srcf + " to " + destf + " because HDFS encryption zones are different.");
+          success = FileUtils.copy(srcf.getFileSystem(conf), srcf, destf.getFileSystem(conf), destf,
+              true,    // delete source
+              replace, // overwrite destination
+              conf);
+        } else {
+          if (destIsSubDir) {
+            FileStatus[] srcs = fs.listStatus(srcf, FileUtils.HIDDEN_FILES_PATH_FILTER);
+            for (FileStatus status : srcs) {
+              success = FileUtils.copy(srcf.getFileSystem(conf), status.getPath(), destf.getFileSystem(conf), destf,
+                  true,     // delete source
+                  replace,  // overwrite destination
+                  conf);
+
+              if (!success) {
+                throw new HiveException("Unable to move source " + status.getPath() + " to destination " + destf);
+              }
+            }
+          } else {
+            success = fs.rename(srcf, destf);
+          }
+        }
       } else {
         // For local src file, copy to hdfs
         fs.copyFromLocalFile(srcf, destf);
         success = true;
       }
-      LOG.info((replace ? "Replacing src:" : "Renaming src:") + srcf.toString()
-          + ";dest: " + destf.toString()  + ";Status:" + success);
+
+      LOG.info((replace ? "Replacing src: " : "Renaming src: ") + srcf.toString()
+          + ", dest: " + destf.toString() + ", status: " + success);
     } catch (IOException ioe) {
       throw new HiveException("Unable to move source " + srcf + " to destination " + destf, ioe);
     }
@@ -2429,7 +2458,7 @@ private void constructOneLBLocationMap(F
       try {
         for (List<Path[]> sdpairs : result) {
           for (Path[] sdpair : sdpairs) {
-            if (!renameFile(conf, sdpair[0], sdpair[1], fs, false, isSrcLocal)) {
+            if (!moveFile(conf, sdpair[0], sdpair[1], fs, false, isSrcLocal)) {
               throw new IOException("Cannot move " + sdpair[0] + " to "
                   + sdpair[1]);
             }
@@ -2560,11 +2589,16 @@ private void constructOneLBLocationMap(F
         try {
           FileSystem fs2 = oldPath.getFileSystem(conf);
           if (fs2.exists(oldPath)) {
-            FileUtils.trashFilesUnderDir(fs2, oldPath, conf);
+            // Do not delete oldPath if:
+            //  - destf is subdir of oldPath
+            //if ( !(fs2.equals(destf.getFileSystem(conf)) && FileUtils.isSubDir(oldPath, destf, fs2)))
+            if (FileUtils.isSubDir(oldPath, destf, fs2)) {
+              FileUtils.trashFilesUnderDir(fs2, oldPath, conf);
+            }
           }
         } catch (Exception e) {
           //swallow the exception
-          LOG.warn("Directory " + oldPath.toString() + " canot be removed:" + StringUtils.stringifyException(e));
+          LOG.warn("Directory " + oldPath.toString() + " cannot be removed: " + StringUtils.stringifyException(e), e);
         }
       }
 
@@ -2578,15 +2612,34 @@ private void constructOneLBLocationMap(F
             LOG.warn("Error creating directory " + destf.toString());
           }
           if (inheritPerms && success) {
-            destFs.setPermission(destfp, destFs.getFileStatus(destfp.getParent()).getPermission());
+            FsPermission perm = destFs.getFileStatus(destfp.getParent()).getPermission();
+            LOG.debug("Setting permissions on " + destfp + " to " + perm);
+            destFs.setPermission(destfp, perm);
           }
         }
 
-        boolean b = renameFile(conf, srcs[0].getPath(), destf, destFs, true,
-            isSrcLocal);
-        if (!b) {
-          throw new HiveException("Unable to move results from " + srcs[0].getPath()
-              + " to destination directory: " + destf);
+        // Copy/move each file under the source directory to avoid deleting the destination
+        // directory if it is the root of an HDFS encryption zone.
+        for (List<Path[]> sdpairs : result) {
+          for (Path[] sdpair : sdpairs) {
+            Path destParent = sdpair[1].getParent();
+            FileSystem destParentFs = destParent.getFileSystem(conf);
+            if (!destParentFs.isDirectory(destParent)) {
+              boolean success = destFs.mkdirs(destParent);
+              if (!success) {
+                LOG.warn("Error creating directory " + destParent);
+              }
+              if (inheritPerms && success) {
+                FsPermission perm = destFs.getFileStatus(destParent.getParent()).getPermission();
+                LOG.debug("Setting permissions on " + destParent + " to " + perm);
+                destFs.setPermission(destParent, perm);
+              }
+            }
+            if (!moveFile(conf, sdpair[0], sdpair[1], destFs, true, isSrcLocal)) {
+              throw new IOException("Unable to move file/directory from " + sdpair[0] +
+                  " to " + sdpair[1]);
+            }
+          }
         }
       } else { // srcf is a file or pattern containing wildcards
         if (!destFs.exists(destf)) {
@@ -2595,13 +2648,15 @@ private void constructOneLBLocationMap(F
             LOG.warn("Error creating directory " + destf.toString());
           }
           if (inheritPerms && success) {
-            destFs.setPermission(destf, destFs.getFileStatus(destf.getParent()).getPermission());
+            FsPermission perm = destFs.getFileStatus(destf.getParent()).getPermission();
+            LOG.debug("Setting permissions on " + destf + " to " + perm);
+            destFs.setPermission(destf, perm);
           }
         }
         // srcs must be a list of files -- ensured by LoadSemanticAnalyzer
         for (List<Path[]> sdpairs : result) {
           for (Path[] sdpair : sdpairs) {
-            if (!renameFile(conf, sdpair[0], sdpair[1], destFs, true,
+            if (!moveFile(conf, sdpair[0], sdpair[1], destFs, true,
                 isSrcLocal)) {
               throw new IOException("Error moving: " + sdpair[0] + " into: " + sdpair[1]);
             }

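Condensed, the reworked moveFile chooses among three strategies for a non-local source. The sketch below captures only the decision; the shim and FileUtils calls are abstracted behind booleans, and all names here are illustrative:

    public class MoveStrategySketch {
      enum Strategy { COPY_ACROSS_ZONES, COPY_FILES_UNDER_DEST, RENAME }

      // srcInsideDest corresponds to FileUtils.isSubDir(srcf, destf, fs) in the patch.
      static Strategy choose(boolean srcEncrypted, boolean dstEncrypted,
          boolean sameEncryptionZone, boolean srcInsideDest) {
        if ((srcEncrypted || dstEncrypted) && !sameEncryptionZone) {
          // rename() cannot cross encryption zones, so copy and delete the source.
          return Strategy.COPY_ACROSS_ZONES;
        }
        if (srcInsideDest) {
          // The staging dir lives under the destination: move files one by one and
          // never delete the destination directory itself.
          return Strategy.COPY_FILES_UNDER_DEST;
        }
        return Strategy.RENAME; // plain fs.rename(srcf, destf)
      }

      public static void main(String[] args) {
        System.out.println(choose(true, false, false, false)); // COPY_ACROSS_ZONES
        System.out.println(choose(false, false, true, true));  // COPY_FILES_UNDER_DEST
        System.out.println(choose(false, false, true, false)); // RENAME
      }
    }
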
Modified: hive/branches/HIVE-8065/ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveMetaStoreChecker.java
URL: http://svn.apache.org/viewvc/hive/branches/HIVE-8065/ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveMetaStoreChecker.java?rev=1640247&r1=1640246&r2=1640247&view=diff
==============================================================================
--- hive/branches/HIVE-8065/ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveMetaStoreChecker.java (original)
+++ hive/branches/HIVE-8065/ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveMetaStoreChecker.java Mon Nov 17 22:36:47 2014
@@ -30,6 +30,7 @@ import org.apache.commons.logging.LogFac
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hive.common.FileUtils;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.metastore.MetaStoreUtils;
 import org.apache.hadoop.hive.metastore.Warehouse;
@@ -147,7 +148,7 @@ public class HiveMetaStoreChecker {
 
     for (Path dbPath : dbPaths) {
       FileSystem fs = dbPath.getFileSystem(conf);
-      FileStatus[] statuses = fs.listStatus(dbPath);
+      FileStatus[] statuses = fs.listStatus(dbPath, FileUtils.HIDDEN_FILES_PATH_FILTER);
       for (FileStatus status : statuses) {
 
         if (status.isDir() && !tableNames.contains(status.getPath().getName())) {
@@ -362,7 +363,7 @@ public class HiveMetaStoreChecker {
   private void getAllLeafDirs(Path basePath, Set<Path> allDirs, FileSystem fs)
       throws IOException {
 
-    FileStatus[] statuses = fs.listStatus(basePath);
+    FileStatus[] statuses = fs.listStatus(basePath, FileUtils.HIDDEN_FILES_PATH_FILTER);
     boolean directoryFound=false;
 
     for (FileStatus status : statuses) {

Modified: hive/branches/HIVE-8065/ql/src/java/org/apache/hadoop/hive/ql/metadata/Partition.java
URL: http://svn.apache.org/viewvc/hive/branches/HIVE-8065/ql/src/java/org/apache/hadoop/hive/ql/metadata/Partition.java?rev=1640247&r1=1640246&r2=1640247&view=diff
==============================================================================
--- hive/branches/HIVE-8065/ql/src/java/org/apache/hadoop/hive/ql/metadata/Partition.java (original)
+++ hive/branches/HIVE-8065/ql/src/java/org/apache/hadoop/hive/ql/metadata/Partition.java Mon Nov 17 22:36:47 2014
@@ -32,7 +32,7 @@ import org.apache.commons.logging.LogFac
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hive.common.JavaUtils;
+import org.apache.hadoop.hive.common.FileUtils;
 import org.apache.hadoop.hive.metastore.MetaStoreUtils;
 import org.apache.hadoop.hive.metastore.ProtectMode;
 import org.apache.hadoop.hive.metastore.Warehouse;
@@ -348,7 +348,7 @@ public class Partition implements Serial
      * partition String pathPattern = this.partPath.toString() + "/*"; try {
      * FileSystem fs = FileSystem.get(this.table.getDataLocation(),
      * Hive.get().getConf()); FileStatus srcs[] = fs.globStatus(new
-     * Path(pathPattern)); numBuckets = srcs.length; } catch (Exception e) {
+     * Path(pathPattern), FileUtils.HIDDEN_FILES_PATH_FILTER); numBuckets = srcs.length; } catch (Exception e) {
      * throw new RuntimeException("Cannot get bucket count for table " +
      * this.table.getName(), e); } } return numBuckets;
      */
@@ -384,7 +384,7 @@ public class Partition implements Serial
         pathPattern = pathPattern + "/*";
       }
       LOG.info("Path pattern = " + pathPattern);
-      FileStatus srcs[] = fs.globStatus(new Path(pathPattern));
+      FileStatus srcs[] = fs.globStatus(new Path(pathPattern), FileUtils.HIDDEN_FILES_PATH_FILTER);
       Arrays.sort(srcs);
       for (FileStatus src : srcs) {
         LOG.info("Got file: " + src.getPath());

Modified: hive/branches/HIVE-8065/ql/src/java/org/apache/hadoop/hive/ql/metadata/Table.java
URL: http://svn.apache.org/viewvc/hive/branches/HIVE-8065/ql/src/java/org/apache/hadoop/hive/ql/metadata/Table.java?rev=1640247&r1=1640246&r2=1640247&view=diff
==============================================================================
--- hive/branches/HIVE-8065/ql/src/java/org/apache/hadoop/hive/ql/metadata/Table.java (original)
+++ hive/branches/HIVE-8065/ql/src/java/org/apache/hadoop/hive/ql/metadata/Table.java Mon Nov 17 22:36:47 2014
@@ -35,6 +35,7 @@ import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.common.FileUtils;
 import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
 import org.apache.hadoop.hive.metastore.MetaStoreUtils;
 import org.apache.hadoop.hive.metastore.ProtectMode;
@@ -978,7 +979,7 @@ public class Table implements Serializab
         pathPattern = pathPattern + "/*";
       }
       LOG.info("Path pattern = " + pathPattern);
-      FileStatus srcs[] = fs.globStatus(new Path(pathPattern));
+      FileStatus srcs[] = fs.globStatus(new Path(pathPattern), FileUtils.HIDDEN_FILES_PATH_FILTER);
       Arrays.sort(srcs);
       for (FileStatus src : srcs) {
         LOG.info("Got file: " + src.getPath());

Modified: hive/branches/HIVE-8065/ql/src/java/org/apache/hadoop/hive/ql/optimizer/AbstractBucketJoinProc.java
URL: http://svn.apache.org/viewvc/hive/branches/HIVE-8065/ql/src/java/org/apache/hadoop/hive/ql/optimizer/AbstractBucketJoinProc.java?rev=1640247&r1=1640246&r2=1640247&view=diff
==============================================================================
--- hive/branches/HIVE-8065/ql/src/java/org/apache/hadoop/hive/ql/optimizer/AbstractBucketJoinProc.java (original)
+++ hive/branches/HIVE-8065/ql/src/java/org/apache/hadoop/hive/ql/optimizer/AbstractBucketJoinProc.java Mon Nov 17 22:36:47 2014
@@ -34,6 +34,7 @@ import org.apache.commons.logging.LogFac
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hive.common.FileUtils;
 import org.apache.hadoop.hive.ql.ErrorMsg;
 import org.apache.hadoop.hive.ql.exec.MapJoinOperator;
 import org.apache.hadoop.hive.ql.exec.Operator;
@@ -80,7 +81,7 @@ abstract public class AbstractBucketJoin
     List<String> fileNames = new ArrayList<String>();
     try {
       FileSystem fs = location.getFileSystem(pGraphContext.getConf());
-      FileStatus[] files = fs.listStatus(new Path(location.toString()));
+      FileStatus[] files = fs.listStatus(new Path(location.toString()), FileUtils.HIDDEN_FILES_PATH_FILTER);
       if (files != null) {
         for (FileStatus file : files) {
           fileNames.add(file.getPath().toString());

Modified: hive/branches/HIVE-8065/ql/src/java/org/apache/hadoop/hive/ql/optimizer/IndexUtils.java
URL: http://svn.apache.org/viewvc/hive/branches/HIVE-8065/ql/src/java/org/apache/hadoop/hive/ql/optimizer/IndexUtils.java?rev=1640247&r1=1640246&r2=1640247&view=diff
==============================================================================
--- hive/branches/HIVE-8065/ql/src/java/org/apache/hadoop/hive/ql/optimizer/IndexUtils.java (original)
+++ hive/branches/HIVE-8065/ql/src/java/org/apache/hadoop/hive/ql/optimizer/IndexUtils.java Mon Nov 17 22:36:47 2014
@@ -29,6 +29,7 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.hive.common.FileUtils;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.metastore.api.Index;
 import org.apache.hadoop.hive.ql.Driver;
@@ -123,22 +124,23 @@ public final class IndexUtils {
       Partition part) throws HiveException {
     LOG.info("checking index staleness...");
     try {
-      FileSystem partFs = part.getDataLocation().getFileSystem(hive.getConf());
-      FileStatus partFss = partFs.getFileStatus(part.getDataLocation());
-      String ts = index.getParameters().get(part.getSpec().toString());
-      if (ts == null) {
+      String indexTs = index.getParameters().get(part.getSpec().toString());
+      if (indexTs == null) {
         return false;
       }
-      long indexTs = Long.parseLong(ts);
-      LOG.info(partFss.getModificationTime());
-      LOG.info(ts);
-      if (partFss.getModificationTime() > indexTs) {
-        LOG.info("index is stale on the partitions that matched " + part.getSpec());
-        return false;
+
+      FileSystem partFs = part.getDataLocation().getFileSystem(hive.getConf());
+      FileStatus[] parts = partFs.listStatus(part.getDataLocation(), FileUtils.HIDDEN_FILES_PATH_FILTER);
+      for (FileStatus status : parts) {
+        if (status.getModificationTime() > Long.parseLong(indexTs)) {
+          LOG.info("Index is stale on partition '" + part.getName()
+              + "'. Modified time (" + status.getModificationTime() + ") for '" + status.getPath()
+              + "' is higher than index creation time (" + indexTs + ").");
+          return false;
+        }
       }
     } catch (IOException e) {
-      LOG.info("failed to grab timestamp info");
-      throw new HiveException(e);
+      throw new HiveException("Failed to grab timestamp information from partition '" + part.getName() + "': " + e.getMessage(), e);
     }
     return true;
   }
@@ -156,22 +158,23 @@ public final class IndexUtils {
     for (Index index : indexes) {
       LOG.info("checking index staleness...");
       try {
-        FileSystem srcFs = src.getPath().getFileSystem(hive.getConf());
-        FileStatus srcFss= srcFs.getFileStatus(src.getPath());
-        String ts = index.getParameters().get("base_timestamp");
-        if (ts == null) {
+        String indexTs = index.getParameters().get("base_timestamp");
+        if (indexTs == null) {
           return false;
         }
-        long indexTs = Long.parseLong(ts);
-        LOG.info(srcFss.getModificationTime());
-        LOG.info(ts);
-        if (srcFss.getModificationTime() > indexTs) {
-          LOG.info("index is stale ");
-          return false;
+
+        FileSystem srcFs = src.getPath().getFileSystem(hive.getConf());
+        FileStatus[] srcs = srcFs.listStatus(src.getPath(), FileUtils.HIDDEN_FILES_PATH_FILTER);
+        for (FileStatus status : srcs) {
+          if (status.getModificationTime() > Long.parseLong(indexTs)) {
+            LOG.info("Index is stale on table '" + src.getTableName()
+                + "'. Modified time (" + status.getModificationTime() + ") for '" + status.getPath()
+                + "' is higher than index creation time (" + indexTs + ").");
+            return false;
+          }
         }
       } catch (IOException e) {
-        LOG.info("failed to grab timestamp info");
-        throw new HiveException(e);
+        throw new HiveException("Failed to grab timestamp information from table '" + src.getTableName() + "': " + e.getMessage(), e);
       }
     }
     return true;
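
Both staleness checks now share the same shape: instead of reading the
directory's own modification time, they list the visible files and declare
the index stale as soon as any file is newer than the recorded timestamp.
A condensed sketch of that predicate (the helper name is illustrative, not
part of the patch):

    private static boolean isStale(FileSystem fs, Path location, long indexTs)
        throws IOException {
      // Stale if any non-hidden file under the location was modified after
      // the index was last built.
      for (FileStatus status : fs.listStatus(location, FileUtils.HIDDEN_FILES_PATH_FILTER)) {
        if (status.getModificationTime() > indexTs) {
          return true;
        }
      }
      return false;
    }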

Modified: hive/branches/HIVE-8065/ql/src/java/org/apache/hadoop/hive/ql/parse/ExportSemanticAnalyzer.java
URL: http://svn.apache.org/viewvc/hive/branches/HIVE-8065/ql/src/java/org/apache/hadoop/hive/ql/parse/ExportSemanticAnalyzer.java?rev=1640247&r1=1640246&r2=1640247&view=diff
==============================================================================
--- hive/branches/HIVE-8065/ql/src/java/org/apache/hadoop/hive/ql/parse/ExportSemanticAnalyzer.java (original)
+++ hive/branches/HIVE-8065/ql/src/java/org/apache/hadoop/hive/ql/parse/ExportSemanticAnalyzer.java Mon Nov 17 22:36:47 2014
@@ -70,7 +70,7 @@ public class ExportSemanticAnalyzer exte
           throw new SemanticException(ErrorMsg.INVALID_PATH.getMsg(ast,
                     "Target is not a directory : " + toURI));
         } else {
-          FileStatus[] files = fs.listStatus(toPath);
+          FileStatus[] files = fs.listStatus(toPath, FileUtils.HIDDEN_FILES_PATH_FILTER);
           if (files != null && files.length != 0) {
             throw new SemanticException(ErrorMsg.INVALID_PATH.getMsg(ast,
                           "Target is not an empty directory : " + toURI));

Modified: hive/branches/HIVE-8065/ql/src/java/org/apache/hadoop/hive/ql/parse/ImportSemanticAnalyzer.java
URL: http://svn.apache.org/viewvc/hive/branches/HIVE-8065/ql/src/java/org/apache/hadoop/hive/ql/parse/ImportSemanticAnalyzer.java?rev=1640247&r1=1640246&r2=1640247&view=diff
==============================================================================
--- hive/branches/HIVE-8065/ql/src/java/org/apache/hadoop/hive/ql/parse/ImportSemanticAnalyzer.java (original)
+++ hive/branches/HIVE-8065/ql/src/java/org/apache/hadoop/hive/ql/parse/ImportSemanticAnalyzer.java Mon Nov 17 22:36:47 2014
@@ -360,7 +360,7 @@ public class ImportSemanticAnalyzer exte
       throws IOException, SemanticException {
     LOG.debug("checking emptiness of " + targetPath.toString());
     if (fs.exists(targetPath)) {
-      FileStatus[] status = fs.listStatus(targetPath);
+      FileStatus[] status = fs.listStatus(targetPath, FileUtils.HIDDEN_FILES_PATH_FILTER);
       if (status.length > 0) {
         LOG.debug("Files inc. " + status[0].getPath().toString()
             + " found in path : " + targetPath.toString());

Modified: hive/branches/HIVE-8065/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
URL: http://svn.apache.org/viewvc/hive/branches/HIVE-8065/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java?rev=1640247&r1=1640246&r2=1640247&view=diff
==============================================================================
--- hive/branches/HIVE-8065/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java (original)
+++ hive/branches/HIVE-8065/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java Mon Nov 17 22:36:47 2014
@@ -22,14 +22,15 @@ import static org.apache.hadoop.hive.con
 
 import java.io.IOException;
 import java.io.Serializable;
+import java.security.AccessControlException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.HashMap;
 import java.lang.reflect.Field;
 import java.lang.reflect.InvocationTargetException;
 import java.lang.reflect.UndeclaredThrowableException;
 import java.math.BigDecimal;
-import java.util.ArrayList;
-import java.util.Arrays;
 import java.util.BitSet;
-import java.util.HashMap;
 import java.util.HashSet;
 import java.util.Iterator;
 import java.util.LinkedHashMap;
@@ -58,8 +59,10 @@ import org.antlr.runtime.tree.TreeWizard
 import org.antlr.runtime.tree.TreeWizard.ContextVisitor;
 import org.apache.commons.lang.StringUtils;
 import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.hive.common.FileUtils;
 import org.apache.hadoop.hive.common.ObjectPair;
 import org.apache.hadoop.hive.common.StatsSetupConst;
@@ -236,8 +239,14 @@ import org.apache.hadoop.hive.serde2.typ
 import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo;
 import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory;
 import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils;
+import org.apache.hadoop.hive.shims.HadoopShims;
+import org.apache.hadoop.hive.shims.ShimLoader;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.mapred.InputFormat;
+import org.apache.hadoop.security.UserGroupInformation;
+
+import javax.security.auth.login.LoginException;
+
 import org.eigenbase.rel.AggregateCall;
 import org.eigenbase.rel.AggregateRelBase;
 import org.eigenbase.rel.Aggregation;
@@ -1790,7 +1799,7 @@ public class SemanticAnalyzer extends Ba
                 throw new SemanticException(e);
               }
               try {
-                fname = ctx.getExternalTmpPath(
+                fname = ctx.getExtTmpPathRelTo(
                     FileUtils.makeQualified(location, conf)).toString();
               } catch (Exception e) {
                 throw new SemanticException(generateErrorMessage(ast,
@@ -1806,7 +1815,12 @@ public class SemanticAnalyzer extends Ba
             } else {
               // This is the only place where isQuery is set to true; it defaults to false.
               qb.setIsQuery(true);
-              fname = ctx.getMRTmpPath().toString();
+              Path tablePath = getStrongestEncryptedTablePath(qb);
+              if (tablePath == null) {
+                fname = ctx.getMRTmpPath().toString();
+              } else {
+                fname = ctx.getMRTmpPath(tablePath.toUri()).toString();
+              }
               ctx.setResDir(new Path(fname));
             }
           }
@@ -1863,6 +1877,68 @@ public class SemanticAnalyzer extends Ba
     }
   }
 
+  /**
+   * Gets the strongest encrypted table path.
+   *
+   * @param qb The QB object that contains a list of all table locations.
+   * @return The strongest encrypted path
+   * @throws HiveException if an error occurred attempting to compare the encryption strength
+   */
+  private Path getStrongestEncryptedTablePath(QB qb) throws HiveException {
+    List<String> tabAliases = new ArrayList<String>(qb.getTabAliases());
+    Path strongestPath = null;
+    HadoopShims.HdfsEncryptionShim hdfsEncryptionShim = SessionState.get().getHdfsEncryptionShim();
+
+    /* Walk through all found table locations to get the most encrypted table */
+    for (String alias : tabAliases) {
+      Table tab = qb.getMetaData().getTableForAlias(alias);
+      if (tab != null) {
+        Path tablePath = tab.getDataLocation();
+        if (tablePath != null) {
+          try {
+            if (strongestPath == null) {
+              strongestPath = tablePath;
+            } else if (hdfsEncryptionShim != null
+                && "hdfs".equals(tablePath.toUri().getScheme())
+                && hdfsEncryptionShim.isPathEncrypted(tablePath)) {
+              if (hdfsEncryptionShim.comparePathKeyStrength(tablePath, strongestPath) > 0) {
+                strongestPath = tablePath;
+              }
+            }
+          } catch (IOException e) {
+            throw new HiveException("Cannot search for the most secure table path", e);
+          }
+        }
+      }
+    }
+
+    /* Check for writing permissions on the selected location. */
+    if (strongestPath != null && "hdfs".equals(strongestPath.toUri().getScheme())) {
+      try {
+        FileSystem fs = strongestPath.getFileSystem(SessionState.get().getConf());
+        UserGroupInformation ugi = ShimLoader.getHadoopShims().getUGIForConf(SessionState.get().getConf());
+        FileStatus status = fs.getFileStatus(strongestPath);
+
+        FileUtils.checkFileAccessWithImpersonation(fs, status, FsAction.WRITE, ugi.getUserName());
+      } catch (AccessControlException e) {
+        try {
+          if (hdfsEncryptionShim == null || !hdfsEncryptionShim.isPathEncrypted(strongestPath)) {
+            strongestPath = null;
+          } else {
+            throw new HiveException(e.getMessage(), e);
+          }
+        } catch (IOException e1) {
+          throw new HiveException(e.getMessage(), e);
+        }
+      } catch (Exception e) {
+        throw new HiveException(e.getMessage(), e);
+      }
+    }
+
+    return strongestPath;
+  }
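
For context, the earlier SemanticAnalyzer hunk (where isQuery is set) consumes
this helper: when a query reads an encrypted table, the staging directory for
results is derived from the strongest encrypted location rather than the
default scratch dir. A compressed, illustrative view of that call site:

    Path tablePath = getStrongestEncryptedTablePath(qb);
    fname = (tablePath == null)
        ? ctx.getMRTmpPath().toString()                   // default scratch dir
        : ctx.getMRTmpPath(tablePath.toUri()).toString(); // stays in the encrypted zone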
+
   private void replaceViewReferenceWithDefinition(QB qb, Table tab,
       String tab_name, String alias) throws SemanticException {
 
@@ -6050,7 +6126,7 @@ public class SemanticAnalyzer extends Ba
       if (isNonNativeTable) {
         queryTmpdir = dest_path;
       } else {
-        queryTmpdir = ctx.getExternalTmpPath(dest_path);
+        queryTmpdir = ctx.getExtTmpPathRelTo(dest_path);
       }
       if (dpCtx != null) {
         // set the root of the temporary path where dynamic partition columns will populate
@@ -6231,7 +6307,7 @@ public class SemanticAnalyzer extends Ba
 
         try {
           Path qPath = FileUtils.makeQualified(dest_path, conf);
-          queryTmpdir = ctx.getExternalTmpPath(qPath);
+          queryTmpdir = ctx.getExtTmpPathRelTo(qPath);
         } catch (Exception e) {
           throw new SemanticException("Error creating temporary folder on: "
               + dest_path, e);
@@ -6411,7 +6487,7 @@ public class SemanticAnalyzer extends Ba
     // it should be the same as the MoveWork's sourceDir.
     fileSinkDesc.setStatsAggPrefix(fileSinkDesc.getDirName().toString());
     if (HiveConf.getVar(conf, HIVESTATSDBCLASS).equalsIgnoreCase(StatDB.fs.name())) {
-      String statsTmpLoc = ctx.getExternalTmpPath(queryTmpdir).toString();
+      String statsTmpLoc = ctx.getExtTmpPathRelTo(queryTmpdir).toString();
       LOG.info("Set stats collection dir : " + statsTmpLoc);
       conf.set(StatsSetupConst.STATS_TMP_LOC, statsTmpLoc);
     }
@@ -9541,7 +9617,7 @@ public class SemanticAnalyzer extends Ba
       tsDesc.setGatherStats(false);
     } else {
       if (HiveConf.getVar(conf, HIVESTATSDBCLASS).equalsIgnoreCase(StatDB.fs.name())) {
-        String statsTmpLoc = ctx.getExternalTmpPath(tab.getPath()).toString();
+        String statsTmpLoc = ctx.getExtTmpPathRelTo(tab.getPath()).toString();
         LOG.info("Set stats collection dir : " + statsTmpLoc);
         conf.set(StatsSetupConst.STATS_TMP_LOC, statsTmpLoc);
       }

Modified: hive/branches/HIVE-8065/ql/src/java/org/apache/hadoop/hive/ql/session/SessionState.java
URL: http://svn.apache.org/viewvc/hive/branches/HIVE-8065/ql/src/java/org/apache/hadoop/hive/ql/session/SessionState.java?rev=1640247&r1=1640246&r2=1640247&view=diff
==============================================================================
--- hive/branches/HIVE-8065/ql/src/java/org/apache/hadoop/hive/ql/session/SessionState.java (original)
+++ hive/branches/HIVE-8065/ql/src/java/org/apache/hadoop/hive/ql/session/SessionState.java Mon Nov 17 22:36:47 2014
@@ -64,6 +64,7 @@ import org.apache.hadoop.hive.ql.securit
 import org.apache.hadoop.hive.ql.security.authorization.plugin.HiveAuthzSessionContext.CLIENT_TYPE;
 import org.apache.hadoop.hive.ql.security.authorization.plugin.HiveMetastoreClientFactoryImpl;
 import org.apache.hadoop.hive.ql.util.DosToUnix;
+import org.apache.hadoop.hive.shims.HadoopShims;
 import org.apache.hadoop.hive.shims.ShimLoader;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.util.ReflectionUtils;
@@ -179,6 +180,11 @@ public class SessionState {
   private String userIpAddress;
 
   /**
+   * Gets information about HDFS encryption
+   */
+  private HadoopShims.HdfsEncryptionShim hdfsEncryptionShim;
+
+  /**
    * Lineage state.
    */
   LineageState ls;
@@ -372,6 +378,21 @@ public class SessionState {
     return txnAutoCommit;
   }
 
+  public HadoopShims.HdfsEncryptionShim getHdfsEncryptionShim() throws HiveException {
+    if (hdfsEncryptionShim == null) {
+      try {
+        FileSystem fs = FileSystem.get(conf);
+        if ("hdfs".equals(fs.getUri().getScheme())) {
+          hdfsEncryptionShim = ShimLoader.getHadoopShims().createHdfsEncryptionShim(fs, conf);
+        }
+      } catch (Exception e) {
+        throw new HiveException(e);
+      }
+    }
+
+    return hdfsEncryptionShim;
+  }
+
   /**
    * Singleton Session object per thread.
    *
@@ -404,7 +425,6 @@ public class SessionState {
    * when switching from one session to another.
    */
   public static SessionState start(SessionState startSs) {
-
     setCurrentSessionState(startSs);
 
     if (startSs.hiveHist == null){
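
Callers are expected to obtain the shim lazily through the session and to
tolerate a null result when the default filesystem is not HDFS. A hypothetical
caller pattern (assumed, not part of this commit; the enclosing code would
need to handle HiveException):

    HadoopShims.HdfsEncryptionShim shim = SessionState.get().getHdfsEncryptionShim();
    if (shim != null && shim.isPathEncrypted(path)) {
      // path lies inside an HDFS encryption zone; choose staging dirs accordingly
    }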

Modified: hive/branches/HIVE-8065/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Initiator.java
URL: http://svn.apache.org/viewvc/hive/branches/HIVE-8065/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Initiator.java?rev=1640247&r1=1640246&r2=1640247&view=diff
==============================================================================
--- hive/branches/HIVE-8065/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Initiator.java (original)
+++ hive/branches/HIVE-8065/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Initiator.java Mon Nov 17 22:36:47 2014
@@ -22,6 +22,7 @@ import org.apache.commons.logging.LogFac
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hive.common.FileUtils;
 import org.apache.hadoop.hive.common.ValidTxnList;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.metastore.api.CompactionRequest;
@@ -263,7 +264,7 @@ public class Initiator extends Compactor
 
   private long sumDirSize(FileSystem fs, Path dir) throws IOException {
     long size = 0;
-    FileStatus[] buckets = fs.listStatus(dir);
+    FileStatus[] buckets = fs.listStatus(dir, FileUtils.HIDDEN_FILES_PATH_FILTER);
     for (int i = 0; i < buckets.length; i++) {
       size += buckets[i].getLen();
     }

Modified: hive/branches/HIVE-8065/ql/src/test/org/apache/hadoop/hive/ql/io/TestSymlinkTextInputFormat.java
URL: http://svn.apache.org/viewvc/hive/branches/HIVE-8065/ql/src/test/org/apache/hadoop/hive/ql/io/TestSymlinkTextInputFormat.java?rev=1640247&r1=1640246&r2=1640247&view=diff
==============================================================================
--- hive/branches/HIVE-8065/ql/src/test/org/apache/hadoop/hive/ql/io/TestSymlinkTextInputFormat.java (original)
+++ hive/branches/HIVE-8065/ql/src/test/org/apache/hadoop/hive/ql/io/TestSymlinkTextInputFormat.java Mon Nov 17 22:36:47 2014
@@ -101,7 +101,7 @@ public class TestSymlinkTextInputFormat 
   }
 
   /**
-   * Test combine symlink text input file. Two input dir, and each contails one
+   * Test combine symlink text input file. Two input dir, and each contains one
    * file, and then create one symlink file containing these 2 files. Normally
    * without combine, it will return at least 2 splits
    */
@@ -165,8 +165,12 @@ public class TestSymlinkTextInputFormat 
             + " failed with exit code= " + ecode);
       }
 
-      String cmd = "select key*1 from " + tblName;
-      drv.compile(cmd);
+      String cmd = "select key from " + tblName;
+      ecode = drv.compile(cmd);
+      if (ecode != 0) {
+        throw new Exception("Select compile: " + cmd
+            + " failed with exit code= " + ecode);
+      }
 
       //create scratch dir
       Context ctx = new Context(newJob);

Modified: hive/branches/HIVE-8065/ql/src/test/queries/clientpositive/smb_mapjoin_11.q
URL: http://svn.apache.org/viewvc/hive/branches/HIVE-8065/ql/src/test/queries/clientpositive/smb_mapjoin_11.q?rev=1640247&r1=1640246&r2=1640247&view=diff
==============================================================================
--- hive/branches/HIVE-8065/ql/src/test/queries/clientpositive/smb_mapjoin_11.q (original)
+++ hive/branches/HIVE-8065/ql/src/test/queries/clientpositive/smb_mapjoin_11.q Mon Nov 17 22:36:47 2014
@@ -29,6 +29,13 @@ INSERT OVERWRITE TABLE test_table3 PARTI
 
 INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1') SELECT /*+ MAPJOIN(b) */ a.key, b.value FROM test_table1 a JOIN test_table2 b ON a.key = b.key AND a.ds = '1' AND b.ds = '1';
 
+SELECT * FROM test_table1 ORDER BY key;
+SELECT * FROM test_table3 ORDER BY key;
+EXPLAIN EXTENDED SELECT * FROM test_table1 TABLESAMPLE(BUCKET 2 OUT OF 16);
+EXPLAIN EXTENDED SELECT * FROM test_table3 TABLESAMPLE(BUCKET 2 OUT OF 16);
+SELECT * FROM test_table1 TABLESAMPLE(BUCKET 2 OUT OF 16);
+SELECT * FROM test_table3 TABLESAMPLE(BUCKET 2 OUT OF 16);
+
 -- Join data from a sampled bucket to verify the data is bucketed
 SELECT COUNT(*) FROM test_table3 TABLESAMPLE(BUCKET 2 OUT OF 16) a JOIN test_table1 TABLESAMPLE(BUCKET 2 OUT OF 16) b ON a.key = b.key AND a.ds = '1' AND b.ds='1';
 

Modified: hive/branches/HIVE-8065/ql/src/test/results/clientnegative/fs_default_name2.q.out
URL: http://svn.apache.org/viewvc/hive/branches/HIVE-8065/ql/src/test/results/clientnegative/fs_default_name2.q.out?rev=1640247&r1=1640246&r2=1640247&view=diff
==============================================================================
--- hive/branches/HIVE-8065/ql/src/test/results/clientnegative/fs_default_name2.q.out (original)
+++ hive/branches/HIVE-8065/ql/src/test/results/clientnegative/fs_default_name2.q.out Mon Nov 17 22:36:47 2014
@@ -1 +1 @@
-FAILED: IllegalArgumentException Illegal character in scheme name at index 0: 'http://www.example.com
+FAILED: SemanticException java.lang.IllegalArgumentException: Illegal character in scheme name at index 0: 'http://www.example.com

Modified: hive/branches/HIVE-8065/shims/0.20/src/main/java/org/apache/hadoop/hive/shims/Hadoop20Shims.java
URL: http://svn.apache.org/viewvc/hive/branches/HIVE-8065/shims/0.20/src/main/java/org/apache/hadoop/hive/shims/Hadoop20Shims.java?rev=1640247&r1=1640246&r2=1640247&view=diff
==============================================================================
--- hive/branches/HIVE-8065/shims/0.20/src/main/java/org/apache/hadoop/hive/shims/Hadoop20Shims.java (original)
+++ hive/branches/HIVE-8065/shims/0.20/src/main/java/org/apache/hadoop/hive/shims/Hadoop20Shims.java Mon Nov 17 22:36:47 2014
@@ -82,6 +82,7 @@ import org.apache.hadoop.security.UnixUs
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.tools.HadoopArchives;
 import org.apache.hadoop.util.Progressable;
+import org.apache.hadoop.util.Tool;
 import org.apache.hadoop.util.ToolRunner;
 import org.apache.hadoop.util.VersionInfo;
 
@@ -868,6 +869,7 @@ public class Hadoop20Shims implements Ha
     ret.put("HADOOPSPECULATIVEEXECREDUCERS", "mapred.reduce.tasks.speculative.execution");
     ret.put("MAPREDSETUPCLEANUPNEEDED", "mapred.committer.job.setup.cleanup.needed");
     ret.put("MAPREDTASKCLEANUPNEEDED", "mapreduce.job.committer.task.cleanup.needed");
+    ret.put("HADOOPSECURITYKEYPROVIDER", "hadoop.encryption.is.not.supported");
     return ret;
   }
 
@@ -951,4 +953,34 @@ public class Hadoop20Shims implements Ha
   public void setZookeeperClientKerberosJaasConfig(String principal, String keyTabFile) {
     // Not supported
   }
+
+  @Override
+  public boolean runDistCp(Path src, Path dst, Configuration conf) throws IOException {
+    int rc;
+
+    // Creates the command-line parameters for distcp
+    String[] params = {"-update", "-skipcrccheck", src.toString(), dst.toString()};
+
+    try {
+      Class<?> clazzDistCp = Class.forName("org.apache.hadoop.tools.DistCp");
+      Constructor<?> c = clazzDistCp.getConstructor();
+      c.setAccessible(true);
+      Tool distcp = (Tool)c.newInstance();
+      distcp.setConf(conf);
+      rc = distcp.run(params);
+    } catch (ClassNotFoundException e) {
+      throw new IOException("Cannot find DistCp class package: " + e.getMessage(), e);
+    } catch (NoSuchMethodException e) {
+      throw new IOException("Cannot get DistCp constructor: " + e.getMessage(), e);
+    } catch (Exception e) {
+      throw new IOException("Cannot execute DistCp process: " + e, e);
+    }
+
+    return (0 == rc);
+  }
+
+  @Override
+  public HdfsEncryptionShim createHdfsEncryptionShim(FileSystem fs, Configuration conf) throws IOException {
+    return new HadoopShims.NoopHdfsEncryptionShim();
+  }
 }
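
Each shim version exposes the same runDistCp entry point, so callers resolve
the DistCp implementation that matches the running Hadoop version through
ShimLoader. A minimal usage sketch (assumed, not from the patch; srcPath,
dstPath and conf come from the caller):

    if (!ShimLoader.getHadoopShims().runDistCp(srcPath, dstPath, conf)) {
      throw new IOException("DistCp from " + srcPath + " to " + dstPath + " failed");
    }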

Modified: hive/branches/HIVE-8065/shims/0.20S/src/main/java/org/apache/hadoop/hive/shims/Hadoop20SShims.java
URL: http://svn.apache.org/viewvc/hive/branches/HIVE-8065/shims/0.20S/src/main/java/org/apache/hadoop/hive/shims/Hadoop20SShims.java?rev=1640247&r1=1640246&r2=1640247&view=diff
==============================================================================
--- hive/branches/HIVE-8065/shims/0.20S/src/main/java/org/apache/hadoop/hive/shims/Hadoop20SShims.java (original)
+++ hive/branches/HIVE-8065/shims/0.20S/src/main/java/org/apache/hadoop/hive/shims/Hadoop20SShims.java Mon Nov 17 22:36:47 2014
@@ -18,6 +18,8 @@
 package org.apache.hadoop.hive.shims;
 
 import java.io.IOException;
+import java.lang.reflect.Constructor;
 import java.net.InetSocketAddress;
 import java.net.MalformedURLException;
 import java.net.URI;
@@ -63,6 +65,7 @@ import org.apache.hadoop.mapreduce.TaskI
 import org.apache.hadoop.security.KerberosName;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.util.Progressable;
+import org.apache.hadoop.util.Tool;
 import org.apache.hadoop.util.VersionInfo;
 
 
@@ -491,6 +494,7 @@ public class Hadoop20SShims extends Hado
     ret.put("HADOOPSPECULATIVEEXECREDUCERS", "mapred.reduce.tasks.speculative.execution");
     ret.put("MAPREDSETUPCLEANUPNEEDED", "mapred.committer.job.setup.cleanup.needed");
     ret.put("MAPREDTASKCLEANUPNEEDED", "mapreduce.job.committer.task.cleanup.needed");
+    ret.put("HADOOPSECURITYKEYPROVIDER", "hadoop.encryption.is.not.supported");
     return ret;
   }
 
@@ -592,4 +596,34 @@ public class Hadoop20SShims extends Hado
       return kerberosName.getShortName();
     }
   }
+
+  @Override
+  public boolean runDistCp(Path src, Path dst, Configuration conf) throws IOException {
+    int rc;
+
+    // Creates the command-line parameters for distcp
+    String[] params = {"-update", "-skipcrccheck", src.toString(), dst.toString()};
+
+    try {
+      Class<?> clazzDistCp = Class.forName("org.apache.hadoop.tools.distcp2.DistCp");
+      Constructor<?> c = clazzDistCp.getConstructor();
+      c.setAccessible(true);
+      Tool distcp = (Tool)c.newInstance();
+      distcp.setConf(conf);
+      rc = distcp.run(params);
+    } catch (ClassNotFoundException e) {
+      throw new IOException("Cannot find DistCp class package: " + e.getMessage(), e);
+    } catch (NoSuchMethodException e) {
+      throw new IOException("Cannot get DistCp constructor: " + e.getMessage(), e);
+    } catch (Exception e) {
+      throw new IOException("Cannot execute DistCp process: " + e, e);
+    }
+
+    return (0 == rc);
+  }
+
+  @Override
+  public HdfsEncryptionShim createHdfsEncryptionShim(FileSystem fs, Configuration conf) throws IOException {
+    return new HadoopShims.NoopHdfsEncryptionShim();
+  }
 }

Modified: hive/branches/HIVE-8065/shims/0.23/pom.xml
URL: http://svn.apache.org/viewvc/hive/branches/HIVE-8065/shims/0.23/pom.xml?rev=1640247&r1=1640246&r2=1640247&view=diff
==============================================================================
--- hive/branches/HIVE-8065/shims/0.23/pom.xml (original)
+++ hive/branches/HIVE-8065/shims/0.23/pom.xml Mon Nov 17 22:36:47 2014
@@ -139,4 +139,17 @@
      <type>test-jar</type>
    </dependency>
   </dependencies>
+
+  <profiles>
+    <profile>
+      <id>hadoop-2</id>
+      <dependencies>
+        <dependency>
+          <groupId>org.apache.hadoop</groupId>
+          <artifactId>hadoop-distcp</artifactId>
+          <version>${hadoop-23.version}</version>
+        </dependency>
+      </dependencies>
+    </profile>
+  </profiles>
 </project>

Modified: hive/branches/HIVE-8065/shims/0.23/src/main/java/org/apache/hadoop/hive/shims/Hadoop23Shims.java
URL: http://svn.apache.org/viewvc/hive/branches/HIVE-8065/shims/0.23/src/main/java/org/apache/hadoop/hive/shims/Hadoop23Shims.java?rev=1640247&r1=1640246&r2=1640247&view=diff
==============================================================================
--- hive/branches/HIVE-8065/shims/0.23/src/main/java/org/apache/hadoop/hive/shims/Hadoop23Shims.java (original)
+++ hive/branches/HIVE-8065/shims/0.23/src/main/java/org/apache/hadoop/hive/shims/Hadoop23Shims.java Mon Nov 17 22:36:47 2014
@@ -19,10 +19,12 @@ package org.apache.hadoop.hive.shims;
 
 import java.io.FileNotFoundException;
 import java.io.IOException;
+import java.lang.reflect.Constructor;
 import java.lang.reflect.Method;
 import java.net.InetSocketAddress;
 import java.net.MalformedURLException;
 import java.net.URI;
+import java.net.URISyntaxException;
 import java.security.AccessControlException;
 import java.util.ArrayList;
 import java.util.Comparator;
@@ -33,6 +35,8 @@ import java.util.TreeMap;
 
 import org.apache.commons.lang.StringUtils;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.crypto.key.KeyProvider;
+import org.apache.hadoop.crypto.key.kms.KMSClientProvider;
 import org.apache.hadoop.fs.BlockLocation;
 import org.apache.hadoop.fs.DefaultFileAccess;
 import org.apache.hadoop.fs.FSDataInputStream;
@@ -54,6 +58,8 @@ import org.apache.hadoop.fs.permission.A
 import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.client.HdfsAdmin;
+import org.apache.hadoop.hdfs.protocol.EncryptionZone;
 import org.apache.hadoop.io.LongWritable;
 import org.apache.hadoop.mapred.ClusterStatus;
 import org.apache.hadoop.mapred.JobConf;
@@ -75,6 +81,7 @@ import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.authentication.util.KerberosName;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.util.Progressable;
+import org.apache.hadoop.util.Tool;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.tez.test.MiniTezCluster;
 
@@ -735,6 +742,7 @@ public class Hadoop23Shims extends Hadoo
     ret.put("HADOOPSPECULATIVEEXECREDUCERS", "mapreduce.reduce.speculative");
     ret.put("MAPREDSETUPCLEANUPNEEDED", "mapreduce.job.committer.setup.cleanup.needed");
     ret.put("MAPREDTASKCLEANUPNEEDED", "mapreduce.job.committer.task.cleanup.needed");
+    ret.put("HADOOPSECURITYKEYPROVIDER", "hadoop.security.key.provider.path");
     return ret;
  }
 
@@ -904,4 +912,131 @@ public class Hadoop23Shims extends Hadoo
       return kerberosName.getShortName();
     }
   }
+
+  @Override
+  public boolean runDistCp(Path src, Path dst, Configuration conf) throws IOException {
+    int rc;
+
+    // Creates the command-line parameters for distcp
+    String[] params = {"-update", "-skipcrccheck", src.toString(), dst.toString()};
+
+    try {
+      Class<?> clazzDistCp = Class.forName("org.apache.hadoop.tools.DistCp");
+      Constructor<?> c = clazzDistCp.getConstructor();
+      c.setAccessible(true);
+      Tool distcp = (Tool)c.newInstance();
+      distcp.setConf(conf);
+      rc = distcp.run(params);
+    } catch (ClassNotFoundException e) {
+      throw new IOException("Cannot find DistCp class package: " + e.getMessage(), e);
+    } catch (NoSuchMethodException e) {
+      throw new IOException("Cannot get DistCp constructor: " + e.getMessage(), e);
+    } catch (Exception e) {
+      throw new IOException("Cannot execute DistCp process: " + e, e);
+    }
+
+    return (0 == rc);
+  }
+
+  public static class HdfsEncryptionShim implements HadoopShims.HdfsEncryptionShim {
+    /**
+     * Gets information about key encryption metadata
+     */
+    private KeyProvider keyProvider = null;
+
+    /**
+     * Gets information about HDFS encryption zones
+     */
+    private HdfsAdmin hdfsAdmin = null;
+
+    public HdfsEncryptionShim(URI uri, Configuration conf) throws IOException {
+      hdfsAdmin = new HdfsAdmin(uri, conf);
+
+      try {
+        String keyProviderPath = conf.get(ShimLoader.getHadoopShims().getHadoopConfNames().get("HADOOPSECURITYKEYPROVIDER"), null);
+        if (keyProviderPath != null) {
+          keyProvider = new KMSClientProvider(new URI(keyProviderPath), conf);
+        }
+      } catch (URISyntaxException e) {
+        throw new IOException("Invalid HDFS security key provider path", e);
+      } catch (Exception e) {
+        throw new IOException("Cannot create HDFS security object: ", e);
+      }
+    }
+
+    @Override
+    public boolean isPathEncrypted(Path path) throws IOException {
+      return (hdfsAdmin.getEncryptionZoneForPath(path) != null);
+    }
+
+    @Override
+    public boolean arePathsOnSameEncryptionZone(Path path1, Path path2) throws IOException {
+      EncryptionZone zone1, zone2;
+
+      zone1 = hdfsAdmin.getEncryptionZoneForPath(path1);
+      zone2 = hdfsAdmin.getEncryptionZoneForPath(path2);
+
+      if (zone1 == null && zone2 == null) {
+        return true;
+      } else if (zone1 == null || zone2 == null) {
+        return false;
+      }
+
+      return zone1.equals(zone2);
+    }
+
+    @Override
+    public int comparePathKeyStrength(Path path1, Path path2) throws IOException {
+      EncryptionZone zone1, zone2;
+
+      zone1 = hdfsAdmin.getEncryptionZoneForPath(path1);
+      zone2 = hdfsAdmin.getEncryptionZoneForPath(path2);
+
+      if (zone1 == null && zone2 == null) {
+        return 0;
+      } else if (zone1 == null) {
+        return -1;
+      } else if (zone2 == null) {
+        return 1;
+      }
+
+      return compareKeyStrength(zone1.getKeyName(), zone2.getKeyName());
+    }
+
+    /**
+     * Compares two encryption key strengths.
+     *
+     * @param keyname1 First key name to compare
+     * @param keyname2 Second key name to compare
+     * @return 1 if keyname1 is stronger; 0 if both keys are of equal strength; -1 if keyname1 is weaker.
+     * @throws IOException If an error occurred attempting to get key metadata
+     */
+    private int compareKeyStrength(String keyname1, String keyname2) throws IOException {
+      KeyProvider.Metadata meta1, meta2;
+
+      if (keyProvider == null) {
+        throw new IOException("HDFS security key provider is not configured on your server.");
+      }
+
+      meta1 = keyProvider.getMetadata(keyname1);
+      meta2 = keyProvider.getMetadata(keyname2);
+
+      if (meta1.getBitLength() < meta2.getBitLength()) {
+        return -1;
+      } else if (meta1.getBitLength() == meta2.getBitLength()) {
+        return 0;
+      } else {
+        return 1;
+      }
+    }
+  }
+
+  @Override
+  public HadoopShims.HdfsEncryptionShim createHdfsEncryptionShim(FileSystem fs, Configuration conf) throws IOException {
+    URI uri = fs.getUri();
+    if ("hdfs".equals(uri.getScheme())) {
+      return new HdfsEncryptionShim(uri, conf);
+    }
+    return new HadoopShims.NoopHdfsEncryptionShim();
+  }
 }
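
Putting the pieces together, a Hadoop 2 caller could compare two locations as
sketched below; this is illustrative only and assumes both paths live on the
same HDFS instance with a KMS key provider configured:

    HadoopShims.HdfsEncryptionShim shim =
        ShimLoader.getHadoopShims().createHdfsEncryptionShim(fs, conf);
    // Prefer the location whose encryption zone key has the larger bit length;
    // an unencrypted path always loses to an encrypted one.
    Path stronger = (shim.comparePathKeyStrength(path1, path2) >= 0) ? path1 : path2;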