Posted to commits@hive.apache.org by br...@apache.org on 2015/01/19 18:28:54 UTC
svn commit: r1653062 [2/6] - in /hive/trunk:
common/src/java/org/apache/hadoop/hive/common/
common/src/java/org/apache/hadoop/hive/conf/ data/scripts/
hbase-handler/src/java/org/apache/hadoop/hive/hbase/
hcatalog/core/src/main/java/org/apache/hive/hcat...
Modified: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java?rev=1653062&r1=1653061&r2=1653062&view=diff
==============================================================================
--- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java (original)
+++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java Mon Jan 19 17:28:53 2015
@@ -29,6 +29,7 @@ import static org.apache.hadoop.hive.ser
import java.io.FileNotFoundException;
import java.io.IOException;
+import java.net.URI;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
@@ -36,6 +37,7 @@ import java.util.HashSet;
import java.util.Iterator;
import java.util.LinkedHashMap;
import java.util.LinkedHashSet;
+import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
@@ -48,6 +50,7 @@ import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.PathFilter;
+import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hive.common.FileUtils;
import org.apache.hadoop.hive.common.HiveStatsUtils;
import org.apache.hadoop.hive.common.ObjectPair;
@@ -1352,7 +1355,7 @@ public class Hive {
}
if (replace) {
- Hive.replaceFiles(loadPath, newPartPath, oldPartPath, getConf(),
+ Hive.replaceFiles(tbl.getPath(), loadPath, newPartPath, oldPartPath, getConf(),
isSrcLocal);
} else {
FileSystem fs = tbl.getDataLocation().getFileSystem(conf);
@@ -1411,7 +1414,7 @@ private void walkDirTree(FileStatus fSta
}
/* dfs. */
- FileStatus[] children = fSys.listStatus(fSta.getPath());
+ FileStatus[] children = fSys.listStatus(fSta.getPath(), FileUtils.HIDDEN_FILES_PATH_FILTER);
if (children != null) {
for (FileStatus child : children) {
walkDirTree(child, fSys, skewedColValueLocationMaps, newPartPath, skewedInfo);
@@ -2187,7 +2190,7 @@ private void constructOneLBLocationMap(F
boolean grantOption) throws HiveException {
try {
return getMSC().grant_role(roleName, userName, principalType, grantor,
- grantorType, grantOption);
+ grantorType, grantOption);
} catch (Exception e) {
throw new HiveException(e);
}
@@ -2282,13 +2285,7 @@ private void constructOneLBLocationMap(F
for (FileStatus src : srcs) {
FileStatus[] items;
if (src.isDir()) {
- items = srcFs.listStatus(src.getPath(), new PathFilter() {
- @Override
- public boolean accept(Path p) {
- String name = p.getName();
- return !name.startsWith("_") && !name.startsWith(".");
- }
- });
+ items = srcFs.listStatus(src.getPath(), FileUtils.HIDDEN_FILES_PATH_FILTER);
Arrays.sort(items);
} else {
items = new FileStatus[] {src};
@@ -2308,9 +2305,10 @@ private void constructOneLBLocationMap(F
}
if (!conf.getBoolVar(HiveConf.ConfVars.HIVE_HADOOP_SUPPORTS_SUBDIRECTORIES) &&
+ !HiveConf.getVar(conf, HiveConf.ConfVars.STAGINGDIR).equals(itemSource.getName()) &&
item.isDir()) {
throw new HiveException("checkPaths: " + src.getPath()
- + " has nested directory" + itemSource);
+ + " has nested directory " + itemSource);
}
// Strip off the file type, if any so we don't make:
// 000000_0.gz -> 000000_0.gz_copy_1
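
As context for the hunk above: this commit repeatedly replaces inline anonymous PathFilters with the shared FileUtils.HIDDEN_FILES_PATH_FILTER constant. A minimal standalone sketch of what such a filter looks like, reconstructed from the removed inline code (the real constant lives in org.apache.hadoop.hive.common.FileUtils and may differ in detail):

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.PathFilter;

public class HiddenFilesFilterSketch {
  // Mirrors the removed anonymous class: skip files whose names start with
  // '_' or '.', the Hadoop convention for metadata and in-progress artifacts
  // such as _SUCCESS, _logs, and .hive-staging directories.
  static final PathFilter HIDDEN_FILES_PATH_FILTER = new PathFilter() {
    @Override
    public boolean accept(Path p) {
      String name = p.getName();
      return !name.startsWith("_") && !name.startsWith(".");
    }
  };
}
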
@@ -2361,11 +2359,54 @@ private void constructOneLBLocationMap(F
return false;
}
+ private static boolean isSubDir(Path srcf, Path destf, FileSystem fs, boolean isSrcLocal){
+ if (srcf == null) {
+ LOG.debug("The source path is null for isSubDir method.");
+ return false;
+ }
+
+ String fullF1 = getQualifiedPathWithoutSchemeAndAuthority(srcf, fs);
+ String fullF2 = getQualifiedPathWithoutSchemeAndAuthority(destf, fs);
+
+ boolean isInTest = Boolean.valueOf(HiveConf.getBoolVar(fs.getConf(), ConfVars.HIVE_IN_TEST));
+ // In test automation, the data warehouse is based on the local file system.
+ LOG.debug("The source path is " + fullF1 + " and the destination path is " + fullF2);
+ if (isInTest) {
+ return fullF1.startsWith(fullF2);
+ }
+
+ // if the schemes differ, return false
+ String schemaSrcf = srcf.toUri().getScheme();
+ String schemaDestf = destf.toUri().getScheme();
+
+ // if the schemaDestf is null, it means the destination is not in the local file system
+ if (schemaDestf == null && isSrcLocal) {
+ LOG.debug("The source file is in the local while the dest not.");
+ return false;
+ }
+
+ // If both schemes are provided, they should be the same.
+ if (schemaSrcf != null && schemaDestf != null && !schemaSrcf.equals(schemaDestf)) {
+ LOG.debug("The source path's schema is " + schemaSrcf +
+ " and the destination path's schema is " + schemaDestf + ".");
+ return false;
+ }
+
+ LOG.debug("The source path is " + fullF1 + " and the destination path is " + fullF2);
+ return fullF1.startsWith(fullF2);
+ }
+
+ private static String getQualifiedPathWithoutSchemeAndAuthority(Path srcf, FileSystem fs) {
+ Path currentWorkingDir = fs.getWorkingDirectory();
+ Path path = srcf.makeQualified(srcf.toUri(), currentWorkingDir);
+ return Path.getPathWithoutSchemeAndAuthority(path).toString();
+ }
+
//It is assumed that the parent directory of destf already exists when this
//method is called. When the replace value is true, this method works a little differently
//from the mv command: if destf is a directory, it replaces destf instead of moving under
//it. In this case, the replaced destf still preserves the original destf's permissions.
- public static boolean renameFile(HiveConf conf, Path srcf, Path destf,
+ public static boolean moveFile(HiveConf conf, Path srcf, Path destf,
FileSystem fs, boolean replace, boolean isSrcLocal) throws HiveException {
boolean success = false;
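
For context on the new isSubDir/getQualifiedPathWithoutSchemeAndAuthority pair above: the nesting test only works once scheme and authority are stripped, since hdfs://nn:8020/warehouse and /warehouse would otherwise never prefix-match. A hedged standalone sketch (isUnder and the trailing-slash guard are illustrative additions, not the patch's exact logic):

import org.apache.hadoop.fs.Path;

public class SubDirCheckSketch {
  // True when 'candidate' equals or is nested under 'base', compared without
  // scheme/authority so hdfs://nn:8020/a/b and /a/b are treated alike.
  static boolean isUnder(Path candidate, Path base) {
    String c = Path.getPathWithoutSchemeAndAuthority(candidate).toString();
    String b = Path.getPathWithoutSchemeAndAuthority(base).toString();
    // Trailing separator avoids false positives such as
    // /warehouse/dest4.out2 "starting with" /warehouse/dest4.out.
    return c.equals(b) || c.startsWith(b.endsWith("/") ? b : b + "/");
  }

  public static void main(String[] args) {
    Path staging = new Path("hdfs://nn:8020/warehouse/dest4.out/.hive-staging");
    Path dest = new Path("hdfs://nn:8020/warehouse/dest4.out");
    System.out.println(isUnder(staging, dest)); // true
  }
}
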
@@ -2374,17 +2415,26 @@ private void constructOneLBLocationMap(F
HiveConf.ConfVars.HIVE_WAREHOUSE_SUBDIR_INHERIT_PERMS);
HadoopShims shims = ShimLoader.getHadoopShims();
HadoopShims.HdfsFileStatus destStatus = null;
+ HadoopShims.HdfsEncryptionShim hdfsEncryptionShim = SessionState.get().getHdfsEncryptionShim();
+ // If source path is a subdirectory of the destination path:
+ // ex: INSERT OVERWRITE DIRECTORY 'target/warehouse/dest4.out' SELECT src.value WHERE src.key >= 300;
+ // where the staging directory is a subdirectory of the destination directory
+ // (1) Do not delete the dest dir before doing the move operation.
+ // (2) It is assumed that subdir and dir are in the same encryption zone.
+ // (3) Move individual files from src dir to dest dir.
+ boolean destIsSubDir = isSubDir(srcf, destf, fs, isSrcLocal);
try {
if (inheritPerms || replace) {
try{
- destStatus = shims.getFullFileStatus(conf, fs, destf);
+ destStatus = shims.getFullFileStatus(conf, fs, destf.getParent());
//if destf is an existing directory:
//if replace is true, delete followed by rename(mv) is equivalent to replace
//if replace is false, rename (mv) actually moves the src under the dest dir
//if destf is an existing file, rename is actually a replace, and does not need
// to delete the file first
- if (replace && destStatus.getFileStatus().isDir()) {
+ if (replace && !destIsSubDir) {
+ LOG.debug("The path " + destf.toString() + " is deleted");
fs.delete(destf, true);
}
} catch (FileNotFoundException ignore) {
@@ -2396,14 +2446,39 @@ private void constructOneLBLocationMap(F
}
if (!isSrcLocal) {
// For NOT local src file, rename the file
- success = fs.rename(srcf, destf);
+ if (hdfsEncryptionShim != null && (hdfsEncryptionShim.isPathEncrypted(srcf) || hdfsEncryptionShim.isPathEncrypted(destf))
+ && !hdfsEncryptionShim.arePathsOnSameEncryptionZone(srcf, destf))
+ {
+ LOG.info("Copying source " + srcf + " to " + destf + " because HDFS encryption zones are different.");
+ success = FileUtils.copy(srcf.getFileSystem(conf), srcf, destf.getFileSystem(conf), destf,
+ true, // delete source
+ replace, // overwrite destination
+ conf);
+ } else {
+ if (destIsSubDir) {
+ FileStatus[] srcs = fs.listStatus(srcf, FileUtils.HIDDEN_FILES_PATH_FILTER);
+ for (FileStatus status : srcs) {
+ success = FileUtils.copy(srcf.getFileSystem(conf), status.getPath(), destf.getFileSystem(conf), destf,
+ true, // delete source
+ replace, // overwrite destination
+ conf);
+
+ if (!success) {
+ throw new HiveException("Unable to move source " + status.getPath() + " to destination " + destf);
+ }
+ }
+ } else {
+ success = fs.rename(srcf, destf);
+ }
+ }
} else {
// For local src file, copy to hdfs
fs.copyFromLocalFile(srcf, destf);
success = true;
}
- LOG.info((replace ? "Replacing src:" : "Renaming src:") + srcf.toString()
- + ";dest: " + destf.toString() + ";Status:" + success);
+
+ LOG.info((replace ? "Replacing src:" : "Renaming src: ") + srcf.toString()
+ + ", dest: " + destf.toString() + ", Status:" + success);
} catch (IOException ioe) {
throw new HiveException("Unable to move source " + srcf + " to destination " + destf, ioe);
}
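
The hunk above changes the move strategy: a plain rename() cannot cross HDFS encryption zone boundaries, so the patch falls back to a copy-and-delete that re-encrypts the data under the destination zone's key. A minimal sketch of that decision, using Hadoop's stock FileUtil.copy in place of Hive's FileUtils.copy and a boolean stand-in for the HdfsEncryptionShim check:

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;

public class MoveSketch {
  static boolean moveOrCopy(Configuration conf, Path src, Path dst,
      boolean sameEncryptionZone) throws IOException {
    FileSystem srcFs = src.getFileSystem(conf);
    FileSystem dstFs = dst.getFileSystem(conf);
    if (sameEncryptionZone) {
      return srcFs.rename(src, dst); // cheap metadata-only move
    }
    // Across zones the bytes must be rewritten (re-encrypted), so stream a
    // copy and delete the source afterwards.
    return FileUtil.copy(srcFs, src, dstFs, dst, /* deleteSource */ true, conf);
  }
}
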
@@ -2470,7 +2545,7 @@ private void constructOneLBLocationMap(F
try {
for (List<Path[]> sdpairs : result) {
for (Path[] sdpair : sdpairs) {
- if (!renameFile(conf, sdpair[0], sdpair[1], fs, false, isSrcLocal)) {
+ if (!moveFile(conf, sdpair[0], sdpair[1], fs, false, isSrcLocal)) {
throw new IOException("Cannot move " + sdpair[0] + " to "
+ sdpair[1]);
}
@@ -2563,6 +2638,7 @@ private void constructOneLBLocationMap(F
* srcf, destf, and tmppath should reside in the same DFS, but the oldPath can be in a
* different DFS.
*
+ * @param tablePath path of the table. Used to identify permission inheritance.
* @param srcf
* Source directory to be renamed to tmppath. It should be a
* leaf directory where the final data files reside. However it
@@ -2570,13 +2646,15 @@ private void constructOneLBLocationMap(F
* @param destf
* The directory where the final data needs to go
* @param oldPath
- * The directory where the old data location, need to be cleaned up.
+ * The directory of the old data location that needs to be cleaned up. Most of the time, it will be the same
+ * as destf, unless it is across FileSystem boundaries.
* @param isSrcLocal
* If the source directory is LOCAL
*/
- static protected void replaceFiles(Path srcf, Path destf, Path oldPath,
- HiveConf conf, boolean isSrcLocal) throws HiveException {
+ protected static void replaceFiles(Path tablePath, Path srcf, Path destf, Path oldPath, HiveConf conf,
+ boolean isSrcLocal) throws HiveException {
try {
+
FileSystem destFs = destf.getFileSystem(conf);
boolean inheritPerms = HiveConf.getBoolVar(conf,
HiveConf.ConfVars.HIVE_WAREHOUSE_SUBDIR_INHERIT_PERMS);
@@ -2597,15 +2675,24 @@ private void constructOneLBLocationMap(F
List<List<Path[]>> result = checkPaths(conf, destFs, srcs, srcFs, destf,
true);
+ HadoopShims shims = ShimLoader.getHadoopShims();
if (oldPath != null) {
try {
FileSystem fs2 = oldPath.getFileSystem(conf);
if (fs2.exists(oldPath)) {
- FileUtils.trashFilesUnderDir(fs2, oldPath, conf);
+ // Do not delete oldPath if:
+ // - destf is subdir of oldPath
+ //if ( !(fs2.equals(destf.getFileSystem(conf)) && FileUtils.isSubDir(oldPath, destf, fs2)))
+ if (FileUtils.isSubDir(oldPath, destf, fs2)) {
+ FileUtils.trashFilesUnderDir(fs2, oldPath, conf);
+ }
+ if (inheritPerms) {
+ inheritFromTable(tablePath, destf, conf, destFs);
+ }
}
} catch (Exception e) {
//swallow the exception
- LOG.warn("Directory " + oldPath.toString() + " canot be removed:" + StringUtils.stringifyException(e));
+ LOG.warn("Directory " + oldPath.toString() + " cannot be removed: " + e, e);
}
}
@@ -2619,15 +2706,30 @@ private void constructOneLBLocationMap(F
LOG.warn("Error creating directory " + destf.toString());
}
if (inheritPerms && success) {
- destFs.setPermission(destfp, destFs.getFileStatus(destfp.getParent()).getPermission());
+ inheritFromTable(tablePath, destfp, conf, destFs);
}
}
- boolean b = renameFile(conf, srcs[0].getPath(), destf, destFs, true,
- isSrcLocal);
- if (!b) {
- throw new HiveException("Unable to move results from " + srcs[0].getPath()
- + " to destination directory: " + destf);
+ // Copy/move each file under the source directory to avoid deleting the destination
+ // directory if it is the root of an HDFS encryption zone.
+ for (List<Path[]> sdpairs : result) {
+ for (Path[] sdpair : sdpairs) {
+ Path destParent = sdpair[1].getParent();
+ FileSystem destParentFs = destParent.getFileSystem(conf);
+ if (!destParentFs.isDirectory(destParent)) {
+ boolean success = destFs.mkdirs(destParent);
+ if (!success) {
+ LOG.warn("Error creating directory " + destParent);
+ }
+ if (inheritPerms && success) {
+ inheritFromTable(tablePath, destParent, conf, destFs);
+ }
+ }
+ if (!moveFile(conf, sdpair[0], sdpair[1], destFs, true, isSrcLocal)) {
+ throw new IOException("Unable to move file/directory from " + sdpair[0] +
+ " to " + sdpair[1]);
+ }
+ }
}
} else { // srcf is a file or pattern containing wildcards
if (!destFs.exists(destf)) {
@@ -2636,13 +2738,13 @@ private void constructOneLBLocationMap(F
LOG.warn("Error creating directory " + destf.toString());
}
if (inheritPerms && success) {
- destFs.setPermission(destf, destFs.getFileStatus(destf.getParent()).getPermission());
+ inheritFromTable(tablePath, destf, conf, destFs);
}
}
// srcs must be a list of files -- ensured by LoadSemanticAnalyzer
for (List<Path[]> sdpairs : result) {
for (Path[] sdpair : sdpairs) {
- if (!renameFile(conf, sdpair[0], sdpair[1], destFs, true,
+ if (!moveFile(conf, sdpair[0], sdpair[1], destFs, true,
isSrcLocal)) {
throw new IOException("Error moving: " + sdpair[0] + " into: " + sdpair[1]);
}
@@ -2654,6 +2756,38 @@ private void constructOneLBLocationMap(F
}
}
+ /**
+ * This method sets all paths from tablePath to destf (including destf) to have the same permissions as tablePath.
+ * @param tablePath path of table
+ * @param destf path of table-subdir.
+ * @param conf
+ * @param fs
+ */
+ private static void inheritFromTable(Path tablePath, Path destf, HiveConf conf, FileSystem fs) {
+ if (!FileUtils.isSubDir(destf, tablePath, fs)) {
+ //partition may not be under the parent.
+ return;
+ }
+ HadoopShims shims = ShimLoader.getHadoopShims();
+ //Calculate all the paths from the table dir to destf
+ //At the end of this loop, currPath is the table dir, and pathsToSet contains all of those paths.
+ Path currPath = destf;
+ List<Path> pathsToSet = new LinkedList<Path>();
+ while (!currPath.equals(tablePath)) {
+ pathsToSet.add(currPath);
+ currPath = currPath.getParent();
+ }
+
+ try {
+ HadoopShims.HdfsFileStatus fullFileStatus = shims.getFullFileStatus(conf, fs, currPath);
+ for (Path pathToSet : pathsToSet) {
+ shims.setFullFileStatus(conf, fullFileStatus, fs, pathToSet);
+ }
+ } catch (Exception e) {
+ LOG.warn("Error setting permissions or group of " + destf, e);
+ }
+ }
+
public static boolean isHadoop1() {
return ShimLoader.getMajorVersion().startsWith("0.20");
}
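
To illustrate the new inheritFromTable() above: permission inheritance walks from the freshly created leaf directory up to the table root and stamps each intermediate directory with the table directory's permission. A simplified sketch using plain setPermission (the patch goes through HadoopShims so that group and extended attributes are carried over as well):

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;

public class InheritPermsSketch {
  static void inheritFromAncestor(FileSystem fs, Path ancestor, Path leaf)
      throws IOException {
    // Collect every directory from the leaf up to (but excluding) the ancestor.
    List<Path> toSet = new ArrayList<Path>();
    for (Path p = leaf; p != null && !p.equals(ancestor); p = p.getParent()) {
      toSet.add(p);
    }
    FsPermission perm = fs.getFileStatus(ancestor).getPermission();
    for (Path p : toSet) {
      fs.setPermission(p, perm); // the real code also propagates group/ACLs
    }
  }
}
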
Modified: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveMetaStoreChecker.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveMetaStoreChecker.java?rev=1653062&r1=1653061&r2=1653062&view=diff
==============================================================================
--- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveMetaStoreChecker.java (original)
+++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveMetaStoreChecker.java Mon Jan 19 17:28:53 2015
@@ -30,6 +30,7 @@ import org.apache.commons.logging.LogFac
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hive.common.FileUtils;
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.metastore.MetaStoreUtils;
import org.apache.hadoop.hive.metastore.Warehouse;
@@ -147,7 +148,7 @@ public class HiveMetaStoreChecker {
for (Path dbPath : dbPaths) {
FileSystem fs = dbPath.getFileSystem(conf);
- FileStatus[] statuses = fs.listStatus(dbPath);
+ FileStatus[] statuses = fs.listStatus(dbPath, FileUtils.HIDDEN_FILES_PATH_FILTER);
for (FileStatus status : statuses) {
if (status.isDir() && !tableNames.contains(status.getPath().getName())) {
@@ -362,7 +363,7 @@ public class HiveMetaStoreChecker {
private void getAllLeafDirs(Path basePath, Set<Path> allDirs, FileSystem fs)
throws IOException {
- FileStatus[] statuses = fs.listStatus(basePath);
+ FileStatus[] statuses = fs.listStatus(basePath, FileUtils.HIDDEN_FILES_PATH_FILTER);
boolean directoryFound=false;
for (FileStatus status : statuses) {
Modified: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/metadata/Partition.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/metadata/Partition.java?rev=1653062&r1=1653061&r2=1653062&view=diff
==============================================================================
--- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/metadata/Partition.java (original)
+++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/metadata/Partition.java Mon Jan 19 17:28:53 2015
@@ -32,6 +32,7 @@ import org.apache.commons.logging.LogFac
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hive.common.FileUtils;
import org.apache.hadoop.hive.metastore.MetaStoreUtils;
import org.apache.hadoop.hive.metastore.ProtectMode;
import org.apache.hadoop.hive.metastore.Warehouse;
@@ -336,7 +337,7 @@ public class Partition implements Serial
* partition String pathPattern = this.partPath.toString() + "/*"; try {
* FileSystem fs = FileSystem.get(this.table.getDataLocation(),
* Hive.get().getConf()); FileStatus srcs[] = fs.globStatus(new
- * Path(pathPattern)); numBuckets = srcs.length; } catch (Exception e) {
+ * Path(pathPattern), FileUtils.HIDDEN_FILES_PATH_FILTER); numBuckets = srcs.length; } catch (Exception e) {
* throw new RuntimeException("Cannot get bucket count for table " +
* this.table.getName(), e); } } return numBuckets;
*/
@@ -372,7 +373,7 @@ public class Partition implements Serial
pathPattern = pathPattern + "/*";
}
LOG.info("Path pattern = " + pathPattern);
- FileStatus srcs[] = fs.globStatus(new Path(pathPattern));
+ FileStatus srcs[] = fs.globStatus(new Path(pathPattern), FileUtils.HIDDEN_FILES_PATH_FILTER);
Arrays.sort(srcs);
for (FileStatus src : srcs) {
LOG.info("Got file: " + src.getPath());
Modified: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/metadata/Table.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/metadata/Table.java?rev=1653062&r1=1653061&r2=1653062&view=diff
==============================================================================
--- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/metadata/Table.java (original)
+++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/metadata/Table.java Mon Jan 19 17:28:53 2015
@@ -35,6 +35,7 @@ import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.common.FileUtils;
import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
import org.apache.hadoop.hive.metastore.MetaStoreUtils;
import org.apache.hadoop.hive.metastore.ProtectMode;
@@ -636,7 +637,7 @@ public class Table implements Serializab
protected void replaceFiles(Path srcf, boolean isSrcLocal)
throws HiveException {
Path tableDest = getPath();
- Hive.replaceFiles(srcf, tableDest, tableDest, Hive.get().getConf(),
+ Hive.replaceFiles(tableDest, srcf, tableDest, tableDest, Hive.get().getConf(),
isSrcLocal);
}
@@ -951,7 +952,7 @@ public class Table implements Serializab
pathPattern = pathPattern + "/*";
}
LOG.info("Path pattern = " + pathPattern);
- FileStatus srcs[] = fs.globStatus(new Path(pathPattern));
+ FileStatus srcs[] = fs.globStatus(new Path(pathPattern), FileUtils.HIDDEN_FILES_PATH_FILTER);
Arrays.sort(srcs);
for (FileStatus src : srcs) {
LOG.info("Got file: " + src.getPath());
Modified: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/AbstractBucketJoinProc.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/AbstractBucketJoinProc.java?rev=1653062&r1=1653061&r2=1653062&view=diff
==============================================================================
--- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/AbstractBucketJoinProc.java (original)
+++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/AbstractBucketJoinProc.java Mon Jan 19 17:28:53 2015
@@ -35,6 +35,7 @@ import org.apache.commons.logging.LogFac
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hive.common.FileUtils;
import org.apache.hadoop.hive.ql.ErrorMsg;
import org.apache.hadoop.hive.ql.exec.MapJoinOperator;
import org.apache.hadoop.hive.ql.exec.Operator;
@@ -81,7 +82,7 @@ abstract public class AbstractBucketJoin
List<String> fileNames = new ArrayList<String>();
try {
FileSystem fs = location.getFileSystem(pGraphContext.getConf());
- FileStatus[] files = fs.listStatus(new Path(location.toString()));
+ FileStatus[] files = fs.listStatus(new Path(location.toString()), FileUtils.HIDDEN_FILES_PATH_FILTER);
if (files != null) {
for (FileStatus file : files) {
fileNames.add(file.getPath().toString());
Modified: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/IndexUtils.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/IndexUtils.java?rev=1653062&r1=1653061&r2=1653062&view=diff
==============================================================================
--- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/IndexUtils.java (original)
+++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/IndexUtils.java Mon Jan 19 17:28:53 2015
@@ -29,6 +29,7 @@ import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.hive.common.FileUtils;
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.metastore.api.Index;
import org.apache.hadoop.hive.ql.Driver;
@@ -123,22 +124,23 @@ public final class IndexUtils {
Partition part) throws HiveException {
LOG.info("checking index staleness...");
try {
- FileSystem partFs = part.getDataLocation().getFileSystem(hive.getConf());
- FileStatus partFss = partFs.getFileStatus(part.getDataLocation());
- String ts = index.getParameters().get(part.getSpec().toString());
- if (ts == null) {
+ String indexTs = index.getParameters().get(part.getSpec().toString());
+ if (indexTs == null) {
return false;
}
- long indexTs = Long.parseLong(ts);
- LOG.info(partFss.getModificationTime());
- LOG.info(ts);
- if (partFss.getModificationTime() > indexTs) {
- LOG.info("index is stale on the partitions that matched " + part.getSpec());
- return false;
+
+ FileSystem partFs = part.getDataLocation().getFileSystem(hive.getConf());
+ FileStatus[] parts = partFs.listStatus(part.getDataLocation(), FileUtils.HIDDEN_FILES_PATH_FILTER);
+ for (FileStatus status : parts) {
+ if (status.getModificationTime() > Long.parseLong(indexTs)) {
+ LOG.info("Index is stale on partition '" + part.getName()
+ + "'. Modified time (" + status.getModificationTime() + ") for '" + status.getPath()
+ + "' is higher than index creation time (" + indexTs + ").");
+ return false;
+ }
}
} catch (IOException e) {
- LOG.info("failed to grab timestamp info");
- throw new HiveException(e);
+ throw new HiveException("Failed to grab timestamp information from partition '" + part.getName() + "': " + e.getMessage(), e);
}
return true;
}
@@ -156,22 +158,23 @@ public final class IndexUtils {
for (Index index : indexes) {
LOG.info("checking index staleness...");
try {
- FileSystem srcFs = src.getPath().getFileSystem(hive.getConf());
- FileStatus srcFss= srcFs.getFileStatus(src.getPath());
- String ts = index.getParameters().get("base_timestamp");
- if (ts == null) {
+ String indexTs = index.getParameters().get("base_timestamp");
+ if (indexTs == null) {
return false;
}
- long indexTs = Long.parseLong(ts);
- LOG.info(srcFss.getModificationTime());
- LOG.info(ts);
- if (srcFss.getModificationTime() > indexTs) {
- LOG.info("index is stale ");
- return false;
+
+ FileSystem srcFs = src.getPath().getFileSystem(hive.getConf());
+ FileStatus[] srcs = srcFs.listStatus(src.getPath(), FileUtils.HIDDEN_FILES_PATH_FILTER);
+ for (FileStatus status : srcs) {
+ if (status.getModificationTime() > Long.parseLong(indexTs)) {
+ LOG.info("Index is stale on table '" + src.getTableName()
+ + "'. Modified time (" + status.getModificationTime() + ") for '" + status.getPath()
+ + "' is higher than index creation time (" + indexTs + ").");
+ return false;
+ }
}
} catch (IOException e) {
- LOG.info("failed to grab timestamp info");
- throw new HiveException(e);
+ throw new HiveException("Failed to grab timestamp information from table '" + src.getTableName() + "': " + e.getMessage(), e);
}
}
return true;
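
The IndexUtils rewrite above changes what "stale" means: instead of trusting the partition directory's own modification time (which HDFS only bumps when children are added or removed), it lists the data files and compares each file's mtime against the recorded index build timestamp. A compact sketch of that rule:

import java.io.IOException;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class StalenessSketch {
  // Fresh only if no data file is newer than the index build timestamp.
  static boolean isFresh(FileSystem fs, Path dataDir, long indexBuildTs)
      throws IOException {
    for (FileStatus f : fs.listStatus(dataDir)) {
      if (f.getModificationTime() > indexBuildTs) {
        return false;
      }
    }
    return true;
  }
}
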
Modified: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/ExportSemanticAnalyzer.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/ExportSemanticAnalyzer.java?rev=1653062&r1=1653061&r2=1653062&view=diff
==============================================================================
--- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/ExportSemanticAnalyzer.java (original)
+++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/ExportSemanticAnalyzer.java Mon Jan 19 17:28:53 2015
@@ -28,6 +28,7 @@ import org.antlr.runtime.tree.Tree;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hive.common.FileUtils;
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.ql.ErrorMsg;
import org.apache.hadoop.hive.ql.exec.Task;
@@ -68,7 +69,7 @@ public class ExportSemanticAnalyzer exte
throw new SemanticException(ErrorMsg.INVALID_PATH.getMsg(ast,
"Target is not a directory : " + toURI));
} else {
- FileStatus[] files = fs.listStatus(toPath);
+ FileStatus[] files = fs.listStatus(toPath, FileUtils.HIDDEN_FILES_PATH_FILTER);
if (files != null && files.length != 0) {
throw new SemanticException(ErrorMsg.INVALID_PATH.getMsg(ast,
"Target is not an empty directory : " + toURI));
Modified: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/ImportSemanticAnalyzer.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/ImportSemanticAnalyzer.java?rev=1653062&r1=1653061&r2=1653062&view=diff
==============================================================================
--- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/ImportSemanticAnalyzer.java (original)
+++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/ImportSemanticAnalyzer.java Mon Jan 19 17:28:53 2015
@@ -35,6 +35,7 @@ import org.apache.commons.lang.ObjectUti
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hive.common.FileUtils;
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.metastore.TableType;
import org.apache.hadoop.hive.metastore.Warehouse;
@@ -357,7 +358,7 @@ public class ImportSemanticAnalyzer exte
throws IOException, SemanticException {
LOG.debug("checking emptiness of " + targetPath.toString());
if (fs.exists(targetPath)) {
- FileStatus[] status = fs.listStatus(targetPath);
+ FileStatus[] status = fs.listStatus(targetPath, FileUtils.HIDDEN_FILES_PATH_FILTER);
if (status.length > 0) {
LOG.debug("Files inc. " + status[0].getPath().toString()
+ " found in path : " + targetPath.toString());
Modified: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java?rev=1653062&r1=1653061&r2=1653062&view=diff
==============================================================================
--- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java (original)
+++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java Mon Jan 19 17:28:53 2015
@@ -22,6 +22,7 @@ import static org.apache.hadoop.hive.con
import java.io.IOException;
import java.io.Serializable;
+import java.security.AccessControlException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
@@ -45,8 +46,10 @@ import org.antlr.runtime.tree.TreeWizard
import org.antlr.runtime.tree.TreeWizard.ContextVisitor;
import org.apache.commons.lang.StringUtils;
import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.hive.common.FileUtils;
import org.apache.hadoop.hive.common.ObjectPair;
import org.apache.hadoop.hive.common.StatsSetupConst;
@@ -201,9 +204,12 @@ import org.apache.hadoop.hive.serde2.typ
import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo;
import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory;
import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils;
+import org.apache.hadoop.hive.shims.HadoopShims;
+import org.apache.hadoop.hive.shims.Utils;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.mapred.InputFormat;
import org.apache.hadoop.mapred.OutputFormat;
+import org.apache.hadoop.security.UserGroupInformation;
/**
* Implementation of the semantic analyzer. It generates the query plan.
@@ -1667,7 +1673,7 @@ public class SemanticAnalyzer extends Ba
throw new SemanticException(e);
}
try {
- fname = ctx.getExternalTmpPath(
+ fname = ctx.getExtTmpPathRelTo(
FileUtils.makeQualified(location, conf)).toString();
} catch (Exception e) {
throw new SemanticException(generateErrorMessage(ast,
@@ -1683,8 +1689,9 @@ public class SemanticAnalyzer extends Ba
} else {
// This is the only place where isQuery is set to true; it defaults to false.
qb.setIsQuery(true);
- fname = ctx.getMRTmpPath().toString();
- ctx.setResDir(new Path(fname));
+ Path stagingPath = getStagingDirectoryPathname(qb);
+ fname = stagingPath.toString();
+ ctx.setResDir(stagingPath);
}
}
qb.getMetaData().setDestForAlias(name, fname,
@@ -1740,6 +1747,160 @@ public class SemanticAnalyzer extends Ba
}
}
+ /**
+ * Checks if a given path is encrypted (valid only for HDFS files)
+ * @param path The path to check for encryption
+ * @return True if the path is encrypted; False if it is not encrypted
+ * @throws HiveException If an error occurs while checking for encryption
+ */
+ private boolean isPathEncrypted(Path path) throws HiveException {
+ HadoopShims.HdfsEncryptionShim hdfsEncryptionShim;
+
+ hdfsEncryptionShim = SessionState.get().getHdfsEncryptionShim();
+ if (hdfsEncryptionShim != null) {
+ try {
+ if (hdfsEncryptionShim.isPathEncrypted(path)) {
+ return true;
+ }
+ } catch (Exception e) {
+ throw new HiveException("Unable to determine if " + path + "is encrypted: " + e, e);
+ }
+ }
+
+ return false;
+ }
+
+ /**
+ * Compares two paths' key encryption strengths.
+ *
+ * @param p1 Path to an HDFS file system
+ * @param p2 Path to an HDFS file system
+ * @return -1 if p1's key is weaker than p2's; 0 if they are equal; 1 if p1's key is stronger
+ * @throws HiveException If an error occurs while comparing key strengths.
+ */
+ private int comparePathKeyStrength(Path p1, Path p2) throws HiveException {
+ HadoopShims.HdfsEncryptionShim hdfsEncryptionShim;
+
+ hdfsEncryptionShim = SessionState.get().getHdfsEncryptionShim();
+ if (hdfsEncryptionShim != null) {
+ try {
+ return hdfsEncryptionShim.comparePathKeyStrength(p1, p2);
+ } catch (Exception e) {
+ throw new HiveException("Unable to compare key strength for " + p1 + " and " + p2 + " : " + e, e);
+ }
+ }
+
+ return 0; // Non-encrypted path (or equals strength)
+ }
+
+ /**
+ * Checks if a given path has read-only access permissions.
+ *
+ * @param path The path to check for read-only permissions.
+ * @return True if the path is read-only; False otherwise.
+ * @throws HiveException If an error occurs while checking file permissions.
+ */
+ private boolean isPathReadOnly(Path path) throws HiveException {
+ HiveConf conf = SessionState.get().getConf();
+ try {
+ FileSystem fs = path.getFileSystem(conf);
+ UserGroupInformation ugi = Utils.getUGI();
+ FileStatus status = fs.getFileStatus(path);
+
+ // We just check for writing permissions. If it fails with AccessControException, then it
+ // means the location may be read-only.
+ FileUtils.checkFileAccessWithImpersonation(fs, status, FsAction.WRITE, ugi.getUserName());
+
+ // Path has writing permissions
+ return false;
+ } catch (AccessControlException e) {
+ // An AccessControlException may be caused for other different errors,
+ // but we take it as if our path is read-only
+ return true;
+ } catch (Exception e) {
+ throw new HiveException("Unable to determine if " + path + " is read only: " + e, e);
+ }
+ }
+
+ /**
+ * Gets the strongest encrypted table path.
+ *
+ * @param qb The QB object that contains a list of all table locations.
+ * @return The strongest encrypted path
+ * @throws HiveException if an error occurred attempting to compare the encryption strength
+ */
+ private Path getStrongestEncryptedTablePath(QB qb) throws HiveException {
+ List<String> tabAliases = new ArrayList<String>(qb.getTabAliases());
+ Path strongestPath = null;
+
+ /* Walk through all found table locations to get the most encrypted table */
+ for (String alias : tabAliases) {
+ Table tab = qb.getMetaData().getTableForAlias(alias);
+ if (tab != null) {
+ Path tablePath = tab.getDataLocation();
+ if (tablePath != null) {
+ try {
+ if (strongestPath == null) {
+ strongestPath = tablePath;
+ } else if ("hdfs".equals(tablePath.toUri().getScheme())
+ && isPathEncrypted(tablePath)
+ && comparePathKeyStrength(tablePath, strongestPath) > 0)
+ {
+ strongestPath = tablePath;
+ }
+ } catch (HiveException e) {
+ throw new HiveException("Unable to find the most secure table path: " + e, e);
+ }
+ }
+ }
+ }
+
+ return strongestPath;
+ }
+
+ /**
+ * Gets the staging directory where MR files will be stored temporarily.
+ * It walks through the QB plan to find the correct location to save temporary files. This
+ * temporary location (or staging directory) may be created inside encrypted table locations for
+ * security reasons. If the QB has read-only tables, then the older scratch directory will be used,
+ * or a permission error will be thrown if the requested query table is encrypted and the old scratch
+ * directory is not.
+ *
+ * @param qb The QB object that contains a list of all table locations.
+ * @return The path to the staging directory.
+ * @throws HiveException If an error occurs while identifying the correct staging location.
+ */
+ private Path getStagingDirectoryPathname(QB qb) throws HiveException {
+ Path stagingPath = null, tablePath;
+
+ // Looks for the most encrypted table location (if there is one)
+ tablePath = getStrongestEncryptedTablePath(qb);
+ if (tablePath != null && isPathEncrypted(tablePath)) {
+ // Only HDFS paths can be checked for encryption
+ if ("hdfs".equals(tablePath.toUri().getScheme())) {
+ if (isPathReadOnly(tablePath)) {
+ Path tmpPath = ctx.getMRTmpPath();
+ if (comparePathKeyStrength(tablePath, tmpPath) < 0) {
+ throw new HiveException("Read-only encrypted tables cannot be read " +
+ "if the scratch directory is not encrypted (or encryption is weak)");
+ } else {
+ stagingPath = tmpPath;
+ }
+ }
+ } else {
+ LOG.debug("Encryption is not applicable to table path " + tablePath.toString());
+ }
+
+ if (stagingPath == null) {
+ stagingPath = ctx.getMRTmpPath(tablePath.toUri());
+ }
+ } else {
+ stagingPath = ctx.getMRTmpPath();
+ }
+
+ return stagingPath;
+ }
+
private void replaceViewReferenceWithDefinition(QB qb, Table tab,
String tab_name, String alias) throws SemanticException {
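
Summarizing the getStagingDirectoryPathname() logic added above as a hedged decision sketch; every helper here (isEncrypted, isReadOnly, scratchWeakerThan, defaultScratchDir, tmpUnder) is an illustrative stand-in for the patch's isPathEncrypted, isPathReadOnly, comparePathKeyStrength and ctx.getMRTmpPath variants, not a Hive API:

import org.apache.hadoop.fs.Path;

public class StagingChoiceSketch {
  Path chooseStaging(Path strongestTable) {
    if (strongestTable == null || !isEncrypted(strongestTable)) {
      return defaultScratchDir();       // nothing encrypted: old behavior
    }
    if (isReadOnly(strongestTable)) {
      Path scratch = defaultScratchDir();
      if (scratchWeakerThan(strongestTable, scratch)) {
        // Writing weaker-encrypted intermediates would leak the table's data.
        throw new IllegalStateException("scratch dir encryption is weaker than the table's");
      }
      return scratch; // cannot stage inside a read-only table location
    }
    return tmpUnder(strongestTable);    // stage inside the table's encryption zone
  }

  // Stubs so the sketch compiles standalone.
  boolean isEncrypted(Path p) { return true; }
  boolean isReadOnly(Path p) { return false; }
  boolean scratchWeakerThan(Path table, Path scratch) { return false; }
  Path defaultScratchDir() { return new Path("/tmp/hive"); }
  Path tmpUnder(Path table) { return new Path(table, ".hive-staging"); }
}
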
@@ -5950,7 +6111,7 @@ public class SemanticAnalyzer extends Ba
if (isNonNativeTable) {
queryTmpdir = dest_path;
} else {
- queryTmpdir = ctx.getExternalTmpPath(dest_path);
+ queryTmpdir = ctx.getExtTmpPathRelTo(dest_path);
}
if (dpCtx != null) {
// set the root of the temporary path where dynamic partition columns will populate
@@ -6131,7 +6292,7 @@ public class SemanticAnalyzer extends Ba
try {
Path qPath = FileUtils.makeQualified(dest_path, conf);
- queryTmpdir = ctx.getExternalTmpPath(qPath);
+ queryTmpdir = ctx.getExtTmpPathRelTo(qPath);
} catch (Exception e) {
throw new SemanticException("Error creating temporary folder on: "
+ dest_path, e);
@@ -6312,7 +6473,7 @@ public class SemanticAnalyzer extends Ba
// it should be the same as the MoveWork's sourceDir.
fileSinkDesc.setStatsAggPrefix(fileSinkDesc.getDirName().toString());
if (HiveConf.getVar(conf, HIVESTATSDBCLASS).equalsIgnoreCase(StatDB.fs.name())) {
- String statsTmpLoc = ctx.getExternalTmpPath(queryTmpdir).toString();
+ String statsTmpLoc = ctx.getExtTmpPathRelTo(queryTmpdir).toString();
LOG.info("Set stats collection dir : " + statsTmpLoc);
conf.set(StatsSetupConst.STATS_TMP_LOC, statsTmpLoc);
}
@@ -9460,7 +9621,7 @@ public class SemanticAnalyzer extends Ba
tsDesc.setGatherStats(false);
} else {
if (HiveConf.getVar(conf, HIVESTATSDBCLASS).equalsIgnoreCase(StatDB.fs.name())) {
- String statsTmpLoc = ctx.getExternalTmpPath(tab.getPath()).toString();
+ String statsTmpLoc = ctx.getExtTmpPathRelTo(tab.getPath()).toString();
LOG.info("Set stats collection dir : " + statsTmpLoc);
conf.set(StatsSetupConst.STATS_TMP_LOC, statsTmpLoc);
}
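
One more illustration for the isPathReadOnly() helper above: the probe asks for WRITE access and interprets an AccessControlException as "read-only". The sketch below uses FileSystem.access() (Hadoop 2.6+) as a plain-Hadoop analogue of the FileUtils.checkFileAccessWithImpersonation call in the patch:

import java.io.IOException;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.security.AccessControlException;

public class ReadOnlySketch {
  static boolean isReadOnly(FileSystem fs, Path path) throws IOException {
    try {
      fs.access(path, FsAction.WRITE); // throws if the caller cannot write
      return false;
    } catch (AccessControlException denied) {
      return true; // write denied: treat the location as read-only
    }
  }
}
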
Modified: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/processors/CommandProcessorFactory.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/processors/CommandProcessorFactory.java?rev=1653062&r1=1653061&r2=1653062&view=diff
==============================================================================
--- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/processors/CommandProcessorFactory.java (original)
+++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/processors/CommandProcessorFactory.java Mon Jan 19 17:28:53 2015
@@ -29,6 +29,7 @@ import java.util.Set;
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.ql.Driver;
+import org.apache.hadoop.hive.ql.metadata.*;
import org.apache.hadoop.hive.ql.session.SessionState;
/**
@@ -49,8 +50,14 @@ public final class CommandProcessorFacto
}
public static CommandProcessor getForHiveCommand(String[] cmd, HiveConf conf)
- throws SQLException {
- HiveCommand hiveCommand = HiveCommand.find(cmd);
+ throws SQLException {
+ return getForHiveCommandInternal(cmd, conf, false);
+ }
+
+ public static CommandProcessor getForHiveCommandInternal(String[] cmd, HiveConf conf,
+ boolean testOnly)
+ throws SQLException {
+ HiveCommand hiveCommand = HiveCommand.find(cmd, testOnly);
if (hiveCommand == null || isBlank(cmd[0])) {
return null;
}
@@ -58,7 +65,8 @@ public final class CommandProcessorFacto
conf = new HiveConf();
}
Set<String> availableCommands = new HashSet<String>();
- for (String availableCommand : conf.getVar(HiveConf.ConfVars.HIVE_SECURITY_COMMAND_WHITELIST).split(",")) {
+ for (String availableCommand : conf.getVar(HiveConf.ConfVars.HIVE_SECURITY_COMMAND_WHITELIST)
+ .split(",")) {
availableCommands.add(availableCommand.toLowerCase().trim());
}
if (!availableCommands.contains(cmd[0].trim().toLowerCase())) {
@@ -82,6 +90,12 @@ public final class CommandProcessorFacto
return new CompileProcessor();
case RELOAD:
return new ReloadProcessor();
+ case CRYPTO:
+ try {
+ return new CryptoProcessor(SessionState.get().getHdfsEncryptionShim(), conf);
+ } catch (HiveException e) {
+ throw new SQLException("Fail to start the command processor due to the exception: ", e);
+ }
default:
throw new AssertionError("Unknown HiveCommand " + hiveCommand);
}
Added: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/processors/CryptoProcessor.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/processors/CryptoProcessor.java?rev=1653062&view=auto
==============================================================================
--- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/processors/CryptoProcessor.java (added)
+++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/processors/CryptoProcessor.java Mon Jan 19 17:28:53 2015
@@ -0,0 +1,184 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.ql.processors;
+
+import org.apache.commons.cli.Options;
+import org.apache.commons.cli.OptionBuilder;
+import org.apache.commons.cli.CommandLine;
+import org.apache.commons.cli.ParseException;
+import org.apache.commons.cli.CommandLineParser;
+import org.apache.commons.cli.GnuParser;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.ql.CommandNeedRetryException;
+import org.apache.hadoop.hive.ql.session.SessionState;
+import org.apache.hadoop.hive.shims.HadoopShims;
+
+import java.io.IOException;
+import java.util.Arrays;
+
+/**
+ * This class processes HADOOP commands used for HDFS encryption. It is meant to be run
+ * only by Hive unit and query tests.
+ */
+public class CryptoProcessor implements CommandProcessor {
+ public static final Log LOG = LogFactory.getLog(CryptoProcessor.class.getName());
+
+ private HadoopShims.HdfsEncryptionShim encryptionShim;
+
+ private Options CREATE_KEY_OPTIONS;
+ private Options DELETE_KEY_OPTIONS;
+ private Options CREATE_ZONE_OPTIONS;
+
+ private int DEFAULT_BIT_LENGTH = 128;
+
+ private HiveConf conf;
+
+ public CryptoProcessor(HadoopShims.HdfsEncryptionShim encryptionShim, HiveConf conf) {
+ this.encryptionShim = encryptionShim;
+ this.conf = conf;
+
+ CREATE_KEY_OPTIONS = new Options();
+ CREATE_KEY_OPTIONS.addOption(OptionBuilder.hasArg().withLongOpt("keyName").isRequired().create());
+ CREATE_KEY_OPTIONS.addOption(OptionBuilder.hasArg().withLongOpt("bitLength").create()); // optional
+
+ DELETE_KEY_OPTIONS = new Options();
+ DELETE_KEY_OPTIONS.addOption(OptionBuilder.hasArg().withLongOpt("keyName").isRequired().create());
+
+ CREATE_ZONE_OPTIONS = new Options();
+ CREATE_ZONE_OPTIONS.addOption(OptionBuilder.hasArg().withLongOpt("keyName").isRequired().create());
+ CREATE_ZONE_OPTIONS.addOption(OptionBuilder.hasArg().withLongOpt("path").isRequired().create());
+ }
+
+ private CommandLine parseCommandArgs(final Options opts, String[] args) throws ParseException {
+ CommandLineParser parser = new GnuParser();
+ return parser.parse(opts, args);
+ }
+
+ private CommandProcessorResponse returnErrorResponse(final String errmsg) {
+ return new CommandProcessorResponse(1, "Encryption Processor Helper Failed: " + errmsg, null);
+ }
+
+ private void writeTestOutput(final String msg) {
+ SessionState.get().out.println(msg);
+ }
+
+ @Override
+ public void init() {
+ }
+
+ @Override
+ public CommandProcessorResponse run(String command) throws CommandNeedRetryException {
+ String[] args = command.split("\\s+");
+
+ if (args.length < 1) {
+ return returnErrorResponse("Command arguments are empty.");
+ }
+
+ if (encryptionShim == null) {
+ return returnErrorResponse("Hadoop encryption shim is not initialized.");
+ }
+
+ String action = args[0];
+ String params[] = Arrays.copyOfRange(args, 1, args.length);
+
+ try {
+ if (action.equalsIgnoreCase("create_key")) {
+ createEncryptionKey(params);
+ } else if (action.equalsIgnoreCase("create_zone")) {
+ createEncryptionZone(params);
+ } else if (action.equalsIgnoreCase("delete_key")) {
+ deleteEncryptionKey(params);
+ } else {
+ return returnErrorResponse("Unknown command action: " + action);
+ }
+ } catch (Exception e) {
+ return returnErrorResponse(e.getMessage());
+ }
+
+ return new CommandProcessorResponse(0);
+ }
+
+ /**
+ * Creates an encryption key using the parameters passed through the 'create_key' action.
+ *
+ * @param params Parameters passed to the 'create_key' command action.
+ * @throws Exception If key creation failed.
+ */
+ private void createEncryptionKey(String[] params) throws Exception {
+ CommandLine args = parseCommandArgs(CREATE_KEY_OPTIONS, params);
+
+ String keyName = args.getOptionValue("keyName");
+ String bitLength = args.getOptionValue("bitLength", Integer.toString(DEFAULT_BIT_LENGTH));
+
+ try {
+ encryptionShim.createKey(keyName, new Integer(bitLength));
+ } catch (Exception e) {
+ throw new Exception("Cannot create encryption key: " + e.getMessage());
+ }
+
+ writeTestOutput("Encryption key created: '" + keyName + "'");
+ }
+
+ /**
+ * Creates an encryption zone using the parameters passed through the 'create_zone' action.
+ *
+ * @param params Parameters passed to the 'create_zone' command action.
+ * @throws Exception If zone creation failed.
+ */
+ private void createEncryptionZone(String[] params) throws Exception {
+ CommandLine args = parseCommandArgs(CREATE_ZONE_OPTIONS, params);
+
+ String keyName = args.getOptionValue("keyName");
+ Path cryptoZone = new Path(args.getOptionValue("path"));
+ if (cryptoZone == null) {
+ throw new Exception("Cannot create encryption zone: Invalid path '"
+ + args.getOptionValue("path") + "'");
+ }
+
+ try {
+ encryptionShim.createEncryptionZone(cryptoZone, keyName);
+ } catch (IOException e) {
+ throw new Exception("Cannot create encryption zone: " + e.getMessage());
+ }
+
+ writeTestOutput("Encryption zone created: '" + cryptoZone + "' using key: '" + keyName + "'");
+ }
+
+ /**
+ * Deletes an encryption key using the parameters passed through the 'delete_key' action.
+ *
+ * @param params Parameters passed to the 'delete_key' command action.
+ * @throws Exception If key deletion failed.
+ */
+ private void deleteEncryptionKey(String[] params) throws Exception {
+ CommandLine args = parseCommandArgs(DELETE_KEY_OPTIONS, params);
+
+ String keyName = args.getOptionValue("keyName");
+ try {
+ encryptionShim.deleteKey(keyName);
+ } catch (IOException e) {
+ throw new Exception("Cannot delete encryption key: " + e.getMessage());
+ }
+
+ writeTestOutput("Encryption key deleted: '" + keyName + "'");
+ }
+}
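
A standalone sketch of how the CryptoProcessor options above behave for a test command such as "crypto create_key --keyName key_1 --bitLength 128"; the option names are taken from the patch, the rest is illustrative:

import org.apache.commons.cli.CommandLine;
import org.apache.commons.cli.GnuParser;
import org.apache.commons.cli.OptionBuilder;
import org.apache.commons.cli.Options;
import org.apache.commons.cli.ParseException;

public class CryptoArgsSketch {
  public static void main(String[] args) throws ParseException {
    Options opts = new Options();
    opts.addOption(OptionBuilder.hasArg().withLongOpt("keyName").isRequired().create());
    opts.addOption(OptionBuilder.hasArg().withLongOpt("bitLength").create()); // optional

    String[] params = {"--keyName", "key_1", "--bitLength", "128"};
    CommandLine cl = new GnuParser().parse(opts, params);
    System.out.println(cl.getOptionValue("keyName"));          // key_1
    System.out.println(cl.getOptionValue("bitLength", "128")); // 128 (default)
  }
}
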
Modified: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/processors/HiveCommand.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/processors/HiveCommand.java?rev=1653062&r1=1653061&r2=1653062&view=diff
==============================================================================
--- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/processors/HiveCommand.java (original)
+++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/processors/HiveCommand.java Mon Jan 19 17:28:53 2015
@@ -29,18 +29,40 @@ public enum HiveCommand {
SET(),
RESET(),
DFS(),
+ CRYPTO(true),
ADD(),
LIST(),
RELOAD(),
DELETE(),
COMPILE();
+
+ public static boolean ONLY_FOR_TESTING = true;
+ private boolean usedOnlyForTesting;
+
+ HiveCommand() {
+ this(false);
+ }
+
+ HiveCommand(boolean onlyForTesting) {
+ this.usedOnlyForTesting = onlyForTesting;
+ }
+
+ public boolean isOnlyForTesting() {
+ return this.usedOnlyForTesting;
+ }
+
private static final Set<String> COMMANDS = new HashSet<String>();
static {
for (HiveCommand command : HiveCommand.values()) {
COMMANDS.add(command.name());
}
}
+
public static HiveCommand find(String[] command) {
+ return find(command, false);
+ }
+
+ public static HiveCommand find(String[] command, boolean findOnlyForTesting) {
if (null == command){
return null;
}
@@ -54,7 +76,13 @@ public enum HiveCommand {
//special handling for SQL "delete from <table> where..."
return null;
} else if (COMMANDS.contains(cmd)) {
- return HiveCommand.valueOf(cmd);
+ HiveCommand hiveCommand = HiveCommand.valueOf(cmd);
+
+ if (findOnlyForTesting == hiveCommand.isOnlyForTesting()) {
+ return hiveCommand;
+ }
+
+ return null;
}
}
return null;
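
The HiveCommand change above gates test-only commands behind a flag so that CRYPTO stays invisible in production. A minimal sketch of the same pattern (CmdSketch is illustrative, not the Hive enum):

import java.util.HashSet;
import java.util.Set;

enum CmdSketch {
  DFS(false), CRYPTO(true);

  private final boolean testOnly;
  CmdSketch(boolean testOnly) { this.testOnly = testOnly; }

  private static final Set<String> NAMES = new HashSet<String>();
  static { for (CmdSketch c : values()) NAMES.add(c.name()); }

  // A command is findable only when the caller's flag matches its marker.
  static CmdSketch find(String token, boolean findOnlyForTesting) {
    String cmd = token.trim().toUpperCase();
    if (!NAMES.contains(cmd)) return null;
    CmdSketch c = valueOf(cmd);
    return (findOnlyForTesting == c.testOnly) ? c : null;
  }

  public static void main(String[] args) {
    System.out.println(find("crypto", false)); // null: hidden in production
    System.out.println(find("crypto", true));  // CRYPTO: visible to tests
  }
}
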
Modified: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/session/SessionState.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/session/SessionState.java?rev=1653062&r1=1653061&r2=1653062&view=diff
==============================================================================
--- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/session/SessionState.java (original)
+++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/session/SessionState.java Mon Jan 19 17:28:53 2015
@@ -66,6 +66,7 @@ import org.apache.hadoop.hive.ql.securit
import org.apache.hadoop.hive.ql.security.authorization.plugin.HiveAuthzSessionContext.CLIENT_TYPE;
import org.apache.hadoop.hive.ql.security.authorization.plugin.HiveMetastoreClientFactoryImpl;
import org.apache.hadoop.hive.ql.util.DosToUnix;
+import org.apache.hadoop.hive.shims.HadoopShims;
import org.apache.hadoop.hive.shims.ShimLoader;
import org.apache.hadoop.hive.shims.Utils;
import org.apache.hadoop.security.UserGroupInformation;
@@ -184,6 +185,11 @@ public class SessionState {
private SparkSession sparkSession;
/**
+ * Gets information about HDFS encryption
+ */
+ private HadoopShims.HdfsEncryptionShim hdfsEncryptionShim;
+
+ /**
* Lineage state.
*/
LineageState ls;
@@ -378,6 +384,23 @@ public class SessionState {
return txnAutoCommit;
}
+ public HadoopShims.HdfsEncryptionShim getHdfsEncryptionShim() throws HiveException {
+ if (hdfsEncryptionShim == null) {
+ try {
+ FileSystem fs = FileSystem.get(conf);
+ if ("hdfs".equals(fs.getUri().getScheme())) {
+ hdfsEncryptionShim = ShimLoader.getHadoopShims().createHdfsEncryptionShim(fs, conf);
+ } else {
+ LOG.info("Could not get hdfsEncryptionShim, it is only applicable to hdfs filesystem.");
+ }
+ } catch (Exception e) {
+ throw new HiveException(e);
+ }
+ }
+
+ return hdfsEncryptionShim;
+ }
+
/**
* Singleton Session object per thread.
*
@@ -410,7 +433,6 @@ public class SessionState {
* when switching from one session to another.
*/
public static SessionState start(SessionState startSs) {
-
setCurrentSessionState(startSs);
if (startSs.hiveHist == null){
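
Finally, the SessionState hunk above initializes the encryption shim lazily and only for HDFS. A compact sketch of that pattern (EncryptionShim here is a stand-in interface for HadoopShims.HdfsEncryptionShim):

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;

public class LazyShimSketch {
  interface EncryptionShim { } // stand-in for HadoopShims.HdfsEncryptionShim

  private EncryptionShim shim;

  EncryptionShim get(Configuration conf) throws IOException {
    if (shim == null) {
      FileSystem fs = FileSystem.get(conf);
      if ("hdfs".equals(fs.getUri().getScheme())) {
        shim = createShim(fs, conf); // ShimLoader.getHadoopShims().createHdfsEncryptionShim in the patch
      }
      // Non-HDFS filesystems have no encryption zones; callers handle null.
    }
    return shim;
  }

  private EncryptionShim createShim(FileSystem fs, Configuration conf) {
    return new EncryptionShim() { }; // stub for the sketch
  }
}
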
Modified: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Initiator.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Initiator.java?rev=1653062&r1=1653061&r2=1653062&view=diff
==============================================================================
--- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Initiator.java (original)
+++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Initiator.java Mon Jan 19 17:28:53 2015
@@ -22,6 +22,7 @@ import org.apache.commons.logging.LogFac
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hive.common.FileUtils;
import org.apache.hadoop.hive.common.ValidTxnList;
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.metastore.api.CompactionRequest;
@@ -262,7 +263,7 @@ public class Initiator extends Compactor
private long sumDirSize(FileSystem fs, Path dir) throws IOException {
long size = 0;
- FileStatus[] buckets = fs.listStatus(dir);
+ FileStatus[] buckets = fs.listStatus(dir, FileUtils.HIDDEN_FILES_PATH_FILTER);
for (int i = 0; i < buckets.length; i++) {
size += buckets[i].getLen();
}
Modified: hive/trunk/ql/src/test/org/apache/hadoop/hive/ql/io/TestSymlinkTextInputFormat.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/org/apache/hadoop/hive/ql/io/TestSymlinkTextInputFormat.java?rev=1653062&r1=1653061&r2=1653062&view=diff
==============================================================================
--- hive/trunk/ql/src/test/org/apache/hadoop/hive/ql/io/TestSymlinkTextInputFormat.java (original)
+++ hive/trunk/ql/src/test/org/apache/hadoop/hive/ql/io/TestSymlinkTextInputFormat.java Mon Jan 19 17:28:53 2015
@@ -101,7 +101,7 @@ public class TestSymlinkTextInputFormat
}
/**
- * Test combine symlink text input file. Two input dir, and each contails one
+ * Test combine symlink text input file. Two input dir, and each contains one
* file, and then create one symlink file containing these 2 files. Normally
* without combine, it will return at least 2 splits
*/
@@ -166,7 +166,11 @@ public class TestSymlinkTextInputFormat
}
String cmd = "select key*1 from " + tblName;
- drv.compile(cmd);
+ ecode = drv.compile(cmd);
+ if (ecode != 0) {
+ throw new Exception("Select compile: " + cmd
+ + " failed with exit code= " + ecode);
+ }
//create scratch dir
Context ctx = new Context(newJob);
Modified: hive/trunk/ql/src/test/org/apache/hadoop/hive/ql/processors/TestCommandProcessorFactory.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/org/apache/hadoop/hive/ql/processors/TestCommandProcessorFactory.java?rev=1653062&r1=1653061&r2=1653062&view=diff
==============================================================================
--- hive/trunk/ql/src/test/org/apache/hadoop/hive/ql/processors/TestCommandProcessorFactory.java (original)
+++ hive/trunk/ql/src/test/org/apache/hadoop/hive/ql/processors/TestCommandProcessorFactory.java Mon Jan 19 17:28:53 2015
@@ -20,15 +20,17 @@ package org.apache.hadoop.hive.ql.proces
import java.sql.SQLException;
-import junit.framework.Assert;
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.ql.session.SessionState;
+import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
public class TestCommandProcessorFactory {
+ private final String[] testOnlyCommands = new String[]{"crypto"};
+
private HiveConf conf;
@Before
@@ -38,27 +40,39 @@ public class TestCommandProcessorFactory
@Test
public void testInvalidCommands() throws Exception {
- Assert.assertNull("Null should have returned null", CommandProcessorFactory.getForHiveCommand(null, conf));
- Assert.assertNull("Blank should have returned null", CommandProcessorFactory.getForHiveCommand(new String[]{" "}, conf));
- Assert.assertNull("set role should have returned null", CommandProcessorFactory.getForHiveCommand(new String[]{"set role"}, conf));
- Assert.assertNull("SQL should have returned null", CommandProcessorFactory.getForHiveCommand(new String[]{"SELECT * FROM TABLE"}, conf));
+ Assert.assertNull("Null should have returned null",
+ CommandProcessorFactory.getForHiveCommand(null, conf));
+ Assert.assertNull("Blank should have returned null",
+ CommandProcessorFactory.getForHiveCommand(new String[]{" "}, conf));
+ Assert.assertNull("Set role should have returned null",
+ CommandProcessorFactory.getForHiveCommand(new String[]{"set role"}, conf));
+ Assert.assertNull("SQL should have returned null",
+ CommandProcessorFactory.getForHiveCommand(new String[]{"SELECT * FROM TABLE"}, conf));
+ Assert.assertNull("Test only command should have returned null",
+ CommandProcessorFactory.getForHiveCommand(new String[]{"CRYPTO --listZones"}, conf));
}
+
@Test
public void testAvailableCommands() throws Exception {
+ enableTestOnlyCmd(conf);
SessionState.start(conf);
+
for (HiveCommand command : HiveCommand.values()) {
String cmd = command.name();
- Assert.assertNotNull("Cmd " + cmd + " not return null", CommandProcessorFactory.getForHiveCommand(new String[]{cmd}, conf));
- }
- for (HiveCommand command : HiveCommand.values()) {
- String cmd = command.name().toLowerCase();
- Assert.assertNotNull("Cmd " + cmd + " not return null", CommandProcessorFactory.getForHiveCommand(new String[]{cmd}, conf));
+ String cmdInLowerCase = cmd.toLowerCase();
+ Assert.assertNotNull("Cmd " + cmd + " not return null",
+ CommandProcessorFactory
+ .getForHiveCommandInternal(new String[]{cmd}, conf, command.isOnlyForTesting()));
+ Assert.assertNotNull("Cmd " + cmd + " not return null",
+ CommandProcessorFactory.getForHiveCommandInternal(
+ new String[]{cmdInLowerCase}, conf, command.isOnlyForTesting()));
}
conf.set(HiveConf.ConfVars.HIVE_SECURITY_COMMAND_WHITELIST.toString(), "");
for (HiveCommand command : HiveCommand.values()) {
String cmd = command.name();
try {
- CommandProcessorFactory.getForHiveCommand(new String[]{cmd}, conf);
+ CommandProcessorFactory
+ .getForHiveCommandInternal(new String[]{cmd}, conf, command.isOnlyForTesting());
Assert.fail("Expected SQLException for " + cmd + " as available commands is empty");
} catch (SQLException e) {
Assert.assertEquals("Insufficient privileges to execute " + cmd, e.getMessage());
@@ -66,4 +80,13 @@ public class TestCommandProcessorFactory
}
}
}
+
+ private void enableTestOnlyCmd(HiveConf conf) {
+ StringBuilder securityCMDs = new StringBuilder(conf.getVar(HiveConf.ConfVars.HIVE_SECURITY_COMMAND_WHITELIST));
+ for (String c : testOnlyCommands) {
+ securityCMDs.append(",");
+ securityCMDs.append(c);
+ }
+ conf.set(HiveConf.ConfVars.HIVE_SECURITY_COMMAND_WHITELIST.toString(), securityCMDs.toString());
+ }
}
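[Editor's note] The pattern above gates test-only commands twice: the command must be on the security whitelist, and the caller must pass the testing flag through getForHiveCommandInternal(). A minimal sketch of that flow, using the factory and conf calls from this diff (the surrounding setup is illustrative, not part of the commit):

    HiveConf conf = new HiveConf();
    // Append the test-only command to the whitelist, as enableTestOnlyCmd() does above.
    String whitelist = conf.getVar(HiveConf.ConfVars.HIVE_SECURITY_COMMAND_WHITELIST);
    conf.set(HiveConf.ConfVars.HIVE_SECURITY_COMMAND_WHITELIST.toString(), whitelist + ",crypto");
    SessionState.start(conf);
    // With the flag set to true the factory resolves CRYPTO; the public
    // getForHiveCommand() entry point still returns null for it, as
    // testInvalidCommands() verifies above.
    CommandProcessor processor = CommandProcessorFactory
        .getForHiveCommandInternal(new String[]{"crypto"}, conf, true);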
Added: hive/trunk/ql/src/test/queries/clientpositive/encryption_insert_partition_dynamic.q
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/queries/clientpositive/encryption_insert_partition_dynamic.q?rev=1653062&view=auto
==============================================================================
--- hive/trunk/ql/src/test/queries/clientpositive/encryption_insert_partition_dynamic.q (added)
+++ hive/trunk/ql/src/test/queries/clientpositive/encryption_insert_partition_dynamic.q Mon Jan 19 17:28:53 2015
@@ -0,0 +1,57 @@
+set hive.exec.dynamic.partition.mode=nonstrict;
+set hive.support.concurrency=true;
+set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
+set hive.enforce.bucketing=true;
+
+-- SORT_QUERY_RESULTS
+
+-- init
+drop table IF EXISTS encryptedTable;
+drop table IF EXISTS unencryptedTable;
+
+create table encryptedTable(value string)
+ partitioned by (key string) clustered by (value) into 2 buckets stored as orc
+ LOCATION '${hiveconf:hive.metastore.warehouse.dir}/encryptedTable' TBLPROPERTIES ('transactional'='true');
+CRYPTO CREATE_KEY --keyName key_1 --bitLength 128;
+CRYPTO CREATE_ZONE --keyName key_1 --path ${hiveconf:hive.metastore.warehouse.dir}/encryptedTable;
+
+create table unencryptedTable(value string)
+ partitioned by (key string) clustered by (value) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true');
+
+-- insert encrypted table from values
+explain extended insert into table encryptedTable partition (key) values
+ ('val_501', '501'),
+ ('val_502', '502');
+
+insert into table encryptedTable partition (key) values
+ ('val_501', '501'),
+ ('val_502', '502');
+
+select * from encryptedTable order by key;
+
+-- insert encrypted table from unencrypted source
+explain extended from src
+insert into table encryptedTable partition (key)
+ select value, key limit 2;
+
+from src
+insert into table encryptedTable partition (key)
+ select value, key limit 2;
+
+select * from encryptedTable order by key;
+
+-- insert unencrypted table from encrypted source
+explain extended from encryptedTable
+insert into table unencryptedTable partition (key)
+ select value, key;
+
+from encryptedTable
+insert into table unencryptedTable partition (key)
+ select value, key;
+
+select * from unencryptedTable order by key;
+
+-- clean up
+drop table encryptedTable;
+CRYPTO DELETE_KEY --keyName key_1;
+drop table unencryptedTable;
\ No newline at end of file
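[Editor's note] The CRYPTO CREATE_KEY / CREATE_ZONE statements used in these .q files are the test-only commands introduced above. Outside the test driver, the equivalent setup would be done with the standard Hadoop key and encryption-zone CLIs, roughly as follows (the warehouse path is illustrative):

    hadoop key create key_1 -size 128
    hdfs crypto -createZone -keyName key_1 -path /user/hive/warehouse/encryptedTable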
Added: hive/trunk/ql/src/test/queries/clientpositive/encryption_insert_partition_static.q
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/queries/clientpositive/encryption_insert_partition_static.q?rev=1653062&view=auto
==============================================================================
--- hive/trunk/ql/src/test/queries/clientpositive/encryption_insert_partition_static.q (added)
+++ hive/trunk/ql/src/test/queries/clientpositive/encryption_insert_partition_static.q Mon Jan 19 17:28:53 2015
@@ -0,0 +1,62 @@
+set hive.support.concurrency=true;
+set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
+set hive.enforce.bucketing=true;
+
+-- SORT_QUERY_RESULTS
+
+-- init
+drop table IF EXISTS encryptedTable;
+drop table IF EXISTS unencryptedTable;
+
+create table encryptedTable(key string,
+ value string) partitioned by (ds string) clustered by (key) into 2 buckets stored as orc
+ LOCATION '${hiveconf:hive.metastore.warehouse.dir}/encryptedTable' TBLPROPERTIES ('transactional'='true');
+CRYPTO CREATE_KEY --keyName key_1 --bitLength 128;
+CRYPTO CREATE_ZONE --keyName key_1 --path ${hiveconf:hive.metastore.warehouse.dir}/encryptedTable;
+
+create table unencryptedTable(key string,
+ value string) partitioned by (ds string) clustered by (key) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true');
+
+-- insert encrypted table from values
+explain extended insert into table encryptedTable partition
+ (ds='today') values
+ ('501', 'val_501'),
+ ('502', 'val_502');
+
+insert into table encryptedTable partition
+ (ds='today') values
+ ('501', 'val_501'),
+ ('502', 'val_502');
+
+select * from encryptedTable order by key;
+
+-- insert encrypted table from unencrypted source
+explain extended from src
+insert into table encryptedTable partition
+ (ds='yesterday')
+ select * limit 2;
+
+from src
+insert into table encryptedTable partition
+ (ds='yesterday')
+ select * limit 2;
+
+select * from encryptedTable order by key;
+
+-- insert unencrypted table from encrypted source
+explain extended from encryptedTable
+insert into table unencryptedTable partition
+ (ds='today')
+ select key, value;
+
+from encryptedTable
+insert into table unencryptedTable partition
+ (ds='today')
+ select key, value;
+
+select * from unencryptedTable order by key;
+
+-- clean up
+drop table encryptedTable;
+CRYPTO DELETE_KEY --keyName key_1;
+drop table unencryptedTable;
\ No newline at end of file
Added: hive/trunk/ql/src/test/queries/clientpositive/encryption_join_unencrypted_tbl.q
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/queries/clientpositive/encryption_join_unencrypted_tbl.q?rev=1653062&view=auto
==============================================================================
--- hive/trunk/ql/src/test/queries/clientpositive/encryption_join_unencrypted_tbl.q (added)
+++ hive/trunk/ql/src/test/queries/clientpositive/encryption_join_unencrypted_tbl.q Mon Jan 19 17:28:53 2015
@@ -0,0 +1,15 @@
+--SORT_QUERY_RESULTS
+
+DROP TABLE IF EXISTS encrypted_table;
+CREATE TABLE encrypted_table (key INT, value STRING) LOCATION '${hiveconf:hive.metastore.warehouse.dir}/default/encrypted_table';
+CRYPTO CREATE_KEY --keyName key_128 --bitLength 128;
+CRYPTO CREATE_ZONE --keyName key_128 --path ${hiveconf:hive.metastore.warehouse.dir}/default/encrypted_table;
+
+INSERT OVERWRITE TABLE encrypted_table SELECT * FROM src;
+
+SELECT * FROM encrypted_table;
+
+EXPLAIN EXTENDED SELECT * FROM src t1 JOIN encrypted_table t2 WHERE t1.key = t2.key;
+
+drop table encrypted_table;
+CRYPTO DELETE_KEY --keyName key_128;
\ No newline at end of file
Added: hive/trunk/ql/src/test/queries/clientpositive/encryption_join_with_different_encryption_keys.q
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/queries/clientpositive/encryption_join_with_different_encryption_keys.q?rev=1653062&view=auto
==============================================================================
--- hive/trunk/ql/src/test/queries/clientpositive/encryption_join_with_different_encryption_keys.q (added)
+++ hive/trunk/ql/src/test/queries/clientpositive/encryption_join_with_different_encryption_keys.q Mon Jan 19 17:28:53 2015
@@ -0,0 +1,24 @@
+--SORT_QUERY_RESULTS
+
+-- Java JCE must be installed in order to have a key length of 256 bits
+DROP TABLE IF EXISTS table_key_1;
+CREATE TABLE table_key_1 (key INT, value STRING) LOCATION '${hiveconf:hive.metastore.warehouse.dir}/table_key_1';
+CRYPTO CREATE_KEY --keyName key_1 --bitLength 128;
+CRYPTO CREATE_ZONE --keyName key_1 --path ${hiveconf:hive.metastore.warehouse.dir}/table_key_1;
+
+DROP TABLE IF EXISTS table_key_2;
+CREATE TABLE table_key_2 (key INT, value STRING) LOCATION '${hiveconf:hive.metastore.warehouse.dir}/table_key_2';
+CRYPTO CREATE_KEY --keyName key_2 --bitLength 256;
+CRYPTO CREATE_ZONE --keyName key_2 --path ${hiveconf:hive.metastore.warehouse.dir}/table_key_2;
+
+INSERT OVERWRITE TABLE table_key_1 SELECT * FROM src;
+INSERT OVERWRITE TABLE table_key_2 SELECT * FROM src;
+
+EXPLAIN EXTENDED SELECT * FROM table_key_1 t1 JOIN table_key_2 t2 WHERE (t1.key = t2.key);
+SELECT * FROM table_key_1 t1 JOIN table_key_2 t2 WHERE (t1.key = t2.key);
+
+DROP TABLE table_key_1;
+DROP TABLE table_key_2;
+
+CRYPTO DELETE_KEY --keyName key_1;
+CRYPTO DELETE_KEY --keyName key_2;
\ No newline at end of file
Added: hive/trunk/ql/src/test/queries/clientpositive/encryption_load_data_to_encrypted_tables.q
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/queries/clientpositive/encryption_load_data_to_encrypted_tables.q?rev=1653062&view=auto
==============================================================================
--- hive/trunk/ql/src/test/queries/clientpositive/encryption_load_data_to_encrypted_tables.q (added)
+++ hive/trunk/ql/src/test/queries/clientpositive/encryption_load_data_to_encrypted_tables.q Mon Jan 19 17:28:53 2015
@@ -0,0 +1,20 @@
+DROP TABLE IF EXISTS encrypted_table;
+
+CREATE TABLE encrypted_table (key STRING, value STRING) LOCATION '${hiveconf:hive.metastore.warehouse.dir}/encrypted_table';
+
+-- Create encryption key and zone;
+crypto create_key --keyName key1;
+crypto create_zone --keyName key1 --path ${hiveconf:hive.metastore.warehouse.dir}/encrypted_table;
+
+-- Test loading data from the local filesystem;
+LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' OVERWRITE INTO TABLE encrypted_table;
+SELECT * FROM encrypted_table;
+
+-- Test loading data from the HDFS filesystem;
+dfs -copyFromLocal ../../data/files/kv1.txt hdfs:///tmp/kv1.txt;
+LOAD DATA INPATH '/tmp/kv1.txt' OVERWRITE INTO TABLE encrypted_table;
+SELECT * FROM encrypted_table;
+
+DROP TABLE encrypted_table;
+
+crypto delete_key --keyName key1;
\ No newline at end of file
Added: hive/trunk/ql/src/test/queries/clientpositive/encryption_select_read_only_encrypted_tbl.q
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/queries/clientpositive/encryption_select_read_only_encrypted_tbl.q?rev=1653062&view=auto
==============================================================================
--- hive/trunk/ql/src/test/queries/clientpositive/encryption_select_read_only_encrypted_tbl.q (added)
+++ hive/trunk/ql/src/test/queries/clientpositive/encryption_select_read_only_encrypted_tbl.q Mon Jan 19 17:28:53 2015
@@ -0,0 +1,16 @@
+-- SORT_QUERY_RESULTS
+
+DROP TABLE IF EXISTS encrypted_table;
+CREATE TABLE encrypted_table (key INT, value STRING) LOCATION '${hiveconf:hive.metastore.warehouse.dir}/default/encrypted_table';
+
+CRYPTO CREATE_KEY --keyName key_128 --bitLength 128;
+CRYPTO CREATE_ZONE --keyName key_128 --path ${hiveconf:hive.metastore.warehouse.dir}/default/encrypted_table;
+
+LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' INTO TABLE encrypted_table;
+
+dfs -chmod -R 555 ${hiveconf:hive.metastore.warehouse.dir}/default/encrypted_table;
+
+SELECT count(*) FROM encrypted_table;
+
+drop table encrypted_table;
+CRYPTO DELETE_KEY --keyName key_128;
Added: hive/trunk/ql/src/test/queries/clientpositive/encryption_select_read_only_unencrypted_tbl.q
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/queries/clientpositive/encryption_select_read_only_unencrypted_tbl.q?rev=1653062&view=auto
==============================================================================
--- hive/trunk/ql/src/test/queries/clientpositive/encryption_select_read_only_unencrypted_tbl.q (added)
+++ hive/trunk/ql/src/test/queries/clientpositive/encryption_select_read_only_unencrypted_tbl.q Mon Jan 19 17:28:53 2015
@@ -0,0 +1,12 @@
+-- SORT_QUERY_RESULTS
+
+DROP TABLE IF EXISTS unencrypted_table;
+CREATE TABLE unencrypted_table(key INT, value STRING) LOCATION '${hiveconf:hive.metastore.warehouse.dir}/default/unencrypted_table';
+
+LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' INTO TABLE unencrypted_table;
+
+dfs -chmod -R 555 ${hiveconf:hive.metastore.warehouse.dir}/default/unencrypted_table;
+
+SELECT count(*) FROM unencrypted_table;
+
+drop table unencrypted_table;
\ No newline at end of file
Modified: hive/trunk/ql/src/test/queries/clientpositive/smb_mapjoin_11.q
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/queries/clientpositive/smb_mapjoin_11.q?rev=1653062&r1=1653061&r2=1653062&view=diff
==============================================================================
--- hive/trunk/ql/src/test/queries/clientpositive/smb_mapjoin_11.q (original)
+++ hive/trunk/ql/src/test/queries/clientpositive/smb_mapjoin_11.q Mon Jan 19 17:28:53 2015
@@ -29,6 +29,13 @@ INSERT OVERWRITE TABLE test_table3 PARTI
INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1') SELECT /*+ MAPJOIN(b) */ a.key, b.value FROM test_table1 a JOIN test_table2 b ON a.key = b.key AND a.ds = '1' AND b.ds = '1';
+SELECT * FROM test_table1 ORDER BY key;
+SELECT * FROM test_table3 ORDER BY key;
+EXPLAIN EXTENDED SELECT * FROM test_table1 TABLESAMPLE(BUCKET 2 OUT OF 16);
+EXPLAIN EXTENDED SELECT * FROM test_table3 TABLESAMPLE(BUCKET 2 OUT OF 16);
+SELECT * FROM test_table1 TABLESAMPLE(BUCKET 2 OUT OF 16);
+SELECT * FROM test_table3 TABLESAMPLE(BUCKET 2 OUT OF 16);
+
-- Join data from a sampled bucket to verify the data is bucketed
SELECT COUNT(*) FROM test_table3 TABLESAMPLE(BUCKET 2 OUT OF 16) a JOIN test_table1 TABLESAMPLE(BUCKET 2 OUT OF 16) b ON a.key = b.key AND a.ds = '1' AND b.ds='1';
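[Editor's note] TABLESAMPLE(BUCKET x OUT OF y) restricts the scan to bucket x of a table hash-clustered into y buckets, which is why the added queries can compare a single bucket from each table to verify data placement. For instance, assuming the 16-bucket layout sampled above:

    SELECT COUNT(*) FROM test_table1 TABLESAMPLE(BUCKET 2 OUT OF 16) a WHERE a.ds = '1';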
Modified: hive/trunk/ql/src/test/results/clientnegative/fs_default_name2.q.out
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/results/clientnegative/fs_default_name2.q.out?rev=1653062&r1=1653061&r2=1653062&view=diff
==============================================================================
--- hive/trunk/ql/src/test/results/clientnegative/fs_default_name2.q.out (original)
+++ hive/trunk/ql/src/test/results/clientnegative/fs_default_name2.q.out Mon Jan 19 17:28:53 2015
@@ -1 +1 @@
-FAILED: IllegalArgumentException Illegal character in scheme name at index 0: 'http://www.example.com
+FAILED: SemanticException java.lang.IllegalArgumentException: Illegal character in scheme name at index 0: 'http://www.example.com