Posted to commits@hive.apache.org by xu...@apache.org on 2014/06/02 05:10:17 UTC
svn commit: r1599085 - in /hive/trunk:
common/src/java/org/apache/hadoop/hive/common/
itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/
ql/src/java/org/apache/hadoop/hive/ql/metadata/
shims/0.20/src/main/java/org/apache/hadoop/hive/shi...
Author: xuefu
Date: Mon Jun 2 03:10:16 2014
New Revision: 1599085
URL: http://svn.apache.org/r1599085
Log:
HIVE-7119: Extended ACL's should be inherited if warehouse perm inheritance enabled (Szehon via Xuefu)
Added:
hive/trunk/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/FolderPermissionBase.java
hive/trunk/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/TestExtendedAcls.java
Modified:
hive/trunk/common/src/java/org/apache/hadoop/hive/common/FileUtils.java
hive/trunk/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/TestFolderPermissions.java
hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
hive/trunk/shims/0.20/src/main/java/org/apache/hadoop/hive/shims/Hadoop20Shims.java
hive/trunk/shims/0.20S/src/main/java/org/apache/hadoop/hive/shims/Hadoop20SShims.java
hive/trunk/shims/0.23/src/main/java/org/apache/hadoop/hive/shims/Hadoop23Shims.java
hive/trunk/shims/common-secure/src/main/java/org/apache/hadoop/hive/shims/HadoopShimsSecure.java
hive/trunk/shims/common/src/main/java/org/apache/hadoop/hive/shims/HadoopShims.java
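
Summary of the change: permission inheritance previously shelled out to FsShell ("-chgrp -R" / "-chmod -R"), which copies only the POSIX permission bits and silently drops HDFS extended ACLs. This commit routes inheritance through a new shim pair, getFullFileStatus/setFullFileStatus, so the Hadoop 0.23 shim can carry the ACLs over as well. A minimal sketch of the new call pattern, using only names that appear in the diffs below (the wrapper class and method are hypothetical):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.shims.HadoopShims;
import org.apache.hadoop.hive.shims.HadoopShims.HdfsFileStatus;
import org.apache.hadoop.hive.shims.ShimLoader;

public class InheritPermsSketch {
  // Copy the parent's full status (group and permission, plus extended ACLs
  // on Hadoop 0.23+) onto a newly created child; the shims apply it to the
  // entire subtree under the target.
  public static void inherit(Configuration conf, FileSystem fs,
      Path existingParent, Path newChild) throws Exception {
    HadoopShims shims = ShimLoader.getHadoopShims();
    HdfsFileStatus parentStatus = shims.getFullFileStatus(conf, fs, existingParent);
    shims.setFullFileStatus(conf, parentStatus, fs, newChild);
  }
}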
Modified: hive/trunk/common/src/java/org/apache/hadoop/hive/common/FileUtils.java
URL: http://svn.apache.org/viewvc/hive/trunk/common/src/java/org/apache/hadoop/hive/common/FileUtils.java?rev=1599085&r1=1599084&r2=1599085&view=diff
==============================================================================
--- hive/trunk/common/src/java/org/apache/hadoop/hive/common/FileUtils.java (original)
+++ hive/trunk/common/src/java/org/apache/hadoop/hive/common/FileUtils.java Mon Jun 2 03:10:16 2014
@@ -40,6 +40,7 @@ import org.apache.hadoop.fs.permission.F
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.shims.HadoopShims;
+import org.apache.hadoop.hive.shims.HadoopShims.HdfsFileStatus;
import org.apache.hadoop.hive.shims.ShimLoader;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.util.Shell;
@@ -483,29 +484,23 @@ public final class FileUtils {
} catch (FileNotFoundException ignore) {
}
//inherit perms: need to find last existing parent path, and apply its permission on entire subtree.
- Path path = f;
- List<Path> pathsToSet = new ArrayList<Path>();
- while (!fs.exists(path)) {
- pathsToSet.add(path);
- path = path.getParent();
+ Path lastExistingParent = f;
+ Path firstNonExistentParent = null;
+ while (!fs.exists(lastExistingParent)) {
+ firstNonExistentParent = lastExistingParent;
+ lastExistingParent = lastExistingParent.getParent();
}
- //at the end of this loop, path is the last-existing parent path.
boolean success = fs.mkdirs(f);
if (!success) {
return false;
} else {
- FsPermission parentPerm = fs.getFileStatus(path).getPermission();
- String permString = Integer.toString(parentPerm.toShort(), 8);
- for (Path pathToSet : pathsToSet) {
- LOG.info("Setting permission of parent directory: " + path.toString() +
- " on new directory: " + pathToSet.toString());
- try {
- FsShell fshell = new FsShell();
- fshell.setConf(conf);
- fshell.run(new String[]{"-chmod", "-R", permString, pathToSet.toString()});
- } catch (Exception e) {
- LOG.warn("Error setting permissions of " + pathToSet, e);
- }
+ HadoopShims shim = ShimLoader.getHadoopShims();
+ HdfsFileStatus fullFileStatus = shim.getFullFileStatus(conf, fs, lastExistingParent);
+ try {
+ //set on the entire subtree
+ shim.setFullFileStatus(conf, fullFileStatus, fs, firstNonExistentParent);
+ } catch (Exception e) {
+ LOG.warn("Error setting permissions of " + firstNonExistentParent, e);
}
return true;
}
@@ -523,16 +518,10 @@ public final class FileUtils {
boolean copied = FileUtil.copy(srcFS, src, dstFS, dst, deleteSource, overwrite, conf);
boolean inheritPerms = conf.getBoolVar(HiveConf.ConfVars.HIVE_WAREHOUSE_SUBDIR_INHERIT_PERMS);
if (copied && inheritPerms) {
- FileStatus destFileStatus = dstFS.getFileStatus(dst);
- FsPermission perm = destFileStatus.getPermission();
- String permString = Integer.toString(perm.toShort(), 8);
- String group = destFileStatus.getGroup();
- //use FsShell to change group and permissions recursively
+ HadoopShims shims = ShimLoader.getHadoopShims();
+ HdfsFileStatus fullFileStatus = shims.getFullFileStatus(conf, dstFS, dst);
try {
- FsShell fshell = new FsShell();
- fshell.setConf(conf);
- fshell.run(new String[]{"-chgrp", "-R", group, dst.toString()});
- fshell.run(new String[]{"-chmod", "-R", permString, dst.toString()});
+ shims.setFullFileStatus(conf, fullFileStatus, dstFS, dst);
} catch (Exception e) {
LOG.warn("Error setting permissions or group of " + dst, e);
}
@@ -587,5 +576,4 @@ public final class FileUtils {
}
return result;
}
-
}
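
Both patched code paths above are gated by hive.warehouse.subdir.inherit.perms (HiveConf.ConfVars.HIVE_WAREHOUSE_SUBDIR_INHERIT_PERMS). A minimal sketch of enabling the flag programmatically, mirroring the test setup added later in this commit; the helper class is hypothetical:

import org.apache.hadoop.hive.conf.HiveConf;

public class EnableInheritPerms {
  public static HiveConf newConf() {
    HiveConf conf = new HiveConf();
    // gates the inheritance logic in both FileUtils.mkdir and FileUtils.copy above
    conf.setBoolVar(HiveConf.ConfVars.HIVE_WAREHOUSE_SUBDIR_INHERIT_PERMS, true);
    return conf;
  }
}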
Added: hive/trunk/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/FolderPermissionBase.java
URL: http://svn.apache.org/viewvc/hive/trunk/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/FolderPermissionBase.java?rev=1599085&view=auto
==============================================================================
--- hive/trunk/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/FolderPermissionBase.java (added)
+++ hive/trunk/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/FolderPermissionBase.java Mon Jun 2 03:10:16 2014
@@ -0,0 +1,556 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.ql.security;
+
+import java.net.URI;
+import java.util.ArrayList;
+import java.util.List;
+
+import junit.framework.Assert;
+
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.PathFilter;
+import org.apache.hadoop.fs.permission.AclStatus;
+import org.apache.hadoop.hive.cli.CliSessionState;
+import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
+import org.apache.hadoop.hive.metastore.MetaStoreUtils;
+import org.apache.hadoop.hive.ql.Driver;
+import org.apache.hadoop.hive.ql.processors.CommandProcessorResponse;
+import org.apache.hadoop.hive.ql.session.SessionState;
+import org.apache.hadoop.hive.shims.HadoopShims.MiniDFSShim;
+import org.apache.hadoop.hive.shims.ShimLoader;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+/**
+ * This tests the flag 'hive.warehouse.subdir.inherit.perms'.
+ */
+public abstract class FolderPermissionBase {
+ protected static HiveConf conf;
+ protected static Driver driver;
+ protected static String dataFileDir;
+ protected static Path dataFilePath;
+ protected static FileSystem fs;
+
+ protected static Path warehouseDir;
+ protected static Path baseDfsDir;
+
+ public static final PathFilter hiddenFileFilter = new PathFilter(){
+ public boolean accept(Path p){
+ String name = p.getName();
+ return !name.startsWith("_") && !name.startsWith(".");
+ }
+ };
+
+
+ public abstract void setPermission(String locn, int permIndex) throws Exception;
+
+ public abstract void verifyPermission(String locn, int permIndex) throws Exception;
+
+
+ public void setPermission(String locn) throws Exception {
+ setPermission(locn, 0);
+ }
+
+ public void verifyPermission(String locn) throws Exception {
+ verifyPermission(locn, 0);
+ }
+
+
+ public static void baseSetup() throws Exception {
+ MiniDFSShim dfs = ShimLoader.getHadoopShims().getMiniDfs(conf, 4, true, null);
+ fs = dfs.getFileSystem();
+ baseDfsDir = new Path(new Path(fs.getUri()), "/base");
+ fs.mkdirs(baseDfsDir);
+ warehouseDir = new Path(baseDfsDir, "warehouse");
+ fs.mkdirs(warehouseDir);
+ conf.setVar(ConfVars.METASTOREWAREHOUSE, warehouseDir.toString());
+
+ dataFileDir = conf.get("test.data.files").replace('\\', '/')
+ .replace("c:", "");
+ dataFilePath = new Path(dataFileDir, "kv1.txt");
+
+ //set hive conf vars
+ conf.setBoolVar(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY, false);
+ conf.setBoolVar(HiveConf.ConfVars.HIVE_WAREHOUSE_SUBDIR_INHERIT_PERMS, true);
+ conf.setVar(HiveConf.ConfVars.DYNAMICPARTITIONINGMODE, "nonstrict");
+ int port = MetaStoreUtils.findFreePort();
+ MetaStoreUtils.startMetaStore(port, ShimLoader.getHadoopThriftAuthBridge());
+
+ SessionState.start(new CliSessionState(conf));
+ driver = new Driver(conf);
+
+ setupDataTable();
+ }
+
+
+ private static void setupDataTable() throws Exception {
+ CommandProcessorResponse ret = driver.run("DROP TABLE IF EXISTS mysrc");
+ Assert.assertEquals(0,ret.getResponseCode());
+
+ ret = driver.run("CREATE TABLE mysrc (key STRING, value STRING) PARTITIONED BY (part1 string, part2 string) STORED AS TEXTFILE");
+ Assert.assertEquals(0,ret.getResponseCode());
+
+ ret = driver.run("LOAD DATA LOCAL INPATH '" + dataFilePath + "' INTO TABLE mysrc PARTITION (part1='1',part2='1')");
+ Assert.assertEquals(0,ret.getResponseCode());
+
+ ret = driver.run("LOAD DATA LOCAL INPATH '" + dataFilePath + "' INTO TABLE mysrc PARTITION (part1='2',part2='2')");
+ Assert.assertEquals(0,ret.getResponseCode());
+ }
+
+ @Test
+ public void testCreateTable() throws Exception {
+ String testDb = "mydb";
+ String tableName = "createtable";
+ CommandProcessorResponse ret = driver.run("CREATE DATABASE " + testDb);
+ Assert.assertEquals(0,ret.getResponseCode());
+
+ assertExistence(warehouseDir + "/" + testDb + ".db");
+ setPermission(warehouseDir + "/" + testDb + ".db");
+ verifyPermission(warehouseDir + "/" + testDb + ".db");
+
+ ret = driver.run("USE " + testDb);
+ Assert.assertEquals(0,ret.getResponseCode());
+
+ ret = driver.run("CREATE TABLE " + tableName + " (key string, value string)");
+ Assert.assertEquals(0,ret.getResponseCode());
+
+ verifyPermission(warehouseDir + "/" + testDb + ".db/" + tableName);
+
+ ret = driver.run("insert into table " + tableName + " select key,value from default.mysrc");
+ Assert.assertEquals(0,ret.getResponseCode());
+
+ assertExistence(warehouseDir + "/" + testDb + ".db/" + tableName);
+ verifyPermission(warehouseDir + "/" + testDb + ".db/" + tableName);
+
+ Assert.assertTrue(listStatus(warehouseDir + "/" + testDb + ".db/" + tableName).size() > 0);
+ for (String child : listStatus(warehouseDir + "/" + testDb + ".db/" + tableName)) {
+ verifyPermission(child);
+ }
+
+ ret = driver.run("USE default");
+ Assert.assertEquals(0,ret.getResponseCode());
+ }
+
+
+ @Test
+ public void testStaticPartition() throws Exception {
+ String tableName = "staticpart";
+ CommandProcessorResponse ret = driver.run("CREATE TABLE " + tableName + " (key string, value string) partitioned by (part1 string, part2 string)");
+ Assert.assertEquals(0,ret.getResponseCode());
+
+ assertExistence(warehouseDir + "/" + tableName);
+ setPermission(warehouseDir + "/" + tableName);
+
+ ret = driver.run("insert into table " + tableName + " partition(part1='1', part2='1') select key,value from mysrc where part1='1' and part2='1'");
+ Assert.assertEquals(0,ret.getResponseCode());
+
+ verifyPermission(warehouseDir + "/" + tableName + "/part1=1");
+ verifyPermission(warehouseDir + "/" + tableName + "/part1=1/part2=1");
+
+ Assert.assertTrue(listStatus(warehouseDir + "/" + tableName + "/part1=1/part2=1").size() > 0);
+ for (String child : listStatus(warehouseDir + "/" + tableName + "/part1=1/part2=1")) {
+ verifyPermission(child);
+ }
+ }
+
+ @Test
+ public void testAlterPartition() throws Exception {
+ String tableName = "alterpart";
+ CommandProcessorResponse ret = driver.run("CREATE TABLE " + tableName + " (key string, value string) partitioned by (part1 int, part2 int, part3 int)");
+ Assert.assertEquals(0,ret.getResponseCode());
+
+ assertExistence(warehouseDir + "/" + tableName);
+ setPermission(warehouseDir + "/" + tableName);
+
+ ret = driver.run("insert into table " + tableName + " partition(part1='1',part2='1',part3='1') select key,value from mysrc");
+ Assert.assertEquals(0,ret.getResponseCode());
+
+ //alter partition
+ ret = driver.run("alter table " + tableName + " partition (part1='1',part2='1',part3='1') rename to partition (part1='2',part2='2',part3='2')");
+ Assert.assertEquals(0,ret.getResponseCode());
+
+ verifyPermission(warehouseDir + "/" + tableName + "/part1=2");
+ verifyPermission(warehouseDir + "/" + tableName + "/part1=2/part2=2");
+ verifyPermission(warehouseDir + "/" + tableName + "/part1=2/part2=2/part3=2");
+
+ Assert.assertTrue(listStatus(warehouseDir + "/" + tableName + "/part1=2/part2=2/part3=2").size() > 0);
+ for (String child : listStatus(warehouseDir + "/" + tableName + "/part1=2/part2=2/part3=2")) {
+ verifyPermission(child);
+ }
+ }
+
+
+ @Test
+ public void testDynamicPartitions() throws Exception {
+ String tableName = "dynamicpart";
+
+ CommandProcessorResponse ret = driver.run("CREATE TABLE " + tableName + " (key string, value string) partitioned by (part1 string, part2 string)");
+ Assert.assertEquals(0,ret.getResponseCode());
+
+ assertExistence(warehouseDir + "/" + tableName);
+ setPermission(warehouseDir + "/" + tableName);
+
+ ret = driver.run("insert into table " + tableName + " partition (part1,part2) select key,value,part1,part2 from mysrc");
+ Assert.assertEquals(0,ret.getResponseCode());
+
+ verifyPermission(warehouseDir + "/" + tableName + "/part1=1");
+ verifyPermission(warehouseDir + "/" + tableName + "/part1=1/part2=1");
+
+ verifyPermission(warehouseDir + "/" + tableName + "/part1=2");
+ verifyPermission(warehouseDir + "/" + tableName + "/part1=2/part2=2");
+
+ Assert.assertTrue(listStatus(warehouseDir + "/" + tableName + "/part1=1/part2=1").size() > 0);
+ for (String child : listStatus(warehouseDir + "/" + tableName + "/part1=1/part2=1")) {
+ verifyPermission(child);
+ }
+
+ Assert.assertTrue(listStatus(warehouseDir + "/" + tableName + "/part1=2/part2=2").size() > 0);
+ for (String child : listStatus(warehouseDir + "/" + tableName + "/part1=2/part2=2")) {
+ verifyPermission(child);
+ }
+ }
+
+ @Test
+ public void testExternalTable() throws Exception {
+ String tableName = "externaltable";
+
+ String myLocation = warehouseDir + "/myfolder";
+ FileSystem fs = FileSystem.get(new URI(myLocation), conf);
+ fs.mkdirs(new Path(myLocation));
+ setPermission(myLocation);
+
+ CommandProcessorResponse ret = driver.run("CREATE TABLE " + tableName + " (key string, value string) LOCATION '" + myLocation + "'");
+ Assert.assertEquals(0,ret.getResponseCode());
+
+ ret = driver.run("insert into table " + tableName + " select key,value from mysrc");
+ Assert.assertEquals(0,ret.getResponseCode());
+
+ Assert.assertTrue(listStatus(myLocation).size() > 0);
+ for (String child : listStatus(myLocation)) {
+ verifyPermission(child);
+ }
+ }
+
+ @Test
+ public void testInsert() throws Exception {
+ //case 1 is non-partitioned table.
+ String tableName = "insert";
+
+ CommandProcessorResponse ret = driver.run("CREATE TABLE " + tableName + " (key string, value string)");
+ Assert.assertEquals(0,ret.getResponseCode());
+
+ String tableLoc = warehouseDir + "/" + tableName;
+ assertExistence(warehouseDir + "/" + tableName);
+
+ //case1A: insert into non-partitioned table.
+ setPermission(warehouseDir + "/" + tableName);
+ ret = driver.run("insert into table " + tableName + " select key,value from mysrc");
+ Assert.assertEquals(0,ret.getResponseCode());
+
+ Assert.assertTrue(listStatus(tableLoc).size() > 0);
+ for (String child : listStatus(tableLoc)) {
+ verifyPermission(child);
+ }
+
+ //case1B: insert overwrite into non-partitioned table
+ setPermission(warehouseDir + "/" + tableName, 1);
+ ret = driver.run("insert overwrite table " + tableName + " select key,value from mysrc");
+ Assert.assertEquals(0,ret.getResponseCode());
+
+ Assert.assertTrue(listStatus(tableLoc).size() > 0);
+ for (String child : listStatus(tableLoc)) {
+ verifyPermission(child, 1);
+ }
+
+ //case 2 is partitioned table.
+ tableName = "insertpartition";
+
+ ret = driver.run("CREATE TABLE " + tableName + " (key string, value string) partitioned by (part1 int, part2 int)");
+ Assert.assertEquals(0,ret.getResponseCode());
+
+ ret = driver.run("insert overwrite table " + tableName + " partition(part1='1',part2='1') select key,value from mysrc");
+ Assert.assertEquals(0,ret.getResponseCode());
+
+ String partLoc = warehouseDir + "/" + tableName + "/part1=1/part2=1";
+ assertExistence(partLoc);
+
+ //case 2A: insert overwrite into partitioned table.
+ setPermission(partLoc);
+ ret = driver.run("insert overwrite table " + tableName + " partition(part1='1',part2='1') select key,value from mysrc");
+ Assert.assertEquals(0,ret.getResponseCode());
+
+ Assert.assertTrue(listStatus(partLoc).size() > 0);
+ for (String child : listStatus(partLoc)) {
+ verifyPermission(child);
+ }
+
+ //case 2B: insert overwrite into partitioned table, using the second permission scheme.
+ setPermission(partLoc, 1);
+ ret = driver.run("insert overwrite table " + tableName + " partition(part1='1',part2='1') select key,value from mysrc");
+ Assert.assertEquals(0,ret.getResponseCode());
+
+ Assert.assertTrue(listStatus(partLoc).size() > 0);
+ for (String child : listStatus(partLoc)) {
+ verifyPermission(child, 1);
+ }
+ }
+
+ @Test
+ public void testLoadLocal() throws Exception {
+ //case 1 is non-partitioned table.
+ String tableName = "loadlocal";
+
+ CommandProcessorResponse ret = driver.run("CREATE TABLE " + tableName + " (key string, value string)");
+ Assert.assertEquals(0,ret.getResponseCode());
+
+ String tableLoc = warehouseDir + "/" + tableName;
+ assertExistence(warehouseDir + "/" + tableName);
+
+ //case1A: load data local into non-partitioned table.
+ setPermission(warehouseDir + "/" + tableName);
+
+ ret = driver.run("load data local inpath '" + dataFilePath + "' into table " + tableName);
+ Assert.assertEquals(0,ret.getResponseCode());
+
+ Assert.assertTrue(listStatus(tableLoc).size() > 0);
+ for (String child : listStatus(tableLoc)) {
+ verifyPermission(child);
+ }
+
+ //case1B: load data local overwrite into non-partitioned table
+ setPermission(warehouseDir + "/" + tableName, 1);
+ ret = driver.run("load data local inpath '" + dataFilePath + "' overwrite into table " + tableName);
+ Assert.assertEquals(0,ret.getResponseCode());
+
+ Assert.assertTrue(listStatus(tableLoc).size() > 0);
+ for (String child : listStatus(tableLoc)) {
+ verifyPermission(child, 1);
+ }
+
+ //case 2 is partitioned table.
+ tableName = "loadlocalpartition";
+
+ ret = driver.run("CREATE TABLE " + tableName + " (key string, value string) partitioned by (part1 int, part2 int)");
+ Assert.assertEquals(0,ret.getResponseCode());
+ tableLoc = warehouseDir + "/" + tableName;
+ assertExistence(tableLoc);
+
+ //case 2A: load data local into partitioned table.
+ setPermission(tableLoc);
+ ret = driver.run("LOAD DATA LOCAL INPATH '" + dataFilePath + "' INTO TABLE " + tableName + " PARTITION (part1='1',part2='1')");
+ Assert.assertEquals(0,ret.getResponseCode());
+
+ String partLoc = warehouseDir + "/" + tableName + "/part1=1/part2=1";
+ Assert.assertTrue(listStatus(partLoc).size() > 0);
+ for (String child : listStatus(partLoc)) {
+ verifyPermission(child);
+ }
+
+ //case 2B: load data local overwrite into partitioned table.
+ setPermission(partLoc, 1);
+ ret = driver.run("LOAD DATA LOCAL INPATH '" + dataFilePath + "' OVERWRITE INTO TABLE " + tableName + " PARTITION (part1='1',part2='1')");
+ Assert.assertEquals(0,ret.getResponseCode());
+
+ Assert.assertTrue(listStatus(partLoc).size() > 0);
+ for (String child : listStatus(partLoc)) {
+ verifyPermission(child, 1);
+ }
+ }
+
+ @Test
+ public void testLoad() throws Exception {
+ String tableName = "load";
+ String location = "/hdfsPath";
+ fs.copyFromLocalFile(dataFilePath, new Path(location));
+
+ //case 1: load data
+ CommandProcessorResponse ret = driver.run("CREATE TABLE " + tableName + " (key string, value string)");
+ Assert.assertEquals(0,ret.getResponseCode());
+ String tableLoc = warehouseDir + "/" + tableName;
+ assertExistence(warehouseDir + "/" + tableName);
+
+ //case1A: load data into non-partitioned table.
+ setPermission(warehouseDir + "/" + tableName);
+
+ ret = driver.run("load data inpath '" + location + "' into table " + tableName);
+ Assert.assertEquals(0,ret.getResponseCode());
+
+ Assert.assertTrue(listStatus(tableLoc).size() > 0);
+ for (String child : listStatus(tableLoc)) {
+ verifyPermission(child);
+ }
+
+ //case1B: load data overwrite into non-partitioned table
+ setPermission(warehouseDir + "/" + tableName, 1);
+ fs.copyFromLocalFile(dataFilePath, new Path(location));
+ ret = driver.run("load data inpath '" + location + "' overwrite into table " + tableName);
+ Assert.assertEquals(0,ret.getResponseCode());
+
+ Assert.assertTrue(listStatus(tableLoc).size() > 0);
+ for (String child : listStatus(tableLoc)) {
+ verifyPermission(child, 1);
+ }
+
+ //case 2 is partitioned table.
+ tableName = "loadpartition";
+
+ ret = driver.run("CREATE TABLE " + tableName + " (key string, value string) partitioned by (part1 int, part2 int)");
+ Assert.assertEquals(0,ret.getResponseCode());
+ tableLoc = warehouseDir + "/" + tableName;
+ assertExistence(tableLoc);
+
+ //case 2A: load data into partitioned table.
+ setPermission(tableLoc);
+ fs.copyFromLocalFile(dataFilePath, new Path(location));
+ ret = driver.run("LOAD DATA INPATH '" + location + "' INTO TABLE " + tableName + " PARTITION (part1='1',part2='1')");
+ Assert.assertEquals(0,ret.getResponseCode());
+
+ String partLoc = warehouseDir + "/" + tableName + "/part1=1/part2=1";
+ Assert.assertTrue(listStatus(partLoc).size() > 0);
+ for (String child : listStatus(partLoc)) {
+ verifyPermission(child);
+ }
+
+ //case 2B: load data overwrite into partitioned table.
+ setPermission(partLoc, 1);
+ fs.copyFromLocalFile(dataFilePath, new Path(location));
+ ret = driver.run("LOAD DATA INPATH '" + location + "' OVERWRITE INTO TABLE " + tableName + " PARTITION (part1='1',part2='1')");
+ Assert.assertEquals(0,ret.getResponseCode());
+
+ Assert.assertTrue(listStatus(partLoc).size() > 0);
+ for (String child : listStatus(partLoc)) {
+ verifyPermission(child, 1);
+ }
+ }
+
+ @Test
+ public void testCtas() throws Exception {
+ String testDb = "ctasdb";
+ String tableName = "createtable";
+ CommandProcessorResponse ret = driver.run("CREATE DATABASE " + testDb);
+ Assert.assertEquals(0,ret.getResponseCode());
+
+ assertExistence(warehouseDir + "/" + testDb + ".db");
+ setPermission(warehouseDir + "/" + testDb + ".db");
+ verifyPermission(warehouseDir + "/" + testDb + ".db");
+
+ ret = driver.run("USE " + testDb);
+ Assert.assertEquals(0,ret.getResponseCode());
+
+ ret = driver.run("create table " + tableName + " as select key,value from default.mysrc");
+ Assert.assertEquals(0,ret.getResponseCode());
+
+ assertExistence(warehouseDir + "/" + testDb + ".db/" + tableName);
+ verifyPermission(warehouseDir + "/" + testDb + ".db/" + tableName);
+
+ Assert.assertTrue(listStatus(warehouseDir + "/" + testDb + ".db/" + tableName).size() > 0);
+ for (String child : listStatus(warehouseDir + "/" + testDb + ".db/" + tableName)) {
+ verifyPermission(child);
+ }
+
+ ret = driver.run("USE default");
+ Assert.assertEquals(0,ret.getResponseCode());
+ }
+
+ @Test
+ public void testExim() throws Exception {
+
+ //export the table to external file.
+ String myLocation = warehouseDir + "/exim";
+ FileSystem fs = FileSystem.get(new URI(myLocation), conf);
+ fs.mkdirs(new Path(myLocation));
+ setPermission(myLocation);
+ myLocation = myLocation + "/temp";
+
+ CommandProcessorResponse ret = driver.run("export table mysrc to '" + myLocation + "'");
+ Assert.assertEquals(0,ret.getResponseCode());
+
+ //check if exported data has inherited the permissions.
+ assertExistence(myLocation);
+ verifyPermission(myLocation);
+
+ assertExistence(myLocation + "/part1=1/part2=1");
+ verifyPermission(myLocation + "/part1=1/part2=1");
+ Assert.assertTrue(listStatus(myLocation + "/part1=1/part2=1").size() > 0);
+ for (String child : listStatus(myLocation + "/part1=1/part2=1")) {
+ verifyPermission(child);
+ }
+
+ assertExistence(myLocation + "/part1=2/part2=2");
+ verifyPermission(myLocation + "/part1=2/part2=2");
+ Assert.assertTrue(listStatus(myLocation + "/part1=2/part2=2").size() > 0);
+ for (String child : listStatus(myLocation + "/part1=2/part2=2")) {
+ verifyPermission(child);
+ }
+
+ //import the table back into another database
+ String testDb = "eximdb";
+ ret = driver.run("CREATE DATABASE " + testDb);
+ Assert.assertEquals(0,ret.getResponseCode());
+
+ //use another permission for this import location, to verify that it is really set (permIndex=1)
+ assertExistence(warehouseDir + "/" + testDb + ".db");
+ setPermission(warehouseDir + "/" + testDb + ".db", 1);
+
+ ret = driver.run("USE " + testDb);
+ Assert.assertEquals(0,ret.getResponseCode());
+
+ ret = driver.run("import from '" + myLocation + "'");
+ Assert.assertEquals(0,ret.getResponseCode());
+
+ //check that the imported table inherited the permissions of the destination database
+ assertExistence(warehouseDir + "/" + testDb + ".db/mysrc");
+ verifyPermission(warehouseDir + "/" + testDb + ".db/mysrc", 1);
+
+ myLocation = warehouseDir + "/" + testDb + ".db/mysrc";
+ assertExistence(myLocation);
+ verifyPermission(myLocation, 1);
+
+ assertExistence(myLocation + "/part1=1/part2=1");
+ verifyPermission(myLocation + "/part1=1/part2=1", 1);
+ Assert.assertTrue(listStatus(myLocation + "/part1=1/part2=1").size() > 0);
+ for (String child : listStatus(myLocation + "/part1=1/part2=1")) {
+ verifyPermission(child, 1);
+ }
+
+ assertExistence(myLocation + "/part1=2/part2=2");
+ verifyPermission(myLocation + "/part1=2/part2=2", 1);
+ Assert.assertTrue(listStatus(myLocation + "/part1=2/part2=2").size() > 0);
+ for (String child : listStatus(myLocation + "/part1=2/part2=2")) {
+ verifyPermission(child, 1);
+ }
+ }
+
+ private void assertExistence(String locn) throws Exception {
+ Assert.assertTrue(fs.exists(new Path(locn)));
+ }
+
+ private List<String> listStatus(String locn) throws Exception {
+ List<String> results = new ArrayList<String>();
+ FileStatus[] listStatus = fs.listStatus(new Path(locn));
+ for (FileStatus status : listStatus) {
+ results.add(status.getPath().toString());
+ }
+ return results;
+ }
+}
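
FolderPermissionBase leaves only the two permission hooks abstract; everything else (MiniDFS, metastore, Driver and the query scenarios) is shared. A hedged sketch of the contract a concrete subclass fulfils; this dummy subclass is illustrative only, and the two real implementations follow below:

import org.apache.hadoop.hive.conf.HiveConf;
import org.junit.BeforeClass;

public class DummyFolderPermissionTest extends FolderPermissionBase {
  @BeforeClass
  public static void setup() throws Exception {
    conf = new HiveConf(DummyFolderPermissionTest.class);
    baseSetup(); // starts MiniDFS, the metastore and a Driver
  }

  @Override
  public void setPermission(String locn, int permIndex) throws Exception {
    // apply the permIndex-th permission scheme to locn
  }

  @Override
  public void verifyPermission(String locn, int permIndex) throws Exception {
    // assert that locn carries the permIndex-th permission scheme
  }
}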
Added: hive/trunk/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/TestExtendedAcls.java
URL: http://svn.apache.org/viewvc/hive/trunk/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/TestExtendedAcls.java?rev=1599085&view=auto
==============================================================================
--- hive/trunk/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/TestExtendedAcls.java (added)
+++ hive/trunk/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/TestExtendedAcls.java Mon Jun 2 03:10:16 2014
@@ -0,0 +1,165 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.ql.security;
+
+import static org.apache.hadoop.fs.permission.AclEntryScope.ACCESS;
+import static org.apache.hadoop.fs.permission.AclEntryType.GROUP;
+import static org.apache.hadoop.fs.permission.AclEntryType.OTHER;
+import static org.apache.hadoop.fs.permission.AclEntryType.USER;
+
+import java.util.List;
+
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.permission.AclEntry;
+import org.apache.hadoop.fs.permission.AclEntryScope;
+import org.apache.hadoop.fs.permission.AclEntryType;
+import org.apache.hadoop.fs.permission.FsAction;
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.hive.conf.HiveConf;
+import org.junit.Assert;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import com.google.common.collect.Lists;
+
+public class TestExtendedAcls extends FolderPermissionBase {
+
+ @BeforeClass
+ public static void setup() throws Exception {
+ conf = new HiveConf(TestExtendedAcls.class);
+ //setup the mini DFS with acl's enabled.
+ conf.set("dfs.namenode.acls.enabled", "true");
+ baseSetup();
+ }
+
+ List<AclEntry> aclSpec1 = Lists.newArrayList(
+ aclEntry(ACCESS, USER, FsAction.ALL),
+ aclEntry(ACCESS, GROUP, FsAction.ALL),
+ aclEntry(ACCESS, OTHER, FsAction.ALL),
+ aclEntry(ACCESS, USER, "bar", FsAction.READ_WRITE),
+ aclEntry(ACCESS, USER, "foo", FsAction.READ_EXECUTE),
+ aclEntry(ACCESS, GROUP, "bar", FsAction.READ_WRITE),
+ aclEntry(ACCESS, GROUP, "foo", FsAction.READ_EXECUTE));
+
+ List<AclEntry> aclSpec2 = Lists.newArrayList(
+ aclEntry(ACCESS, USER, FsAction.ALL),
+ aclEntry(ACCESS, GROUP, FsAction.ALL),
+ aclEntry(ACCESS, OTHER, FsAction.READ_EXECUTE),
+ aclEntry(ACCESS, USER, "bar2", FsAction.READ_WRITE),
+ aclEntry(ACCESS, USER, "foo2", FsAction.READ_EXECUTE),
+ aclEntry(ACCESS, GROUP, "bar2", FsAction.READ),
+ aclEntry(ACCESS, GROUP, "foo2", FsAction.READ_EXECUTE));
+
+ @Override
+ public void setPermission(String locn, int permIndex) throws Exception {
+ switch (permIndex) {
+ case 0:
+ setAcl(locn, aclSpec1);
+ break;
+ case 1:
+ setAcl(locn, aclSpec2);
+ break;
+ default:
+ throw new RuntimeException("Only 2 permission schemes are defined by this test");
+ }
+ }
+
+ @Override
+ public void verifyPermission(String locn, int permIndex) throws Exception {
+ switch (permIndex) {
+ case 0:
+ FsPermission perm = fs.getFileStatus(new Path(locn)).getPermission();
+ Assert.assertEquals("rwxrwxrwx", perm.toString());
+
+ List<AclEntry> actual = getAcl(locn);
+ verifyAcls(aclSpec1, actual);
+ break;
+ case 1:
+ perm = fs.getFileStatus(new Path(locn)).getPermission();
+ Assert.assertEquals("rwxrwxr-x", perm.toString());
+
+ List<AclEntry> acls = getAcl(locn);
+ verifyAcls(aclSpec2, acls);
+ break;
+ default:
+ throw new RuntimeException("Only 2 permission schemes are defined by this test");
+ }
+ }
+
+ /**
+ * Create a new AclEntry with scope, type and permission (no name).
+ *
+ * @param scope
+ * AclEntryScope scope of the ACL entry
+ * @param type
+ * AclEntryType ACL entry type
+ * @param permission
+ * FsAction set of permissions in the ACL entry
+ * @return AclEntry new AclEntry
+ */
+ private AclEntry aclEntry(AclEntryScope scope, AclEntryType type,
+ FsAction permission) {
+ return new AclEntry.Builder().setScope(scope).setType(type)
+ .setPermission(permission).build();
+ }
+
+ /**
+ * Create a new AclEntry with scope, type, name and permission.
+ *
+ * @param scope
+ * AclEntryScope scope of the ACL entry
+ * @param type
+ * AclEntryType ACL entry type
+ * @param name
+ * String optional ACL entry name
+ * @param permission
+ * FsAction set of permissions in the ACL entry
+ * @return AclEntry new AclEntry
+ */
+ private AclEntry aclEntry(AclEntryScope scope, AclEntryType type,
+ String name, FsAction permission) {
+ return new AclEntry.Builder().setScope(scope).setType(type).setName(name)
+ .setPermission(permission).build();
+ }
+
+ private void verifyAcls(List<AclEntry> expectedList, List<AclEntry> actualList) {
+ for (AclEntry expected : expectedList) {
+ if (expected.getName() != null) {
+ //unnamed ACL entries come back as the regular permission bits, not as AclEntries, so only named entries are compared.
+ boolean found = false;
+ for (AclEntry actual : actualList) {
+ if (actual.equals(expected)) {
+ found = true;
+ }
+ }
+ if (!found) {
+ Assert.fail("Following Acl does not have a match: " + expected);
+ }
+ }
+ }
+ }
+
+ private void setAcl(String locn, List<AclEntry> aclSpec) throws Exception {
+ fs.setAcl(new Path(locn), aclSpec);
+ }
+
+ private List<AclEntry> getAcl(String locn) throws Exception {
+ return fs.getAclStatus(new Path(locn)).getEntries();
+ }
+}
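
The test drives the FileSystem ACL API (added in Apache Hadoop 2.4), with dfs.namenode.acls.enabled set to true as in setup() above. A standalone sketch of the same round trip; the class, methods and the "bar" principal are illustrative:

import java.util.List;

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclEntryScope;
import org.apache.hadoop.fs.permission.AclEntryType;
import org.apache.hadoop.fs.permission.FsAction;

import com.google.common.collect.Lists;

public class AclRoundTripSketch {

  static AclEntry entry(AclEntryType type, String name, FsAction perm) {
    AclEntry.Builder b = new AclEntry.Builder()
        .setScope(AclEntryScope.ACCESS).setType(type).setPermission(perm);
    if (name != null) {
      b.setName(name);
    }
    return b.build();
  }

  static void roundTrip(FileSystem fs, Path p) throws Exception {
    // setAcl replaces the whole ACL, so the unnamed user/group/other
    // entries are mandatory alongside any named (extended) entries.
    List<AclEntry> spec = Lists.newArrayList(
        entry(AclEntryType.USER, null, FsAction.ALL),
        entry(AclEntryType.GROUP, null, FsAction.ALL),
        entry(AclEntryType.OTHER, null, FsAction.READ_EXECUTE),
        entry(AclEntryType.USER, "bar", FsAction.READ_WRITE));
    fs.setAcl(p, spec);
    // on read-back, named entries surface as AclEntries, while the unnamed
    // entries come back as the plain permission bits
    List<AclEntry> entries = fs.getAclStatus(p).getEntries();
  }
}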
Modified: hive/trunk/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/TestFolderPermissions.java
URL: http://svn.apache.org/viewvc/hive/trunk/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/TestFolderPermissions.java?rev=1599085&r1=1599084&r2=1599085&view=diff
==============================================================================
--- hive/trunk/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/TestFolderPermissions.java (original)
+++ hive/trunk/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/TestFolderPermissions.java Mon Jun 2 03:10:16 2014
@@ -15,345 +15,37 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
-
package org.apache.hadoop.hive.ql.security;
-import java.net.URI;
-import java.util.ArrayList;
-import java.util.List;
-
import junit.framework.Assert;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.PathFilter;
import org.apache.hadoop.fs.permission.FsPermission;
-import org.apache.hadoop.hive.cli.CliSessionState;
import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.metastore.MetaStoreUtils;
-import org.apache.hadoop.hive.ql.Driver;
-import org.apache.hadoop.hive.ql.processors.CommandProcessorResponse;
-import org.apache.hadoop.hive.ql.session.SessionState;
-import org.apache.hadoop.hive.shims.HadoopShims;
-import org.apache.hadoop.hive.shims.ShimLoader;
import org.junit.BeforeClass;
import org.junit.Test;
-/**
- * This test the flag 'hive.warehouse.subdir.inherit.perms'.
- */
-public class TestFolderPermissions {
- protected static HiveConf conf;
- protected static Driver driver;
- protected static String dataFileDir;
- protected static Path dataFilePath;
- protected static String testDir;
- protected static FileSystem fs;
-
- public static final PathFilter hiddenFileFilter = new PathFilter(){
- public boolean accept(Path p){
- String name = p.getName();
- return !name.startsWith("_") && !name.startsWith(".");
- }
- };
-
-
+public class TestFolderPermissions extends FolderPermissionBase {
@BeforeClass
- public static void setUp() throws Exception {
- testDir = System.getProperty("test.warehouse.dir");
-
+ public static void setup() throws Exception {
conf = new HiveConf(TestFolderPermissions.class);
- fs = FileSystem.get(new URI(testDir), conf);
- dataFileDir = conf.get("test.data.files").replace('\\', '/')
- .replace("c:", "");
- dataFilePath = new Path(dataFileDir, "kv1.txt");
-
- int port = MetaStoreUtils.findFreePort();
- conf.setBoolVar(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY, false);
- conf.setBoolVar(HiveConf.ConfVars.HIVE_WAREHOUSE_SUBDIR_INHERIT_PERMS, true);
- conf.setVar(HiveConf.ConfVars.DYNAMICPARTITIONINGMODE, "nonstrict");
-
- MetaStoreUtils.startMetaStore(port, ShimLoader.getHadoopThriftAuthBridge());
-
- SessionState.start(new CliSessionState(conf));
- driver = new Driver(conf);
-
- setupDataTable();
- }
-
-
- private static void setupDataTable() throws Exception {
- CommandProcessorResponse ret = driver.run("DROP TABLE IF EXISTS mysrc");
- Assert.assertEquals(0,ret.getResponseCode());
-
- ret = driver.run("CREATE TABLE mysrc (key STRING, value STRING) PARTITIONED BY (part1 string, part2 string) STORED AS TEXTFILE");
- Assert.assertEquals(0,ret.getResponseCode());
-
- ret = driver.run("LOAD DATA LOCAL INPATH '" + dataFilePath + "' INTO TABLE mysrc PARTITION (part1='1',part2='1')");
- Assert.assertEquals(0,ret.getResponseCode());
-
- ret = driver.run("LOAD DATA LOCAL INPATH '" + dataFilePath + "' INTO TABLE mysrc PARTITION (part1='2',part2='2')");
- Assert.assertEquals(0,ret.getResponseCode());
- }
-
- @Test
- public void testCreateTablePerms() throws Exception {
- String testDb = "mydb";
- String tableName = "createtable";
- CommandProcessorResponse ret = driver.run("CREATE DATABASE " + testDb);
- Assert.assertEquals(0,ret.getResponseCode());
-
- assertExistence(testDir + "/" + testDb + ".db");
- setPermissions(testDir + "/" + testDb + ".db", FsPermission.createImmutable((short) 0777));
-
- ret = driver.run("USE " + testDb);
- Assert.assertEquals(0,ret.getResponseCode());
-
- ret = driver.run("CREATE TABLE " + tableName + " (key string, value string)");
- Assert.assertEquals(0,ret.getResponseCode());
-
- ret = driver.run("insert into table " + tableName + " select key,value from default.mysrc");
-
- assertExistence(testDir + "/" + testDb + ".db/" + tableName);
- Assert.assertEquals("rwxrwxrwx", getPermissions(testDir + "/" + testDb + ".db/" + tableName).toString());
-
- ret = driver.run("USE default");
- Assert.assertEquals(0,ret.getResponseCode());
- }
-
-
- @Test
- public void testStaticPartitionPerms() throws Exception {
- String tableName = "staticpart";
- CommandProcessorResponse ret = driver.run("CREATE TABLE " + tableName + " (key string, value string) partitioned by (part1 string, part2 string)");
- Assert.assertEquals(0,ret.getResponseCode());
-
- assertExistence(testDir + "/" + tableName);
- setPermissions(testDir + "/" + tableName, FsPermission.createImmutable((short) 0777));
-
-
- ret = driver.run("insert into table " + tableName + " partition(part1='1', part2='1') select key,value from mysrc where part1='1' and part2='1'");
- Assert.assertEquals(0,ret.getResponseCode());
-
- Assert.assertEquals("rwxrwxrwx", getPermissions(testDir + "/" + tableName + "/part1=1").toString());
- Assert.assertEquals("rwxrwxrwx", getPermissions(testDir + "/" + tableName + "/part1=1/part2=1").toString());
-
- Assert.assertTrue(listChildrenPerms(testDir + "/" + tableName + "/part1=1/part2=1").size() > 0);
- for (FsPermission perm : listChildrenPerms(testDir + "/" + tableName + "/part1=1/part2=1")) {
- Assert.assertEquals("rwxrwxrwx", perm.toString());
- }
- }
-
- @Test
- public void testAlterPartitionPerms() throws Exception {
- String tableName = "alterpart";
- CommandProcessorResponse ret = driver.run("CREATE TABLE " + tableName + " (key string, value string) partitioned by (part1 int, part2 int, part3 int)");
- Assert.assertEquals(0,ret.getResponseCode());
-
- assertExistence(testDir + "/" + tableName);
- setPermissions(testDir + "/" + tableName, FsPermission.createImmutable((short) 0777));
-
- ret = driver.run("insert into table " + tableName + " partition(part1='1',part2='1',part3='1') select key,value from mysrc");
- Assert.assertEquals(0,ret.getResponseCode());
-
- //alter partition
- ret = driver.run("alter table " + tableName + " partition (part1='1',part2='1',part3='1') rename to partition (part1='2',part2='2',part3='2')");
- Assert.assertEquals(0,ret.getResponseCode());
-
- Assert.assertEquals("rwxrwxrwx", getPermissions(testDir + "/" + tableName + "/part1=2").toString());
- Assert.assertEquals("rwxrwxrwx", getPermissions(testDir + "/" + tableName + "/part1=2/part2=2").toString());
- Assert.assertEquals("rwxrwxrwx", getPermissions(testDir + "/" + tableName + "/part1=2/part2=2/part3=2").toString());
-
- Assert.assertTrue(listChildrenPerms(testDir + "/" + tableName + "/part1=2/part2=2/part3=2").size() > 0);
- for (FsPermission perm : listChildrenPerms(testDir + "/" + tableName + "/part1=2/part2=2/part3=2")) {
- Assert.assertEquals("rwxrwxrwx", perm.toString());
- }
- }
-
-
- @Test
- public void testDynamicPartitions() throws Exception {
- String tableName = "dynamicpart";
-
- CommandProcessorResponse ret = driver.run("CREATE TABLE " + tableName + " (key string, value string) partitioned by (part1 string, part2 string)");
- Assert.assertEquals(0,ret.getResponseCode());
-
- assertExistence(testDir + "/" + tableName);
- setPermissions(testDir + "/" + tableName, FsPermission.createImmutable((short) 0777));
-
- ret = driver.run("insert into table " + tableName + " partition (part1,part2) select key,value,part1,part2 from mysrc");
- Assert.assertEquals(0,ret.getResponseCode());
-
- Assert.assertEquals("rwxrwxrwx", getPermissions(testDir + "/" + tableName + "/part1=1").toString());
- Assert.assertEquals("rwxrwxrwx", getPermissions(testDir + "/" + tableName + "/part1=1/part2=1").toString());
-
- Assert.assertEquals("rwxrwxrwx", getPermissions(testDir + "/" + tableName + "/part1=2").toString());
- Assert.assertEquals("rwxrwxrwx", getPermissions(testDir + "/" + tableName + "/part1=2/part2=2").toString());
-
- Assert.assertTrue(listChildrenPerms(testDir + "/" + tableName + "/part1=1/part2=1").size() > 0);
- for (FsPermission perm : listChildrenPerms(testDir + "/" + tableName + "/part1=1/part2=1")) {
- Assert.assertEquals("rwxrwxrwx", perm.toString());
- }
-
- Assert.assertTrue(listChildrenPerms(testDir + "/" + tableName + "/part1=2/part2=2").size() > 0);
- for (FsPermission perm : listChildrenPerms(testDir + "/" + tableName + "/part1=2/part2=2")) {
- Assert.assertEquals("rwxrwxrwx", perm.toString());
- }
- }
-
- @Test
- public void testExternalTable() throws Exception {
- String tableName = "externaltable";
-
- String myLocation = testDir + "/myfolder";
- FileSystem fs = FileSystem.get(new URI(myLocation), conf);
- fs.mkdirs(new Path(myLocation), FsPermission.createImmutable((short) 0777));
-
- CommandProcessorResponse ret = driver.run("CREATE TABLE " + tableName + " (key string, value string) LOCATION '" + myLocation + "'");
- Assert.assertEquals(0,ret.getResponseCode());
-
- ret = driver.run("insert into table " + tableName + " select key,value from mysrc");
- Assert.assertEquals(0,ret.getResponseCode());
-
- Assert.assertTrue(listChildrenPerms(myLocation).size() > 0);
- for (FsPermission perm : listChildrenPerms(myLocation)) {
- Assert.assertEquals("rwxrwxrwx", perm.toString());
- }
+ baseSetup();
}
- @Test
- public void testInsertOverwrite() throws Exception {
- //case 1 is non-partitioned table.
- String tableName = "insertoverwrite";
-
- CommandProcessorResponse ret = driver.run("CREATE TABLE " + tableName + " (key string, value string)");
- Assert.assertEquals(0,ret.getResponseCode());
-
- String tableLoc = testDir + "/" + tableName;
- assertExistence(testDir + "/" + tableName);
- setPermissions(testDir + "/" + tableName, FsPermission.createImmutable((short) 0777));
-
- ret = driver.run("insert overwrite table " + tableName + " select key,value from mysrc");
- Assert.assertEquals(0,ret.getResponseCode());
-
- Assert.assertTrue(listChildrenPerms(tableLoc).size() > 0);
- for (FsPermission perm : listChildrenPerms(tableLoc)) {
- Assert.assertEquals("rwxrwxrwx", perm.toString());
- }
-
- //case 2 is partitioned table.
- tableName = "insertoverwritepartition";
-
- ret = driver.run("CREATE TABLE " + tableName + " (key string, value string) partitioned by (part1 int, part2 int)");
- Assert.assertEquals(0,ret.getResponseCode());
-
- ret = driver.run("insert overwrite table " + tableName + " partition(part1='1',part2='1') select key,value from mysrc");
- Assert.assertEquals(0,ret.getResponseCode());
-
- String partLoc = testDir + "/" + tableName + "/part1=1/part2=1";
- assertExistence(partLoc);
- setPermissions(partLoc, FsPermission.createImmutable((short) 0777));
-
- ret = driver.run("insert overwrite table " + tableName + " partition(part1='1',part2='1') select key,value from mysrc");
- Assert.assertEquals(0,ret.getResponseCode());
-
- Assert.assertTrue(listChildrenPerms(tableLoc).size() > 0);
- for (FsPermission perm : listChildrenPerms(tableLoc)) {
- Assert.assertEquals("rwxrwxrwx", perm.toString());
- }
- }
-
- @Test
- public void testEximPermissionInheritance() throws Exception {
-
- //export the table to external file.
- String myLocation = testDir + "/exim";
- FileSystem fs = FileSystem.get(new URI(myLocation), conf);
- fs.mkdirs(new Path(myLocation), FsPermission.createImmutable((short) 0777));
-
- myLocation = myLocation + "/temp";
-
- CommandProcessorResponse ret = driver.run("export table mysrc to '" + myLocation + "'");
- Assert.assertEquals(0,ret.getResponseCode());
-
- //check if exported data has inherited the permissions.
- assertExistence(myLocation);
- Assert.assertEquals(getPermissions(myLocation).toString(), "rwxrwxrwx");
-
- assertExistence(myLocation + "/part1=1/part2=1");
- Assert.assertEquals(getPermissions(myLocation + "/part1=1/part2=1").toString(), "rwxrwxrwx");
- Assert.assertTrue(listChildrenPerms(myLocation + "/part1=1/part2=1").size() > 0);
- for (FsPermission perm : listChildrenPerms(myLocation + "/part1=1/part2=1")) {
- Assert.assertEquals("rwxrwxrwx", perm.toString());
- }
-
- assertExistence(myLocation + "/part1=2/part2=2");
- Assert.assertEquals(getPermissions(myLocation + "/part1=2/part2=2").toString(), "rwxrwxrwx");
- Assert.assertTrue(listChildrenPerms(myLocation + "/part1=2/part2=2").size() > 0);
- for (FsPermission perm : listChildrenPerms(myLocation + "/part1=2/part2=2")) {
- Assert.assertEquals("rwxrwxrwx", perm.toString());
- }
-
- //import the table back into another database
- String testDb = "eximdb";
- ret = driver.run("CREATE DATABASE " + testDb);
- Assert.assertEquals(0,ret.getResponseCode());
-
- //use another permission for this import location, to verify that it is really set.
- assertExistence(testDir + "/" + testDb + ".db");
- setPermissions(testDir + "/" + testDb + ".db", FsPermission.createImmutable((short) 0766));
-
- ret = driver.run("USE " + testDb);
- Assert.assertEquals(0,ret.getResponseCode());
-
- ret = driver.run("import from '" + myLocation + "'");
- Assert.assertEquals(0,ret.getResponseCode());
-
- //check permissions of imported, from the exported table
- assertExistence(testDir + "/" + testDb + ".db/mysrc");
- Assert.assertEquals("rwxrw-rw-", getPermissions(testDir + "/" + testDb + ".db/mysrc").toString());
-
- myLocation = testDir + "/" + testDb + ".db/mysrc";
- assertExistence(myLocation);
- Assert.assertEquals(getPermissions(myLocation).toString(), "rwxrw-rw-");
-
- assertExistence(myLocation + "/part1=1/part2=1");
- Assert.assertEquals(getPermissions(myLocation + "/part1=1/part2=1").toString(), "rwxrw-rw-");
- Assert.assertTrue(listChildrenPerms(myLocation + "/part1=1/part2=1").size() > 0);
- for (FsPermission perm : listChildrenPerms(myLocation + "/part1=1/part2=1")) {
- Assert.assertEquals("rwxrw-rw-", perm.toString());
- }
-
- assertExistence(myLocation + "/part1=2/part2=2");
- Assert.assertEquals(getPermissions(myLocation + "/part1=2/part2=2").toString(), "rwxrw-rw-");
- Assert.assertTrue(listChildrenPerms(myLocation + "/part1=2/part2=2").size() > 0);
- for (FsPermission perm : listChildrenPerms(myLocation + "/part1=2/part2=2")) {
- Assert.assertEquals("rwxrw-rw-", perm.toString());
- }
- }
-
-
- private void setPermissions(String locn, FsPermission permissions) throws Exception {
- fs.setPermission(new Path(locn), permissions);
- }
-
- private FsPermission getPermissions(String locn) throws Exception {
- return fs.getFileStatus(new Path(locn)).getPermission();
- }
+ public FsPermission[] expected = new FsPermission[] {
+ FsPermission.createImmutable((short) 0777),
+ FsPermission.createImmutable((short) 0766)
+ };
- private void assertExistence(String locn) throws Exception {
- Assert.assertTrue(fs.exists(new Path(locn)));
+ @Override
+ public void setPermission(String locn, int permIndex) throws Exception {
+ fs.setPermission(new Path(locn), expected[permIndex]);
}
- private List<FsPermission> listChildrenPerms(String locn) throws Exception {
- HadoopShims hadoopShims = ShimLoader.getHadoopShims();
- List<FsPermission> result = new ArrayList<FsPermission>();
- List<FileStatus> fileStatuses = hadoopShims.listLocatedStatus(fs, new Path(locn), hiddenFileFilter);
- for (FileStatus status : fileStatuses) {
- result.add(status.getPermission());
- }
- return result;
+ @Override
+ public void verifyPermission(String locn, int permIndex) throws Exception {
+ FsPermission actual = fs.getFileStatus(new Path(locn)).getPermission();
+ Assert.assertEquals(expected[permIndex], actual);
}
}
Modified: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java?rev=1599085&r1=1599084&r2=1599085&view=diff
==============================================================================
--- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java (original)
+++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java Mon Jun 2 03:10:16 2014
@@ -45,10 +45,8 @@ import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.FsShell;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.PathFilter;
-import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hive.common.FileUtils;
import org.apache.hadoop.hive.common.HiveStatsUtils;
import org.apache.hadoop.hive.common.ObjectPair;
@@ -105,6 +103,8 @@ import org.apache.hadoop.hive.ql.session
import org.apache.hadoop.hive.serde2.Deserializer;
import org.apache.hadoop.hive.serde2.SerDeException;
import org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe;
+import org.apache.hadoop.hive.shims.HadoopShims;
+import org.apache.hadoop.hive.shims.HadoopShims.HdfsFileStatus;
import org.apache.hadoop.hive.shims.ShimLoader;
import org.apache.hadoop.mapred.InputFormat;
import org.apache.hadoop.util.StringUtils;
@@ -2244,33 +2244,29 @@ private void constructOneLBLocationMap(F
public static boolean renameFile(HiveConf conf, Path srcf, Path destf,
FileSystem fs, boolean replace, boolean isSrcLocal) throws HiveException {
boolean success = false;
+
+ //needed for perm inheritance.
boolean inheritPerms = HiveConf.getBoolVar(conf,
HiveConf.ConfVars.HIVE_WAREHOUSE_SUBDIR_INHERIT_PERMS);
- String group = null;
- String permission = null;
+ HadoopShims shims = ShimLoader.getHadoopShims();
+ HadoopShims.HdfsFileStatus destStatus = null;
try {
if (inheritPerms || replace) {
try{
- FileStatus deststatus = fs.getFileStatus(destf);
- if (inheritPerms) {
- group = deststatus.getGroup();
- permission= Integer.toString(deststatus.getPermission().toShort(), 8);
- }
+ destStatus = shims.getFullFileStatus(conf, fs, destf);
//if destf is an existing directory:
//if replace is true, delete followed by rename(mv) is equivalent to replace
//if replace is false, rename (mv) actually move the src under dest dir
//if destf is an existing file, rename is actually a replace, and do not need
// to delete the file first
- if (replace && deststatus.isDir()) {
+ if (replace && destStatus.getFileStatus().isDir()) {
fs.delete(destf, true);
}
} catch (FileNotFoundException ignore) {
//if dest dir does not exist, any re
if (inheritPerms) {
- FileStatus deststatus = fs.getFileStatus(destf.getParent());
- group = deststatus.getGroup();
- permission= Integer.toString(deststatus.getPermission().toShort(), 8);
+ destStatus = shims.getFullFileStatus(conf, fs, destf.getParent());
}
}
}
@@ -2289,14 +2285,10 @@ private void constructOneLBLocationMap(F
}
if (success && inheritPerms) {
- //use FsShell to change group and permissions recursively
try {
- FsShell fshell = new FsShell();
- fshell.setConf(conf);
- fshell.run(new String[]{"-chgrp", "-R", group, destf.toString()});
- fshell.run(new String[]{"-chmod", "-R", permission, destf.toString()});
- } catch (Exception e) {
- throw new HiveException("Unable to set permissions of " + destf, e);
+ ShimLoader.getHadoopShims().setFullFileStatus(conf, destStatus, fs, destf);
+ } catch (IOException e) {
+ LOG.warn("Error setting permission of file " + destf + ": "+ StringUtils.stringifyException(e));
}
}
return success;
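
The comment block inside renameFile compresses the mv-versus-replace semantics; restated as a hedged sketch in plain FileSystem calls (fs, srcf, destf and replace are assumed as in the method's signature, and the wrapper class is hypothetical):

import java.io.FileNotFoundException;

import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class RenameSemanticsSketch {
  static boolean rename(FileSystem fs, Path srcf, Path destf, boolean replace)
      throws Exception {
    try {
      FileStatus dest = fs.getFileStatus(destf);
      if (replace && dest.isDir()) {
        // delete followed by rename is equivalent to replace for a directory
        fs.delete(destf, true);
      }
      // an existing file is replaced by rename itself; an existing directory
      // with replace == false receives srcf as a child (plain mv)
    } catch (FileNotFoundException ignore) {
      // destf does not exist: rename is a plain move
    }
    return fs.rename(srcf, destf);
  }
}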
Modified: hive/trunk/shims/0.20/src/main/java/org/apache/hadoop/hive/shims/Hadoop20Shims.java
URL: http://svn.apache.org/viewvc/hive/trunk/shims/0.20/src/main/java/org/apache/hadoop/hive/shims/Hadoop20Shims.java?rev=1599085&r1=1599084&r2=1599085&view=diff
==============================================================================
--- hive/trunk/shims/0.20/src/main/java/org/apache/hadoop/hive/shims/Hadoop20Shims.java (original)
+++ hive/trunk/shims/0.20/src/main/java/org/apache/hadoop/hive/shims/Hadoop20Shims.java Mon Jun 2 03:10:16 2014
@@ -40,12 +40,14 @@ import java.util.Set;
import javax.security.auth.Subject;
import javax.security.auth.login.LoginException;
+import org.apache.commons.lang.ArrayUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.BlockLocation;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.FsShell;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.PathFilter;
import org.apache.hadoop.fs.ProxyFileSystem;
@@ -78,9 +80,6 @@ import org.apache.hadoop.util.Progressab
import org.apache.hadoop.util.ToolRunner;
import org.apache.hadoop.util.VersionInfo;
-/**
- * Implemention of shims against Hadoop 0.20.0.
- */
public class Hadoop20Shims implements HadoopShims {
/**
@@ -636,6 +635,51 @@ public class Hadoop20Shims implements Ha
}
@Override
+ public HdfsFileStatus getFullFileStatus(Configuration conf, FileSystem fs, Path file)
+ throws IOException {
+ return new Hadoop20FileStatus(fs.getFileStatus(file));
+ }
+
+ @Override
+ public void setFullFileStatus(Configuration conf, HdfsFileStatus sourceStatus,
+ FileSystem fs, Path target) throws IOException {
+ String group = sourceStatus.getFileStatus().getGroup();
+ String permission = Integer.toString(sourceStatus.getFileStatus().getPermission().toShort(), 8);
+ //use FsShell to change group and permissions recursively
+ try {
+ FsShell fshell = new FsShell();
+ fshell.setConf(conf);
+ run(fshell, new String[]{"-chgrp", "-R", group, target.toString()});
+ run(fshell, new String[]{"-chmod", "-R", permission, target.toString()});
+ } catch (Exception e) {
+ throw new IOException("Unable to set permissions of " + target, e);
+ }
+ try {
+ if (LOG.isDebugEnabled()) { //some trace logging
+ getFullFileStatus(conf, fs, target).debugLog();
+ }
+ } catch (Exception e) {
+ //ignore.
+ }
+ }
+
+ public class Hadoop20FileStatus implements HdfsFileStatus {
+ private FileStatus fileStatus;
+ public Hadoop20FileStatus(FileStatus fileStatus) {
+ this.fileStatus = fileStatus;
+ }
+ @Override
+ public FileStatus getFileStatus() {
+ return fileStatus;
+ }
+ public void debugLog() {
+ if (fileStatus != null) {
+ LOG.debug(fileStatus.toString());
+ }
+ }
+ }
+
+ @Override
public void authorizeProxyAccess(String proxyUser, UserGroupInformation realUserUgi,
String ipAddress, Configuration conf) throws IOException {
// This hadoop version doesn't have proxy verification
@@ -808,4 +852,9 @@ public class Hadoop20Shims implements Ha
conf.setBoolean("fs." + uri.getScheme() + ".impl.disable.cache", origDisableHDFSCache);
return fs;
}
-}
\ No newline at end of file
+
+ protected void run(FsShell shell, String[] command) throws Exception {
+ LOG.debug(ArrayUtils.toString(command));
+ shell.run(command);
+ }
+}
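
On the 0.20-era shims, setFullFileStatus can only replay the POSIX bits, since extended ACLs are not readable through this Hadoop API. The FsShell calls above are roughly equivalent to the following direct, single-path FileSystem calls; the class and method are illustrative, and FsShell's -R flag is what extends the effect over the whole subtree:

import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class PosixOnlyInheritSketch {
  static void apply(FileSystem fs, FileStatus source, Path target) throws Exception {
    // non-recursive equivalents of "-chgrp" and "-chmod"
    fs.setOwner(target, null, source.getGroup()); // null keeps the owner unchanged
    fs.setPermission(target, source.getPermission());
  }
}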
Modified: hive/trunk/shims/0.20S/src/main/java/org/apache/hadoop/hive/shims/Hadoop20SShims.java
URL: http://svn.apache.org/viewvc/hive/trunk/shims/0.20S/src/main/java/org/apache/hadoop/hive/shims/Hadoop20SShims.java?rev=1599085&r1=1599084&r2=1599085&view=diff
==============================================================================
--- hive/trunk/shims/0.20S/src/main/java/org/apache/hadoop/hive/shims/Hadoop20SShims.java (original)
+++ hive/trunk/shims/0.20S/src/main/java/org/apache/hadoop/hive/shims/Hadoop20SShims.java Mon Jun 2 03:10:16 2014
@@ -35,6 +35,7 @@ import org.apache.hadoop.fs.FSDataInputS
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.FsShell;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.PathFilter;
import org.apache.hadoop.fs.ProxyFileSystem;
@@ -402,6 +403,52 @@ public class Hadoop20SShims extends Hado
}
@Override
+ public HdfsFileStatus getFullFileStatus(Configuration conf, FileSystem fs, Path file)
+ throws IOException {
+ return new Hadoop20SFileStatus(fs.getFileStatus(file));
+ }
+
+ @Override
+ public void setFullFileStatus(Configuration conf, HdfsFileStatus sourceStatus,
+ FileSystem fs, Path target) throws IOException {
+ String group = sourceStatus.getFileStatus().getGroup();
+ String permission = Integer.toString(sourceStatus.getFileStatus().getPermission().toShort(), 8);
+ //use FsShell to change group and permissions recursively
+ try {
+ FsShell fshell = new FsShell();
+ fshell.setConf(conf);
+ run(fshell, new String[]{"-chgrp", "-R", group, target.toString()});
+ run(fshell, new String[]{"-chmod", "-R", permission, target.toString()});
+ } catch (Exception e) {
+ throw new IOException("Unable to set permissions of " + target, e);
+ }
+ try {
+ if (LOG.isDebugEnabled()) { //debug-log the resulting status
+ getFullFileStatus(conf, fs, target).debugLog();
+ }
+ } catch (Exception e) {
+ //ignore.
+ }
+ }
+
+ public class Hadoop20SFileStatus implements HdfsFileStatus {
+ private FileStatus fileStatus;
+ public Hadoop20SFileStatus(FileStatus fileStatus) {
+ this.fileStatus = fileStatus;
+ }
+ @Override
+ public FileStatus getFileStatus() {
+ return fileStatus;
+ }
+ @Override
+ public void debugLog() {
+ if (fileStatus != null) {
+ LOG.debug(fileStatus.toString());
+ }
+ }
+ }
+
+ @Override
public FileSystem createProxyFileSystem(FileSystem fs, URI uri) {
return new ProxyFileSystem(fs, uri);
}
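The 0.20S shim takes the same approach as the 0.20 one: group and mode are pushed down through FsShell, so the change recurses exactly as the CLI would. A standalone sketch of the equivalent invocations follows; the group name and warehouse path are placeholders, not values from this patch.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FsShell;

public class FsShellSketch {
  public static void main(String[] args) throws Exception {
    FsShell shell = new FsShell();
    shell.setConf(new Configuration());
    //same effect as: hadoop fs -chgrp -R hive /warehouse/db/tbl
    int rc1 = shell.run(new String[]{"-chgrp", "-R", "hive", "/warehouse/db/tbl"});
    //same effect as: hadoop fs -chmod -R 750 /warehouse/db/tbl
    int rc2 = shell.run(new String[]{"-chmod", "-R", "750", "/warehouse/db/tbl"});
    System.out.println("exit codes: " + rc1 + ", " + rc2);
  }
}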
Modified: hive/trunk/shims/0.23/src/main/java/org/apache/hadoop/hive/shims/Hadoop23Shims.java
URL: http://svn.apache.org/viewvc/hive/trunk/shims/0.23/src/main/java/org/apache/hadoop/hive/shims/Hadoop23Shims.java?rev=1599085&r1=1599084&r2=1599085&view=diff
==============================================================================
--- hive/trunk/shims/0.23/src/main/java/org/apache/hadoop/hive/shims/Hadoop23Shims.java (original)
+++ hive/trunk/shims/0.23/src/main/java/org/apache/hadoop/hive/shims/Hadoop23Shims.java Mon Jun 2 03:10:16 2014
@@ -28,6 +28,7 @@ import java.util.HashMap;
import java.util.List;
import java.util.Map;
+import org.apache.commons.lang.ArrayUtils;
import org.apache.commons.lang.StringUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.BlockLocation;
@@ -35,12 +36,19 @@ import org.apache.hadoop.fs.FSDataInputS
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.FsShell;
import org.apache.hadoop.fs.LocatedFileStatus;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.PathFilter;
import org.apache.hadoop.fs.ProxyFileSystem;
import org.apache.hadoop.fs.RemoteIterator;
import org.apache.hadoop.fs.Trash;
+import org.apache.hadoop.fs.permission.AclEntry;
+import org.apache.hadoop.fs.permission.AclEntryScope;
+import org.apache.hadoop.fs.permission.AclEntryType;
+import org.apache.hadoop.fs.permission.AclStatus;
+import org.apache.hadoop.fs.permission.FsAction;
+import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.mapred.ClusterStatus;
@@ -64,6 +72,11 @@ import org.apache.hadoop.security.UserGr
import org.apache.hadoop.util.Progressable;
import org.apache.tez.test.MiniTezCluster;
+import com.google.common.base.Joiner;
+import com.google.common.base.Objects;
+import com.google.common.base.Predicate;
+import com.google.common.collect.Iterables;
+
/**
 * Implementation of shims against Hadoop 0.23.0.
*/
@@ -167,6 +180,10 @@ public class Hadoop23Shims extends Hadoo
return conf.get("yarn.resourcemanager.webapp.address");
}
+ protected boolean isExtendedAclEnabled(Configuration conf) {
+ return Objects.equal(conf.get("dfs.namenode.acls.enabled"), "true");
+ }
+
@Override
public long getDefaultBlockSize(FileSystem fs, Path path) {
return fs.getDefaultBlockSize(path);
@@ -490,6 +507,115 @@ public class Hadoop23Shims extends Hadoo
stream.hflush();
}
+ @Override
+ public HdfsFileStatus getFullFileStatus(Configuration conf, FileSystem fs,
+ Path file) throws IOException {
+ FileStatus fileStatus = fs.getFileStatus(file);
+ AclStatus aclStatus = null;
+ if (isExtendedAclEnabled(conf)) {
+ aclStatus = fs.getAclStatus(file);
+ }
+ return new Hadoop23FileStatus(fileStatus, aclStatus);
+ }
+
+ @Override
+ public void setFullFileStatus(Configuration conf, HdfsFileStatus sourceStatus,
+ FileSystem fs, Path target) throws IOException {
+ String group = sourceStatus.getFileStatus().getGroup();
+ //use FsShell to change group, permissions, and extended ACLs recursively
+ try {
+ FsShell fsShell = new FsShell();
+ fsShell.setConf(conf);
+ run(fsShell, new String[]{"-chgrp", "-R", group, target.toString()});
+
+ if (isExtendedAclEnabled(conf)) {
+ AclStatus aclStatus = ((Hadoop23FileStatus) sourceStatus).getAclStatus();
+ List<AclEntry> aclEntries = aclStatus.getEntries();
+ removeBaseAclEntries(aclEntries);
+
+ //the ACL APIs also expect the traditional user/group/other permissions in the form of ACL entries
+ FsPermission sourcePerm = sourceStatus.getFileStatus().getPermission();
+ aclEntries.add(newAclEntry(AclEntryScope.ACCESS, AclEntryType.USER, sourcePerm.getUserAction()));
+ aclEntries.add(newAclEntry(AclEntryScope.ACCESS, AclEntryType.GROUP, sourcePerm.getGroupAction()));
+ aclEntries.add(newAclEntry(AclEntryScope.ACCESS, AclEntryType.OTHER, sourcePerm.getOtherAction()));
+
+ //construct the ACL spec for the -setfacl command from the modified entry list
+ String aclSpec = Joiner.on(",").join(aclEntries);
+ run(fsShell, new String[]{"-setfacl", "-R", "--set", aclSpec, target.toString()});
+ } else {
+ String permission = Integer.toString(sourceStatus.getFileStatus().getPermission().toShort(), 8);
+ run(fsShell, new String[]{"-chmod", "-R", permission, target.toString()});
+ }
+ } catch (Exception e) {
+ throw new IOException("Unable to set permissions of " + target, e);
+ }
+ try {
+ if (LOG.isDebugEnabled()) { //debug-log the resulting status
+ getFullFileStatus(conf, fs, target).debugLog();
+ }
+ } catch (Exception e) {
+ //ignore.
+ }
+ }
+
+ public class Hadoop23FileStatus implements HdfsFileStatus {
+ private FileStatus fileStatus;
+ private AclStatus aclStatus;
+ public Hadoop23FileStatus(FileStatus fileStatus, AclStatus aclStatus) {
+ this.fileStatus = fileStatus;
+ this.aclStatus = aclStatus;
+ }
+ @Override
+ public FileStatus getFileStatus() {
+ return fileStatus;
+ }
+ public AclStatus getAclStatus() {
+ return aclStatus;
+ }
+ @Override
+ public void debugLog() {
+ if (fileStatus != null) {
+ LOG.debug(fileStatus.toString());
+ }
+ if (aclStatus != null) {
+ LOG.debug(aclStatus.toString());
+ }
+ }
+ }
+
+ /**
+ * Create a new AclEntry with scope, type and permission (no name).
+ *
+ * @param scope
+ * AclEntryScope scope of the ACL entry
+ * @param type
+ * AclEntryType ACL entry type
+ * @param permission
+ * FsAction set of permissions in the ACL entry
+ * @return AclEntry new AclEntry
+ */
+ private AclEntry newAclEntry(AclEntryScope scope, AclEntryType type,
+ FsAction permission) {
+ return new AclEntry.Builder().setScope(scope).setType(type)
+ .setPermission(permission).build();
+ }
+
+ /**
+ * Removes the base permission ACLs (unnamed ACL entries) from the list of ACL entries.
+ * @param entries ACL entries to remove the unnamed entries from.
+ */
+ private void removeBaseAclEntries(List<AclEntry> entries) {
+ Iterables.removeIf(entries, new Predicate<AclEntry>() {
+ @Override
+ public boolean apply(AclEntry input) {
+ //base entries (user/group/other) carry no name
+ return input.getName() == null;
+ }
+ });
+ }
+
class ProxyFileSystem23 extends ProxyFileSystem {
public ProxyFileSystem23(FileSystem fs) {
super(fs);
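When dfs.namenode.acls.enabled is true, the 0.23 shim folds the classic user/group/other bits into unnamed ACL entries and joins everything into a single spec for "-setfacl -R --set". A self-contained sketch of that spec construction; the 0750 mode is an arbitrary example, not a value from this patch.

import java.util.ArrayList;
import java.util.List;

import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclEntryScope;
import org.apache.hadoop.fs.permission.AclEntryType;
import org.apache.hadoop.fs.permission.FsPermission;

import com.google.common.base.Joiner;

public class AclSpecSketch {
  public static void main(String[] args) {
    FsPermission perm = new FsPermission((short) 0750); //rwxr-x---
    List<AclEntry> entries = new ArrayList<AclEntry>();
    //unnamed entries carry the classic user/group/other bits
    entries.add(new AclEntry.Builder().setScope(AclEntryScope.ACCESS)
        .setType(AclEntryType.USER).setPermission(perm.getUserAction()).build());
    entries.add(new AclEntry.Builder().setScope(AclEntryScope.ACCESS)
        .setType(AclEntryType.GROUP).setPermission(perm.getGroupAction()).build());
    entries.add(new AclEntry.Builder().setScope(AclEntryScope.ACCESS)
        .setType(AclEntryType.OTHER).setPermission(perm.getOtherAction()).build());
    //prints "user::rwx,group::r-x,other::---", the spec fed to -setfacl --set
    System.out.println(Joiner.on(",").join(entries));
  }
}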
Modified: hive/trunk/shims/common-secure/src/main/java/org/apache/hadoop/hive/shims/HadoopShimsSecure.java
URL: http://svn.apache.org/viewvc/hive/trunk/shims/common-secure/src/main/java/org/apache/hadoop/hive/shims/HadoopShimsSecure.java?rev=1599085&r1=1599084&r2=1599085&view=diff
==============================================================================
--- hive/trunk/shims/common-secure/src/main/java/org/apache/hadoop/hive/shims/HadoopShimsSecure.java (original)
+++ hive/trunk/shims/common-secure/src/main/java/org/apache/hadoop/hive/shims/HadoopShimsSecure.java Mon Jun 2 03:10:16 2014
@@ -31,10 +31,12 @@ import java.util.HashSet;
import java.util.List;
import java.util.Set;
+import org.apache.commons.lang.ArrayUtils;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.FsShell;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.PathFilter;
import org.apache.hadoop.hive.io.HiveIOExceptionHandlerUtil;
@@ -615,4 +617,10 @@ public abstract class HadoopShimsSecure
@Override
abstract public FileSystem getNonCachedFileSystem(URI uri, Configuration conf) throws IOException;
+
+ protected void run(FsShell shell, String[] command) throws Exception {
+ LOG.debug(ArrayUtils.toString(command));
+ int retval = shell.run(command);
+ LOG.debug("Return value is :" + retval);
+ }
}
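One caveat: run() only logs FsShell's exit code, so a failing chgrp/chmod/setfacl does not raise an exception on its own. A stricter variant, purely illustrative and not part of this patch, could check the code instead:

import java.util.Arrays;

import org.apache.hadoop.fs.FsShell;

public final class StrictFsShellRun {
  public static void run(FsShell shell, String[] command) throws Exception {
    int retval = shell.run(command);
    if (retval != 0) {
      //surface the failure instead of merely logging it
      throw new Exception("FsShell command failed with exit code " + retval
          + ": " + Arrays.toString(command));
    }
  }
}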
Modified: hive/trunk/shims/common/src/main/java/org/apache/hadoop/hive/shims/HadoopShims.java
URL: http://svn.apache.org/viewvc/hive/trunk/shims/common/src/main/java/org/apache/hadoop/hive/shims/HadoopShims.java?rev=1599085&r1=1599084&r2=1599085&view=diff
==============================================================================
--- hive/trunk/shims/common/src/main/java/org/apache/hadoop/hive/shims/HadoopShims.java (original)
+++ hive/trunk/shims/common/src/main/java/org/apache/hadoop/hive/shims/HadoopShims.java Mon Jun 2 03:10:16 2014
@@ -480,6 +480,35 @@ public interface HadoopShims {
*/
public void hflush(FSDataOutputStream stream) throws IOException;
+ /**
+ * For a given file or directory, return its full file status.
+ * @param conf the configuration to use
+ * @param fs the filesystem holding the file
+ * @param file the path whose status should be fetched
+ * @return the file status, including extended ACLs where this Hadoop version supports them
+ * @throws IOException if the status cannot be read
+ */
+ public HdfsFileStatus getFullFileStatus(Configuration conf, FileSystem fs, Path file) throws IOException;
+
+ /**
+ * For a given target path, recursively apply a previously captured file status.
+ * @param conf the configuration to use
+ * @param sourceStatus the status (group, permissions, and ACLs if available) to apply
+ * @param fs the filesystem holding the target
+ * @param target the path whose status should be set
+ * @throws IOException if the status cannot be applied
+ */
+ public void setFullFileStatus(Configuration conf, HdfsFileStatus sourceStatus,
+ FileSystem fs, Path target) throws IOException;
+
+ /**
+ * Includes the vanilla FileStatus, and the AclStatus if this version of Hadoop supports extended ACLs.
+ */
+ public interface HdfsFileStatus {
+ public FileStatus getFileStatus();
+ public void debugLog();
+ }
+
public HCatHadoopShims getHCatShim();
public interface HCatHadoopShims {