You are viewing a plain text version of this content. The canonical link for it is here.
Posted to common-commits@hadoop.apache.org by we...@apache.org on 2019/10/22 23:20:13 UTC
[hadoop] branch branch-3.1 updated: HDFS-14884. Add sanity check
that zone key equals feinfo key while setting Xattrs. Contributed by Mukul
Kumar Singh.
This is an automated email from the ASF dual-hosted git repository.
weichiu pushed a commit to branch branch-3.1
in repository https://gitbox.apache.org/repos/asf/hadoop.git
The following commit(s) were added to refs/heads/branch-3.1 by this push:
new 51114e1 HDFS-14884. Add sanity check that zone key equals feinfo key while setting Xattrs. Contributed by Mukul Kumar Singh.
51114e1 is described below
commit 51114e12eebba5987b6254be542a9dcf2bb09efc
Author: Mukul Kumar Singh <ms...@apache.org>
AuthorDate: Tue Oct 22 13:29:20 2019 -0700
HDFS-14884. Add sanity check that zone key equals feinfo key while setting Xattrs. Contributed by Mukul Kumar Singh.
Signed-off-by: Wei-Chiu Chuang <we...@apache.org>
(cherry picked from commit a901405ad80b4efee020e1ddd06104121f26e31f)
(cherry picked from commit c9d07a42dc18016b4b2f1e750708d23e8b7f4e28)
Conflicts:
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirXAttrOp.java
---
.../hadoop/hdfs/server/namenode/FSDirXAttrOp.java | 21 +++++++
.../apache/hadoop/hdfs/TestEncryptionZones.java | 68 ++++++++++++++++++++++
2 files changed, 89 insertions(+)
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirXAttrOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirXAttrOp.java
index 9e95f90..8ccb614 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirXAttrOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirXAttrOp.java
@@ -21,6 +21,7 @@ import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
import com.google.common.collect.Lists;
import org.apache.hadoop.HadoopIllegalArgumentException;
+import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.XAttr;
import org.apache.hadoop.fs.XAttrSetFlag;
@@ -42,6 +43,7 @@ import java.util.ListIterator;
import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.CRYPTO_XATTR_ENCRYPTION_ZONE;
import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.SECURITY_XATTR_UNREADABLE_BY_SUPERUSER;
+import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.CRYPTO_XATTR_FILE_ENCRYPTION_INFO;
class FSDirXAttrOp {
private static final XAttr KEYID_XATTR =
@@ -279,6 +281,25 @@ class FSDirXAttrOp {
* If we're adding the encryption zone xattr, then add src to the list
* of encryption zones.
*/
+
+ if (CRYPTO_XATTR_FILE_ENCRYPTION_INFO.equals(xaName)) {
+ HdfsProtos.PerFileEncryptionInfoProto fileProto = HdfsProtos.
+ PerFileEncryptionInfoProto.parseFrom(xattr.getValue());
+ String keyVersionName = fileProto.getEzKeyVersionName();
+ String zoneKeyName = fsd.ezManager.getKeyName(iip);
+ if (zoneKeyName == null) {
+ throw new IOException("Cannot add raw feInfo XAttr to a file in a " +
+ "non-encryption zone");
+ }
+
+ if (!KeyProviderCryptoExtension.
+ getBaseName(keyVersionName).equals(zoneKeyName)) {
+ throw new IllegalArgumentException(String.format(
+ "KeyVersion '%s' does not belong to the key '%s'",
+ keyVersionName, zoneKeyName));
+ }
+ }
+
if (CRYPTO_XATTR_ENCRYPTION_ZONE.equals(xaName)) {
final HdfsProtos.ZoneEncryptionInfoProto ezProto =
HdfsProtos.ZoneEncryptionInfoProto.parseFrom(xattr.getValue());
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZones.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZones.java
index 465e925..40dee0f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZones.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZones.java
@@ -35,6 +35,7 @@ import java.util.Arrays;
import java.util.Collection;
import java.util.EnumSet;
import java.util.List;
+import java.util.Map;
import java.util.concurrent.Callable;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutionException;
@@ -63,6 +64,7 @@ import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileSystemTestHelper;
import org.apache.hadoop.fs.FileSystemTestWrapper;
+import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.FsServerDefaults;
import org.apache.hadoop.fs.FsShell;
import org.apache.hadoop.fs.Path;
@@ -326,6 +328,72 @@ public class TestEncryptionZones {
}
/**
+ * Tests that encrypted files with the same original content, placed in two
+ * different encryption zones, are not identical in their encrypted form.
+ */
+ @Test
+ public void testEncryptionZonesDictCp() throws Exception {
+ final String testkey1 = "testkey1";
+ final String testkey2 = "testkey2";
+ DFSTestUtil.createKey(testkey1, cluster, conf);
+ DFSTestUtil.createKey(testkey2, cluster, conf);
+
+ final int len = 8196;
+ final Path zone1 = new Path("/zone1");
+ final Path zone1File = new Path(zone1, "file");
+ final Path raw1File = new Path("/.reserved/raw/zone1/file");
+
+ final Path zone2 = new Path("/zone2");
+ final Path zone2File = new Path(zone2, "file");
+ final Path raw2File = new Path(zone2, "/.reserved/raw/zone2/file");
+
+ // 1. Create two encrypted zones
+ fs.mkdirs(zone1, new FsPermission(700));
+ dfsAdmin.createEncryptionZone(zone1, testkey1, NO_TRASH);
+
+ fs.mkdirs(zone2, new FsPermission(700));
+ dfsAdmin.createEncryptionZone(zone2, testkey2, NO_TRASH);
+
+ // 2. Create a file in one of the zones
+ DFSTestUtil.createFile(fs, zone1File, len, (short) 1, 0xFEED);
+ // 3. Copy it to the other zone through /.reserved/raw
+ FileUtil.copy(fs, raw1File, fs, raw2File, false, conf);
+ Map<String, byte[]> attrs = fs.getXAttrs(raw1File);
+ if (attrs != null) {
+ for (Map.Entry<String, byte[]> entry : attrs.entrySet()) {
+ String xattrName = entry.getKey();
+
+ try {
+ fs.setXAttr(raw2File, xattrName, entry.getValue());
+ fail("Exception should be thrown while setting: " +
+ xattrName + " on file:" + raw2File);
+ } catch (RemoteException e) {
+ Assert.assertEquals(e.getClassName(),
+ IllegalArgumentException.class.getCanonicalName());
+ Assert.assertTrue(e.getMessage().
+ contains("does not belong to the key"));
+ }
+ }
+ }
+
+ assertEquals("File can be created on the root encryption zone " +
+ "with correct length", len, fs.getFileStatus(zone1File).getLen());
+ assertTrue("/zone1 dir is encrypted",
+ fs.getFileStatus(zone1).isEncrypted());
+ assertTrue("File is encrypted", fs.getFileStatus(zone1File).isEncrypted());
+
+ assertTrue("/zone2 dir is encrypted",
+ fs.getFileStatus(zone2).isEncrypted());
+ assertTrue("File is encrypted", fs.getFileStatus(zone2File).isEncrypted());
+
+ // 4. Now the decrypted contents of the files should be different.
+ DFSTestUtil.verifyFilesNotEqual(fs, zone1File, zone2File, len);
+
+ // 5. Encrypted contents of the files should be same.
+ DFSTestUtil.verifyFilesEqual(fs, raw1File, raw2File, len);
+ }
+
+ /**
* Make sure hdfs crypto -provisionTrash command creates a trash directory
* with sticky bits.
* @throws Exception
---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org