Posted to hdfs-commits@hadoop.apache.org by wa...@apache.org on 2014/08/20 20:39:08 UTC

svn commit: r1619197 [3/4] - in /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs: ./ src/main/bin/ src/main/java/ src/main/java/org/apache/hadoop/fs/ src/main/java/org/apache/hadoop/hdfs/ src/main/java/org/apache/hadoop/hdfs/client/ src/main/java/o...

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java?rev=1619197&r1=1619196&r2=1619197&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java Wed Aug 20 18:39:03 2014
@@ -17,6 +17,9 @@
  */
 package org.apache.hadoop.hdfs.server.namenode;
 
+import static org.apache.hadoop.crypto.key.KeyProvider.KeyVersion;
+import static org.apache.hadoop.crypto.key.KeyProviderCryptoExtension
+    .EncryptedKeyVersion;
 import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_TRASH_INTERVAL_DEFAULT;
 import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_TRASH_INTERVAL_KEY;
 import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_DEFAULT;
@@ -102,6 +105,8 @@ import java.io.StringWriter;
 import java.lang.management.ManagementFactory;
 import java.net.InetAddress;
 import java.net.URI;
+import java.security.GeneralSecurityException;
+import java.security.NoSuchAlgorithmException;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collection;
@@ -115,6 +120,7 @@ import java.util.LinkedHashSet;
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
+import java.util.UUID;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.locks.Condition;
 import java.util.concurrent.locks.ReentrantLock;
@@ -130,12 +136,17 @@ import org.apache.commons.logging.impl.L
 import org.apache.hadoop.HadoopIllegalArgumentException;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.crypto.CipherSuite;
+import org.apache.hadoop.crypto.CryptoCodec;
+import org.apache.hadoop.crypto.key.KeyProvider;
+import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension;
 import org.apache.hadoop.fs.BatchedRemoteIterator.BatchedListEntries;
 import org.apache.hadoop.fs.CacheFlag;
 import org.apache.hadoop.fs.ContentSummary;
 import org.apache.hadoop.fs.CreateFlag;
 import org.apache.hadoop.fs.DirectoryListingStartAfterNotFoundException;
 import org.apache.hadoop.fs.FileAlreadyExistsException;
+import org.apache.hadoop.fs.FileEncryptionInfo;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FsServerDefaults;
 import org.apache.hadoop.fs.InvalidPathException;
@@ -159,6 +170,7 @@ import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.HAUtil;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.StorageType;
+import org.apache.hadoop.hdfs.UnknownCipherSuiteException;
 import org.apache.hadoop.hdfs.protocol.AclException;
 import org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException;
 import org.apache.hadoop.hdfs.protocol.Block;
@@ -170,6 +182,8 @@ import org.apache.hadoop.hdfs.protocol.C
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.DirectoryListing;
+import org.apache.hadoop.hdfs.protocol.EncryptionZone;
+import org.apache.hadoop.hdfs.protocol.EncryptionZoneWithId;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
@@ -325,7 +339,7 @@ public class FSNamesystem implements Nam
   private HdfsFileStatus getAuditFileInfo(String path, boolean resolveSymlink)
       throws IOException {
     return (isAuditEnabled() && isExternalInvocation())
-        ? dir.getFileInfo(path, resolveSymlink) : null;
+        ? dir.getFileInfo(path, resolveSymlink, false) : null;
   }
   
   private void logAuditEvent(boolean succeeded, String cmd, String src)
@@ -411,6 +425,8 @@ public class FSNamesystem implements Nam
   private final CacheManager cacheManager;
   private final DatanodeStatistics datanodeStatistics;
 
+  private String nameserviceId;
+
   private RollingUpgradeInfo rollingUpgradeInfo = null;
   /**
    * A flag that indicates whether the checkpointer should checkpoint a rollback
@@ -526,6 +542,11 @@ public class FSNamesystem implements Nam
 
   private final NNConf nnConf;
 
+  private KeyProviderCryptoExtension provider = null;
+  private KeyProvider.Options providerOptions = null;
+
+  private final CryptoCodec codec;
+
   private volatile boolean imageLoaded = false;
   private final Condition cond;
 
@@ -745,6 +766,14 @@ public class FSNamesystem implements Nam
    */
   FSNamesystem(Configuration conf, FSImage fsImage, boolean ignoreRetryCache)
       throws IOException {
+    provider = DFSUtil.createKeyProviderCryptoExtension(conf);
+    if (provider == null) {
+      LOG.info("No KeyProvider found.");
+    } else {
+      LOG.info("Found KeyProvider: " + provider.toString());
+    }
+    providerOptions = KeyProvider.options(conf);
+    this.codec = CryptoCodec.getInstance(conf);
     if (conf.getBoolean(DFS_NAMENODE_AUDIT_LOG_ASYNC_KEY,
                         DFS_NAMENODE_AUDIT_LOG_ASYNC_DEFAULT)) {
       LOG.info("Enabling async auditlog");
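
The constructor now wires up an optional KeyProviderCryptoExtension before
anything else. A minimal sketch of what that wiring amounts to, assuming the
dfs.encryption.key.provider.uri setting that DFSUtil.createKeyProviderCryptoExtension
reads at this point in trunk (the KMS address below is a placeholder, not part
of the commit):

    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.crypto.key.KeyProvider;
    import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension;
    import org.apache.hadoop.crypto.key.KeyProviderFactory;

    public class ProviderWiring {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Placeholder KMS address; any registered KeyProvider scheme works.
        conf.set("dfs.encryption.key.provider.uri",
            "kms://http@kms.example.com:16000/kms");
        // Roughly what DFSUtil.createKeyProviderCryptoExtension(conf) does:
        String uri = conf.get("dfs.encryption.key.provider.uri");
        if (uri == null || uri.isEmpty()) {
          System.out.println("No KeyProvider found."); // mirrors the LOG.info above
          return;
        }
        KeyProvider keyProvider = KeyProviderFactory.get(new URI(uri), conf);
        KeyProviderCryptoExtension provider =
            KeyProviderCryptoExtension.createKeyProviderCryptoExtension(keyProvider);
        System.out.println("Found KeyProvider: " + provider);
      }
    }
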
@@ -776,7 +805,7 @@ public class FSNamesystem implements Nam
 
       // block allocation has to be persisted in HA using a shared edits directory
       // so that the standby has up-to-date namespace information
-      String nameserviceId = DFSUtil.getNamenodeNameServiceId(conf);
+      nameserviceId = DFSUtil.getNamenodeNameServiceId(conf);
       this.haEnabled = HAUtil.isHAEnabled(conf, nameserviceId);  
       
       // Sanity check the HA-related config.
@@ -903,6 +932,11 @@ public class FSNamesystem implements Nam
   }
 
   @VisibleForTesting
+  public KeyProviderCryptoExtension getProvider() {
+    return provider;
+  }
+
+  @VisibleForTesting
   static RetryCache initRetryCache(Configuration conf) {
     boolean enable = conf.getBoolean(DFS_NAMENODE_ENABLE_RETRY_CACHE_KEY,
         DFS_NAMENODE_ENABLE_RETRY_CACHE_DEFAULT);
@@ -1630,9 +1664,10 @@ public class FSNamesystem implements Nam
     }
   }
 
-  private void setPermissionInt(String src, FsPermission permission)
+  private void setPermissionInt(final String srcArg, FsPermission permission)
       throws AccessControlException, FileNotFoundException, SafeModeException,
       UnresolvedLinkException, IOException {
+    String src = srcArg;
     HdfsFileStatus resultingStat = null;
     FSPermissionChecker pc = getPermissionChecker();
     checkOperation(OperationCategory.WRITE);
@@ -1641,7 +1676,7 @@ public class FSNamesystem implements Nam
     try {
       checkOperation(OperationCategory.WRITE);
       checkNameNodeSafeMode("Cannot set permission for " + src);
-      src = FSDirectory.resolvePath(src, pathComponents, dir);
+      src = resolvePath(src, pathComponents);
       checkOwner(pc, src);
       dir.setPermission(src, permission);
       getEditLog().logSetPermissions(src, permission);
@@ -1650,7 +1685,7 @@ public class FSNamesystem implements Nam
       writeUnlock();
     }
     getEditLog().logSync();
-    logAuditEvent(true, "setPermission", src, null, resultingStat);
+    logAuditEvent(true, "setPermission", srcArg, null, resultingStat);
   }
 
   /**
@@ -1668,9 +1703,10 @@ public class FSNamesystem implements Nam
     } 
   }
 
-  private void setOwnerInt(String src, String username, String group)
+  private void setOwnerInt(final String srcArg, String username, String group)
       throws AccessControlException, FileNotFoundException, SafeModeException,
       UnresolvedLinkException, IOException {
+    String src = srcArg;
     HdfsFileStatus resultingStat = null;
     FSPermissionChecker pc = getPermissionChecker();
     checkOperation(OperationCategory.WRITE);
@@ -1679,7 +1715,7 @@ public class FSNamesystem implements Nam
     try {
       checkOperation(OperationCategory.WRITE);
       checkNameNodeSafeMode("Cannot set owner for " + src);
-      src = FSDirectory.resolvePath(src, pathComponents, dir);
+      src = resolvePath(src, pathComponents);
       checkOwner(pc, src);
       if (!pc.isSuperUser()) {
         if (username != null && !pc.getUser().equals(username)) {
@@ -1696,7 +1732,7 @@ public class FSNamesystem implements Nam
       writeUnlock();
     }
     getEditLog().logSync();
-    logAuditEvent(true, "setOwner", src, null, resultingStat);
+    logAuditEvent(true, "setOwner", srcArg, null, resultingStat);
   }
 
   /**
@@ -1779,10 +1815,11 @@ public class FSNamesystem implements Nam
    * Get block locations within the specified range, updating the
    * access times if necessary. 
    */
-  private LocatedBlocks getBlockLocationsUpdateTimes(String src, long offset,
-      long length, boolean doAccessTime, boolean needBlockToken)
+  private LocatedBlocks getBlockLocationsUpdateTimes(final String srcArg,
+      long offset, long length, boolean doAccessTime, boolean needBlockToken)
       throws FileNotFoundException,
       UnresolvedLinkException, IOException {
+    String src = srcArg;
     FSPermissionChecker pc = getPermissionChecker();
     byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
     for (int attempt = 0; attempt < 2; attempt++) {
@@ -1794,7 +1831,7 @@ public class FSNamesystem implements Nam
         checkOperation(OperationCategory.WRITE);
         writeLock(); // writelock is needed to set accesstime
       }
-      src = FSDirectory.resolvePath(src, pathComponents, dir);
+      src = resolvePath(src, pathComponents);
       try {
         if (isReadOp) {
           checkOperation(OperationCategory.READ);
@@ -1838,9 +1875,14 @@ public class FSNamesystem implements Nam
           length = Math.min(length, fileSize - offset);
           isUc = false;
         }
-        LocatedBlocks blocks =
+
+        final FileEncryptionInfo feInfo =
+          FSDirectory.isReservedRawName(srcArg) ?
+          null : dir.getFileEncryptionInfo(inode, iip.getPathSnapshotId());
+
+        final LocatedBlocks blocks =
           blockManager.createLocatedBlocks(inode.getBlocks(), fileSize,
-            isUc, offset, length, needBlockToken, iip.isSnapshot());
+            isUc, offset, length, needBlockToken, iip.isSnapshot(), feInfo);
         // Set caching information for the located blocks.
         for (LocatedBlock lb: blocks.getLocatedBlocks()) {
           cacheManager.setCachedLocations(lb);
@@ -2061,8 +2103,9 @@ public class FSNamesystem implements Nam
     }
   }
 
-  private void setTimesInt(String src, long mtime, long atime) 
+  private void setTimesInt(final String srcArg, long mtime, long atime)
     throws IOException, UnresolvedLinkException {
+    String src = srcArg;
     HdfsFileStatus resultingStat = null;
     FSPermissionChecker pc = getPermissionChecker();
     checkOperation(OperationCategory.WRITE);
@@ -2071,7 +2114,7 @@ public class FSNamesystem implements Nam
     try {
       checkOperation(OperationCategory.WRITE);
       checkNameNodeSafeMode("Cannot set times " + src);
-      src = FSDirectory.resolvePath(src, pathComponents, dir);
+      src = resolvePath(src, pathComponents);
 
       // Write access is required to set access and modification times
       if (isPermissionEnabled) {
@@ -2092,7 +2135,7 @@ public class FSNamesystem implements Nam
     } finally {
       writeUnlock();
     }
-    logAuditEvent(true, "setTimes", src, null, resultingStat);
+    logAuditEvent(true, "setTimes", srcArg, null, resultingStat);
   }
 
   /**
@@ -2123,9 +2166,10 @@ public class FSNamesystem implements Nam
     }
   }
 
-  private void createSymlinkInt(String target, String link,
+  private void createSymlinkInt(String target, final String linkArg,
       PermissionStatus dirPerms, boolean createParent, boolean logRetryCache) 
       throws IOException, UnresolvedLinkException {
+    String link = linkArg;
     if (NameNode.stateChangeLog.isDebugEnabled()) {
       NameNode.stateChangeLog.debug("DIR* NameSystem.createSymlink: target="
           + target + " link=" + link);
@@ -2138,7 +2182,7 @@ public class FSNamesystem implements Nam
     try {
       checkOperation(OperationCategory.WRITE);
       checkNameNodeSafeMode("Cannot create symlink " + link);
-      link = FSDirectory.resolvePath(link, pathComponents, dir);
+      link = resolvePath(link, pathComponents);
       if (!createParent) {
         verifyParentDir(link);
       }
@@ -2159,7 +2203,7 @@ public class FSNamesystem implements Nam
       writeUnlock();
     }
     getEditLog().logSync();
-    logAuditEvent(true, "createSymlink", link, target, resultingStat);
+    logAuditEvent(true, "createSymlink", linkArg, target, resultingStat);
   }
 
   /**
@@ -2185,8 +2229,9 @@ public class FSNamesystem implements Nam
     }
   }
 
-  private boolean setReplicationInt(String src, final short replication)
-      throws IOException {
+  private boolean setReplicationInt(final String srcArg,
+      final short replication) throws IOException {
+    String src = srcArg;
     blockManager.verifyReplication(src, replication, null);
     final boolean isFile;
     FSPermissionChecker pc = getPermissionChecker();
@@ -2197,7 +2242,7 @@ public class FSNamesystem implements Nam
     try {
       checkOperation(OperationCategory.WRITE);
       checkNameNodeSafeMode("Cannot set replication for " + src);
-      src = FSDirectory.resolvePath(src, pathComponents, dir);
+      src = resolvePath(src, pathComponents);
       if (isPermissionEnabled) {
         checkPathAccess(pc, src, FsAction.WRITE);
       }
@@ -2215,7 +2260,7 @@ public class FSNamesystem implements Nam
 
     getEditLog().logSync();
     if (isFile) {
-      logAuditEvent(true, "setReplication", src);
+      logAuditEvent(true, "setReplication", srcArg);
     }
     return isFile;
   }
@@ -2228,7 +2273,7 @@ public class FSNamesystem implements Nam
     readLock();
     try {
       checkOperation(OperationCategory.READ);
-      filename = FSDirectory.resolvePath(filename, pathComponents, dir);
+      filename = resolvePath(filename, pathComponents);
       if (isPermissionEnabled) {
         checkTraverse(pc, filename);
       }
@@ -2256,7 +2301,74 @@ public class FSNamesystem implements Nam
       }
     }
   }
-  
+
+  /**
+   * If the file is within an encryption zone, select the appropriate
+   * CipherSuite from the list provided by the client. Since the client may
+   * be newer than the NameNode, unknown CipherSuites must be handled.
+   *
+   * @param srcIIP path of the file
+   * @param cipherSuites client-provided list of supported CipherSuites, 
+   *                     in desired order.
+   * @return chosen CipherSuite, or null if file is not in an EncryptionZone
+   * @throws IOException
+   */
+  private CipherSuite chooseCipherSuite(INodesInPath srcIIP, List<CipherSuite>
+      cipherSuites)
+      throws UnknownCipherSuiteException, UnresolvedLinkException,
+        SnapshotAccessControlException {
+    // Not in an EZ
+    if (!dir.isInAnEZ(srcIIP)) {
+      return null;
+    }
+    CipherSuite chosen = null;
+    for (CipherSuite c : cipherSuites) {
+      if (c.equals(CipherSuite.UNKNOWN)) {
+        if (LOG.isDebugEnabled()) {
+          LOG.debug("Ignoring unknown CipherSuite provided by client: "
+              + c.getUnknownValue());
+        }
+        continue;
+      }
+      for (CipherSuite supported : CipherSuite.values()) {
+        if (supported.equals(c)) {
+          chosen = c;
+          break;
+        }
+      }
+    }
+    if (chosen == null) {
+      throw new UnknownCipherSuiteException(
+          "No cipher suites provided by the client are supported."
+              + " Client provided: " + Arrays.toString(cipherSuites.toArray())
+              + " NameNode supports: " + Arrays.toString(CipherSuite.values()));
+    }
+    return chosen;
+  }
+
+  /**
+   * Invoke KeyProvider APIs to generate an encrypted data encryption key for an
+   * encryption zone. Should not be called with any locks held.
+   *
+   * @param ezKeyName key name of an encryption zone
+   * @return New EDEK, or null if ezKeyName is null
+   * @throws IOException
+   */
+  private EncryptedKeyVersion generateEncryptedDataEncryptionKey(String
+      ezKeyName) throws IOException {
+    if (ezKeyName == null) {
+      return null;
+    }
+    EncryptedKeyVersion edek = null;
+    try {
+      edek = provider.generateEncryptedKey(ezKeyName);
+    } catch (GeneralSecurityException e) {
+      throw new IOException(e);
+    }
+    Preconditions.checkNotNull(edek);
+    return edek;
+  }
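
The whole EDEK lifecycle can be exercised against a local keystore instead of
a KMS. A sketch of what generateEncryptedDataEncryptionKey() above does on the
NameNode, plus the matching client-side unwrap (the jceks path and key name
are placeholders):

    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.crypto.key.KeyProvider;
    import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension;
    import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension.EncryptedKeyVersion;
    import org.apache.hadoop.crypto.key.KeyProviderFactory;

    public class EdekLifecycle {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        KeyProvider kp = KeyProviderFactory.get(
            new URI("jceks://file/tmp/test.jceks"), conf);
        kp.createKey("ezKey", KeyProvider.options(conf));
        kp.flush();
        KeyProviderCryptoExtension ext =
            KeyProviderCryptoExtension.createKeyProviderCryptoExtension(kp);
        // NameNode side: generate an EDEK; only encrypted material is stored.
        EncryptedKeyVersion edek = ext.generateEncryptedKey("ezKey");
        // Client side: unwrap the EDEK into the actual data encryption key.
        KeyProvider.KeyVersion dek = ext.decryptEncryptedKey(edek);
        System.out.println("DEK is " + dek.getMaterial().length + " bytes");
      }
    }
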
+
   /**
    * Create a new file entry in the namespace.
    * 
@@ -2266,7 +2378,8 @@ public class FSNamesystem implements Nam
    */
   HdfsFileStatus startFile(String src, PermissionStatus permissions,
       String holder, String clientMachine, EnumSet<CreateFlag> flag,
-      boolean createParent, short replication, long blockSize)
+      boolean createParent, short replication, long blockSize, 
+      List<CipherSuite> cipherSuites)
       throws AccessControlException, SafeModeException,
       FileAlreadyExistsException, UnresolvedLinkException,
       FileNotFoundException, ParentNotDirectoryException, IOException {
@@ -2279,7 +2392,8 @@ public class FSNamesystem implements Nam
     
     try {
       status = startFileInt(src, permissions, holder, clientMachine, flag,
-          createParent, replication, blockSize, cacheEntry != null);
+          createParent, replication, blockSize, cipherSuites,
+          cacheEntry != null);
     } catch (AccessControlException e) {
       logAuditEvent(false, "create", src);
       throw e;
@@ -2289,19 +2403,30 @@ public class FSNamesystem implements Nam
     return status;
   }
 
-  private HdfsFileStatus startFileInt(String src, PermissionStatus permissions,
-      String holder, String clientMachine, EnumSet<CreateFlag> flag,
-      boolean createParent, short replication, long blockSize,
-      boolean logRetryCache) throws AccessControlException, SafeModeException,
+  private HdfsFileStatus startFileInt(final String srcArg,
+      PermissionStatus permissions, String holder, String clientMachine,
+      EnumSet<CreateFlag> flag, boolean createParent, short replication,
+      long blockSize, List<CipherSuite> cipherSuites, boolean logRetryCache)
+      throws AccessControlException, SafeModeException,
       FileAlreadyExistsException, UnresolvedLinkException,
       FileNotFoundException, ParentNotDirectoryException, IOException {
+    String src = srcArg;
     if (NameNode.stateChangeLog.isDebugEnabled()) {
-      NameNode.stateChangeLog.debug("DIR* NameSystem.startFile: src=" + src
-          + ", holder=" + holder
-          + ", clientMachine=" + clientMachine
-          + ", createParent=" + createParent
-          + ", replication=" + replication
-          + ", createFlag=" + flag.toString());
+      StringBuilder builder = new StringBuilder();
+      builder.append("DIR* NameSystem.startFile: src=" + src
+              + ", holder=" + holder
+              + ", clientMachine=" + clientMachine
+              + ", createParent=" + createParent
+              + ", replication=" + replication
+              + ", createFlag=" + flag.toString()
+              + ", blockSize=" + blockSize);
+      builder.append(", cipherSuites=");
+      if (cipherSuites != null) {
+        builder.append(Arrays.toString(cipherSuites.toArray()));
+      } else {
+        builder.append("null");
+      }
+      NameNode.stateChangeLog.debug(builder.toString());
     }
     if (!DFSUtil.isValidName(src)) {
       throw new InvalidPathException(src);
@@ -2322,27 +2447,92 @@ public class FSNamesystem implements Nam
     boolean overwrite = flag.contains(CreateFlag.OVERWRITE);
 
     waitForLoadingFSImage();
-    writeLock();
+
+    /*
+     * We want to avoid holding any locks while doing KeyProvider operations,
+     * since they can be very slow. Since the path can, in the meantime,
+     * flip-flop between being inside an encryption zone and not,
+     * we need to recheck the preconditions and redo KeyProvider operations
+     * in some situations.
+     *
+     * A special RetryStartFileException is used to indicate that we should
+     * retry creation of a FileEncryptionInfo.
+     */
     try {
-      checkOperation(OperationCategory.WRITE);
-      checkNameNodeSafeMode("Cannot create file" + src);
-      src = FSDirectory.resolvePath(src, pathComponents, dir);
-      startFileInternal(pc, src, permissions, holder, clientMachine, create,
-          overwrite, createParent, replication, blockSize, logRetryCache);
-      stat = dir.getFileInfo(src, false);
-    } catch (StandbyException se) {
-      skipSync = true;
-      throw se;
+      boolean shouldContinue = true;
+      int iters = 0;
+      while (shouldContinue) {
+        skipSync = false;
+        if (iters >= 10) {
+          throw new IOException("Too many retries because of encryption zone " +
+              "operations, something might be broken!");
+        }
+        shouldContinue = false;
+        iters++;
+
+        // Optimistically determine CipherSuite and ezKeyName if the path is
+        // currently within an encryption zone
+        CipherSuite suite = null;
+        String ezKeyName = null;
+        readLock();
+        try {
+          src = resolvePath(src, pathComponents);
+          INodesInPath iip = dir.getINodesInPath4Write(src);
+          // Nothing to do if the path is not within an EZ
+          if (dir.isInAnEZ(iip)) {
+            suite = chooseCipherSuite(iip, cipherSuites);
+            if (suite != null) {
+              Preconditions.checkArgument(!suite.equals(CipherSuite.UNKNOWN),
+                  "Chose an UNKNOWN CipherSuite!");
+            }
+            ezKeyName = dir.getKeyName(iip);
+            Preconditions.checkState(ezKeyName != null);
+          }
+        } finally {
+          readUnlock();
+        }
+
+        Preconditions.checkState(
+            (suite == null && ezKeyName == null) ||
+            (suite != null && ezKeyName != null),
+            "suite and ezKeyName should both be null or both be non-null");
+        // Generate EDEK if necessary while not holding the lock
+        EncryptedKeyVersion edek =
+            generateEncryptedDataEncryptionKey(ezKeyName);
+        EncryptionFaultInjector.getInstance().startFileAfterGenerateKey();
+        // Try to create the file with the computed cipher suite and EDEK
+        writeLock();
+        try {
+          checkOperation(OperationCategory.WRITE);
+          checkNameNodeSafeMode("Cannot create file " + src);
+          src = resolvePath(src, pathComponents);
+          startFileInternal(pc, src, permissions, holder, clientMachine, create,
+              overwrite, createParent, replication, blockSize, suite, edek,
+              logRetryCache);
+          stat = dir.getFileInfo(src, false,
+              FSDirectory.isReservedRawName(srcArg));
+        } catch (StandbyException se) {
+          skipSync = true;
+          throw se;
+        } catch (RetryStartFileException e) {
+          shouldContinue = true;
+          if (LOG.isTraceEnabled()) {
+            LOG.trace("Preconditions failed, retrying creation of " +
+                    "FileEncryptionInfo", e);
+          }
+        } finally {
+          writeUnlock();
+        }
+      }
     } finally {
-      writeUnlock();
       // There might be transactions logged while trying to recover the lease.
       // They need to be sync'ed even when an exception was thrown.
       if (!skipSync) {
         getEditLog().logSync();
       }
-    } 
+    }
 
-    logAuditEvent(true, "create", src, null, stat);
+    logAuditEvent(true, "create", srcArg, null, stat);
     return stat;
   }
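
Reduced to its essentials, the locking pattern introduced here is: snapshot
the precondition under the read lock, do the slow provider call with no locks
held, then recheck and commit under the write lock, looping if the world
changed. An illustrative skeleton of that shape outside HDFS (not the commit's
code):

    import java.util.Objects;
    import java.util.concurrent.locks.ReentrantReadWriteLock;

    public class OptimisticRetry {
      private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock();
      private volatile String ezKeyName; // stands in for the zone key of the path

      Object createWithRetry() throws Exception {
        for (int iters = 0; iters < 10; iters++) {
          // 1. Snapshot the precondition under the cheap read lock.
          String observed;
          lock.readLock().lock();
          try { observed = ezKeyName; } finally { lock.readLock().unlock(); }

          // 2. Do the slow KeyProvider-style work with no locks held.
          Object edek = slowGenerateEdek(observed);

          // 3. Recheck and commit under the write lock; loop on a stale EDEK.
          lock.writeLock().lock();
          try {
            if (Objects.equals(observed, ezKeyName)) {
              return edek; // nothing changed in the meantime: commit
            }
          } finally { lock.writeLock().unlock(); }
        }
        throw new Exception("Too many retries, something might be broken!");
      }

      private Object slowGenerateEdek(String keyName) {
        return keyName == null ? null : "edek-for-" + keyName;
      }
    }
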
 
@@ -2358,10 +2548,11 @@ public class FSNamesystem implements Nam
   private void startFileInternal(FSPermissionChecker pc, String src,
       PermissionStatus permissions, String holder, String clientMachine,
       boolean create, boolean overwrite, boolean createParent,
-      short replication, long blockSize, boolean logRetryEntry)
+      short replication, long blockSize, CipherSuite suite,
+      EncryptedKeyVersion edek, boolean logRetryEntry)
       throws FileAlreadyExistsException, AccessControlException,
       UnresolvedLinkException, FileNotFoundException,
-      ParentNotDirectoryException, IOException {
+      ParentNotDirectoryException, RetryStartFileException, IOException {
     assert hasWriteLock();
     // Verify that the destination does not exist as a directory already.
     final INodesInPath iip = dir.getINodesInPath4Write(src);
@@ -2370,6 +2561,26 @@ public class FSNamesystem implements Nam
       throw new FileAlreadyExistsException(src +
           " already exists as a directory");
     }
+
+    FileEncryptionInfo feInfo = null;
+    if (dir.isInAnEZ(iip)) {
+      // The path is now within an EZ, but we're missing encryption parameters
+      if (suite == null || edek == null) {
+        throw new RetryStartFileException();
+      }
+      // Path is within an EZ and we have provided encryption parameters.
+      // Make sure that the generated EDEK matches the settings of the EZ.
+      String ezKeyName = dir.getKeyName(iip);
+      if (!ezKeyName.equals(edek.getEncryptionKeyName())) {
+        throw new RetryStartFileException();
+      }
+      feInfo = new FileEncryptionInfo(suite,
+          edek.getEncryptedKeyVersion().getMaterial(),
+          edek.getEncryptedKeyIv(),
+          edek.getEncryptionKeyVersionName());
+      Preconditions.checkNotNull(feInfo);
+    }
+
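
Condensed, the check above says: an EDEK generated while the lock was dropped
is only usable if the zone's key is still the one it was generated for. A
compact restatement (RetryNeeded is a local stand-in for the
RetryStartFileException this change introduces):

    import org.apache.hadoop.crypto.CipherSuite;
    import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension.EncryptedKeyVersion;
    import org.apache.hadoop.fs.FileEncryptionInfo;

    public class FeInfoCheck {
      static class RetryNeeded extends Exception {}

      static FileEncryptionInfo toFeInfo(CipherSuite suite, String currentEzKeyName,
          EncryptedKeyVersion edek) throws RetryNeeded {
        // Missing parameters or a key mismatch both mean "regenerate and retry".
        if (suite == null || edek == null
            || !currentEzKeyName.equals(edek.getEncryptionKeyName())) {
          throw new RetryNeeded();
        }
        return new FileEncryptionInfo(suite,
            edek.getEncryptedKeyVersion().getMaterial(),
            edek.getEncryptedKeyIv(),
            edek.getEncryptionKeyVersionName());
      }
    }
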
     final INodeFile myFile = INodeFile.valueOf(inode, src, true);
     if (isPermissionEnabled) {
       if (overwrite && myFile != null) {
@@ -2422,6 +2633,12 @@ public class FSNamesystem implements Nam
       leaseManager.addLease(newNode.getFileUnderConstructionFeature()
           .getClientName(), src);
 
+      // Set encryption attributes if necessary
+      if (feInfo != null) {
+        dir.setFileEncryptionInfo(src, feInfo);
+        newNode = dir.getInode(newNode.getId()).asFile();
+      }
+
       // record file record in log, record new generation stamp
       getEditLog().logOpenFile(src, newNode, logRetryEntry);
       if (NameNode.stateChangeLog.isDebugEnabled()) {
@@ -2434,7 +2651,7 @@ public class FSNamesystem implements Nam
       throw ie;
     }
   }
-  
+
   /**
    * Append to an existing file for append.
    * <p>
@@ -2560,7 +2777,7 @@ public class FSNamesystem implements Nam
     try {
       checkOperation(OperationCategory.WRITE);
       checkNameNodeSafeMode("Cannot recover the lease of " + src);
-      src = FSDirectory.resolvePath(src, pathComponents, dir);
+      src = resolvePath(src, pathComponents);
       final INodeFile inode = INodeFile.valueOf(dir.getINode(src), src);
       if (!inode.isUnderConstruction()) {
         return true;
@@ -2687,11 +2904,12 @@ public class FSNamesystem implements Nam
     }
   }
 
-  private LocatedBlock appendFileInt(String src, String holder,
+  private LocatedBlock appendFileInt(final String srcArg, String holder,
       String clientMachine, boolean logRetryCache)
       throws AccessControlException, SafeModeException,
       FileAlreadyExistsException, FileNotFoundException,
       ParentNotDirectoryException, IOException {
+    String src = srcArg;
     if (NameNode.stateChangeLog.isDebugEnabled()) {
       NameNode.stateChangeLog.debug("DIR* NameSystem.appendFile: src=" + src
           + ", holder=" + holder
@@ -2706,7 +2924,7 @@ public class FSNamesystem implements Nam
     try {
       checkOperation(OperationCategory.WRITE);
       checkNameNodeSafeMode("Cannot append to file" + src);
-      src = FSDirectory.resolvePath(src, pathComponents, dir);
+      src = resolvePath(src, pathComponents);
       lb = appendFileInternal(pc, src, holder, clientMachine, logRetryCache);
     } catch (StandbyException se) {
       skipSync = true;
@@ -2727,7 +2945,7 @@ public class FSNamesystem implements Nam
             +" block size " + lb.getBlock().getNumBytes());
       }
     }
-    logAuditEvent(true, "append", src);
+    logAuditEvent(true, "append", srcArg);
     return lb;
   }
 
@@ -2772,7 +2990,7 @@ public class FSNamesystem implements Nam
     readLock();
     try {
       checkOperation(OperationCategory.READ);
-      src = FSDirectory.resolvePath(src, pathComponents, dir);
+      src = resolvePath(src, pathComponents);
       LocatedBlock[] onRetryBlock = new LocatedBlock[1];
       FileState fileState = analyzeFileState(
           src, fileId, clientName, previous, onRetryBlock);
@@ -2995,7 +3213,7 @@ public class FSNamesystem implements Nam
       checkOperation(OperationCategory.READ);
       //check safe mode
       checkNameNodeSafeMode("Cannot add datanode; src=" + src + ", blk=" + blk);
-      src = FSDirectory.resolvePath(src, pathComponents, dir);
+      src = resolvePath(src, pathComponents);
 
       //check lease
       final INode inode;
@@ -3048,7 +3266,7 @@ public class FSNamesystem implements Nam
     try {
       checkOperation(OperationCategory.WRITE);
       checkNameNodeSafeMode("Cannot abandon block " + b + " for file" + src);
-      src = FSDirectory.resolvePath(src, pathComponents, dir);
+      src = resolvePath(src, pathComponents);
 
       final INode inode;
       if (fileId == INodeId.GRANDFATHER_INODE_ID) {
@@ -3130,9 +3348,10 @@ public class FSNamesystem implements Nam
    *         (e.g if not all blocks have reached minimum replication yet)
    * @throws IOException on error (eg lease mismatch, file not open, file deleted)
    */
-  boolean completeFile(String src, String holder,
+  boolean completeFile(final String srcArg, String holder,
                        ExtendedBlock last, long fileId)
     throws SafeModeException, UnresolvedLinkException, IOException {
+    String src = srcArg;
     if (NameNode.stateChangeLog.isDebugEnabled()) {
       NameNode.stateChangeLog.debug("DIR* NameSystem.completeFile: " +
           src + " for " + holder);
@@ -3146,7 +3365,7 @@ public class FSNamesystem implements Nam
     try {
       checkOperation(OperationCategory.WRITE);
       checkNameNodeSafeMode("Cannot complete file " + src);
-      src = FSDirectory.resolvePath(src, pathComponents, dir);
+      src = resolvePath(src, pathComponents);
       success = completeFileInternal(src, holder,
         ExtendedBlock.getLocalBlock(last), fileId);
     } finally {
@@ -3154,7 +3373,7 @@ public class FSNamesystem implements Nam
     }
     getEditLog().logSync();
     if (success) {
-      NameNode.stateChangeLog.info("DIR* completeFile: " + src
+      NameNode.stateChangeLog.info("DIR* completeFile: " + srcArg
           + " is closed by " + holder);
     }
     return success;
@@ -3322,8 +3541,11 @@ public class FSNamesystem implements Nam
     return ret;
   }
 
-  private boolean renameToInt(String src, String dst, boolean logRetryCache) 
+  private boolean renameToInt(final String srcArg, final String dstArg,
+    boolean logRetryCache)
     throws IOException, UnresolvedLinkException {
+    String src = srcArg;
+    String dst = dstArg;
     if (NameNode.stateChangeLog.isDebugEnabled()) {
       NameNode.stateChangeLog.debug("DIR* NameSystem.renameTo: " + src +
           " to " + dst);
@@ -3342,8 +3564,8 @@ public class FSNamesystem implements Nam
       checkOperation(OperationCategory.WRITE);
       checkNameNodeSafeMode("Cannot rename " + src);
       waitForLoadingFSImage();
-      src = FSDirectory.resolvePath(src, srcComponents, dir);
-      dst = FSDirectory.resolvePath(dst, dstComponents, dir);
+      src = resolvePath(src, srcComponents);
+      dst = resolvePath(dst, dstComponents);
       checkOperation(OperationCategory.WRITE);
       status = renameToInternal(pc, src, dst, logRetryCache);
       if (status) {
@@ -3354,7 +3576,7 @@ public class FSNamesystem implements Nam
     }
     getEditLog().logSync();
     if (status) {
-      logAuditEvent(true, "rename", src, dst, resultingStat);
+      logAuditEvent(true, "rename", srcArg, dstArg, resultingStat);
     }
     return status;
   }
@@ -3392,8 +3614,10 @@ public class FSNamesystem implements Nam
   
 
   /** Rename src to dst */
-  void renameTo(String src, String dst, Options.Rename... options)
-      throws IOException, UnresolvedLinkException {
+  void renameTo(final String srcArg, final String dstArg,
+      Options.Rename... options) throws IOException, UnresolvedLinkException {
+    String src = srcArg;
+    String dst = dstArg;
     if (NameNode.stateChangeLog.isDebugEnabled()) {
       NameNode.stateChangeLog.debug("DIR* NameSystem.renameTo: with options - "
           + src + " to " + dst);
@@ -3416,8 +3640,8 @@ public class FSNamesystem implements Nam
     try {
       checkOperation(OperationCategory.WRITE);
       checkNameNodeSafeMode("Cannot rename " + src);
-      src = FSDirectory.resolvePath(src, srcComponents, dir);
-      dst = FSDirectory.resolvePath(dst, dstComponents, dir);
+      src = resolvePath(src, srcComponents);
+      dst = resolvePath(dst, dstComponents);
       renameToInternal(pc, src, dst, cacheEntry != null, options);
       resultingStat = getAuditFileInfo(dst, false);
       success = true;
@@ -3431,7 +3655,7 @@ public class FSNamesystem implements Nam
       for (Rename option : options) {
         cmd.append(option.value()).append(" ");
       }
-      logAuditEvent(true, cmd.toString(), src, dst, resultingStat);
+      logAuditEvent(true, cmd.toString(), srcArg, dstArg, resultingStat);
     }
   }
 
@@ -3529,7 +3753,7 @@ public class FSNamesystem implements Nam
     try {
       checkOperation(OperationCategory.WRITE);
       checkNameNodeSafeMode("Cannot delete " + src);
-      src = FSDirectory.resolvePath(src, pathComponents, dir);
+      src = resolvePath(src, pathComponents);
       if (!recursive && dir.isNonEmptyDirectory(src)) {
         throw new PathIsNotEmptyDirectoryException(src + " is non empty");
       }
@@ -3537,6 +3761,7 @@ public class FSNamesystem implements Nam
         checkPermission(pc, src, false, null, FsAction.WRITE, null,
             FsAction.ALL, true, false);
       }
+
       long mtime = now();
       // Unlink the target directory from directory tree
       long filesRemoved = dir.delete(src, collectedBlocks, removedINodes,
@@ -3672,7 +3897,7 @@ public class FSNamesystem implements Nam
   /**
    * Get the file info for a specific file.
    *
-   * @param src The string representation of the path to the file
+   * @param srcArg The string representation of the path to the file
    * @param resolveLink whether to throw UnresolvedLinkException 
    *        if src refers to a symlink
    *
@@ -3683,9 +3908,10 @@ public class FSNamesystem implements Nam
    *         or null if file not found
    * @throws StandbyException 
    */
-  HdfsFileStatus getFileInfo(String src, boolean resolveLink) 
+  HdfsFileStatus getFileInfo(final String srcArg, boolean resolveLink)
     throws AccessControlException, UnresolvedLinkException,
            StandbyException, IOException {
+    String src = srcArg;
     if (!DFSUtil.isValidName(src)) {
       throw new InvalidPathException("Invalid file name: " + src);
     }
@@ -3696,34 +3922,36 @@ public class FSNamesystem implements Nam
     readLock();
     try {
       checkOperation(OperationCategory.READ);
-      src = FSDirectory.resolvePath(src, pathComponents, dir);
+      src = resolvePath(src, pathComponents);
       if (isPermissionEnabled) {
         checkPermission(pc, src, false, null, null, null, null, false,
             resolveLink);
       }
-      stat = dir.getFileInfo(src, resolveLink);
+      stat = dir.getFileInfo(src, resolveLink,
+          FSDirectory.isReservedRawName(srcArg));
     } catch (AccessControlException e) {
-      logAuditEvent(false, "getfileinfo", src);
+      logAuditEvent(false, "getfileinfo", srcArg);
       throw e;
     } finally {
       readUnlock();
     }
-    logAuditEvent(true, "getfileinfo", src);
+    logAuditEvent(true, "getfileinfo", srcArg);
     return stat;
   }
   
   /**
    * Returns true if the file is closed
    */
-  boolean isFileClosed(String src) 
+  boolean isFileClosed(final String srcArg)
       throws AccessControlException, UnresolvedLinkException,
       StandbyException, IOException {
+    String src = srcArg;
     FSPermissionChecker pc = getPermissionChecker();  
     checkOperation(OperationCategory.READ);
     byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
     readLock();
     try {
-      src = FSDirectory.resolvePath(src, pathComponents, dir);
+      src = resolvePath(src, pathComponents);
       checkOperation(OperationCategory.READ);
       if (isPermissionEnabled) {
         checkTraverse(pc, src);
@@ -3731,7 +3959,7 @@ public class FSNamesystem implements Nam
       return !INodeFile.valueOf(dir.getINode(src), src).isUnderConstruction();
     } catch (AccessControlException e) {
       if (isAuditEnabled() && isExternalInvocation()) {
-        logAuditEvent(false, "isFileClosed", src);
+        logAuditEvent(false, "isFileClosed", srcArg);
       }
       throw e;
     } finally {
@@ -3754,8 +3982,9 @@ public class FSNamesystem implements Nam
     return ret;
   }
 
-  private boolean mkdirsInt(String src, PermissionStatus permissions,
+  private boolean mkdirsInt(final String srcArg, PermissionStatus permissions,
       boolean createParent) throws IOException, UnresolvedLinkException {
+    String src = srcArg;
     if(NameNode.stateChangeLog.isDebugEnabled()) {
       NameNode.stateChangeLog.debug("DIR* NameSystem.mkdirs: " + src);
     }
@@ -3771,7 +4000,7 @@ public class FSNamesystem implements Nam
     try {
       checkOperation(OperationCategory.WRITE);   
       checkNameNodeSafeMode("Cannot create directory " + src);
-      src = FSDirectory.resolvePath(src, pathComponents, dir);
+      src = resolvePath(src, pathComponents);
       status = mkdirsInternal(pc, src, permissions, createParent);
       if (status) {
         resultingStat = getAuditFileInfo(src, false);
@@ -3781,7 +4010,7 @@ public class FSNamesystem implements Nam
     }
     getEditLog().logSync();
     if (status) {
-      logAuditEvent(true, "mkdirs", src, null, resultingStat);
+      logAuditEvent(true, "mkdirs", srcArg, null, resultingStat);
     }
     return status;
   }
@@ -3939,7 +4168,8 @@ public class FSNamesystem implements Nam
    * @return object containing information regarding the file
    *         or null if file not found
    */
-  ContentSummary getContentSummary(String src) throws IOException {
+  ContentSummary getContentSummary(final String srcArg) throws IOException {
+    String src = srcArg;
     FSPermissionChecker pc = getPermissionChecker();
     checkOperation(OperationCategory.READ);
     byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
@@ -3947,7 +4177,7 @@ public class FSNamesystem implements Nam
     boolean success = true;
     try {
       checkOperation(OperationCategory.READ);
-      src = FSDirectory.resolvePath(src, pathComponents, dir);
+      src = resolvePath(src, pathComponents);
       if (isPermissionEnabled) {
         checkPermission(pc, src, false, null, null, null, FsAction.READ_EXECUTE);
       }
@@ -3958,7 +4188,7 @@ public class FSNamesystem implements Nam
       throw ace;
     } finally {
       readUnlock();
-      logAuditEvent(success, "contentSummary", src);
+      logAuditEvent(success, "contentSummary", srcArg);
     }
   }
 
@@ -4009,7 +4239,7 @@ public class FSNamesystem implements Nam
     try {
       checkOperation(OperationCategory.WRITE);
       checkNameNodeSafeMode("Cannot fsync file " + src);
-      src = FSDirectory.resolvePath(src, pathComponents, dir);
+      src = resolvePath(src, pathComponents);
       final INode inode;
       if (fileId == INodeId.GRANDFATHER_INODE_ID) {
         // Older clients may not have given us an inode ID to work with.
@@ -4475,9 +4705,10 @@ public class FSNamesystem implements Nam
     }
   }
 
-  private DirectoryListing getListingInt(String src, byte[] startAfter,
-      boolean needLocation) 
+  private DirectoryListing getListingInt(final String srcArg, byte[] startAfter,
+      boolean needLocation)
     throws AccessControlException, UnresolvedLinkException, IOException {
+    String src = srcArg;
     DirectoryListing dl;
     FSPermissionChecker pc = getPermissionChecker();
     checkOperation(OperationCategory.READ);
@@ -4486,7 +4717,7 @@ public class FSNamesystem implements Nam
     readLock();
     try {
       checkOperation(OperationCategory.READ);
-      src = FSDirectory.resolvePath(src, pathComponents, dir);
+      src = resolvePath(src, pathComponents);
 
       // Get file name when startAfter is an INodePath
       if (FSDirectory.isReservedName(startAfterString)) {
@@ -4510,7 +4741,7 @@ public class FSNamesystem implements Nam
           checkTraverse(pc, src);
         }
       }
-      logAuditEvent(true, "listStatus", src);
+      logAuditEvent(true, "listStatus", srcArg);
       dl = dir.getListing(src, startAfter, needLocation);
     } finally {
       readUnlock();
@@ -5925,6 +6156,28 @@ public class FSNamesystem implements Nam
     checkPermission(pc, path, false, null, null, null, null);
   }
 
+  /**
+   * This is a wrapper for FSDirectory.resolvePath(). If the path passed
+   * is prefixed with /.reserved/raw, then it checks to ensure that the caller
+   * has superuser privileges.
+   *
+   * @param path The path to resolve.
+   * @param pathComponents path components corresponding to the path
+   * @return if the path indicates an inode, the path with everything up to
+   *         <inodeid> replaced by the corresponding path of the inode;
+   *         otherwise {@code path} as is. If the path refers to a path in
+   *         the "raw" directory, return the non-raw pathname.
+   * @throws FileNotFoundException
+   * @throws AccessControlException
+   */
+  private String resolvePath(String path, byte[][] pathComponents)
+      throws FileNotFoundException, AccessControlException {
+    if (FSDirectory.isReservedRawName(path)) {
+      checkSuperuserPrivilege();
+    }
+    return FSDirectory.resolvePath(path, pathComponents, dir);
+  }
+
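
The /.reserved/raw prefix check itself is simple string matching on the first
path components. A self-contained approximation (the authoritative version is
FSDirectory.isReservedRawName; this one only illustrates the shape):

    public class RawPathGate {
      private static final String RAW_PREFIX = "/.reserved/raw";

      static boolean isReservedRawName(String path) {
        return path.startsWith(RAW_PREFIX)
            && (path.length() == RAW_PREFIX.length()
                || path.charAt(RAW_PREFIX.length()) == '/');
      }

      public static void main(String[] args) {
        System.out.println(isReservedRawName("/.reserved/raw/zone/f")); // true
        System.out.println(isReservedRawName("/zone/f"));               // false
        System.out.println(isReservedRawName("/.reserved/rawdata"));    // false
      }
    }
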
   @Override
   public void checkSuperuserPrivilege()
       throws AccessControlException {
@@ -8146,7 +8399,9 @@ public class FSNamesystem implements Nam
     return results;
   }
 
-  void modifyAclEntries(String src, List<AclEntry> aclSpec) throws IOException {
+  void modifyAclEntries(final String srcArg, List<AclEntry> aclSpec)
+      throws IOException {
+    String src = srcArg;
     nnConf.checkAclsConfigFlag();
     HdfsFileStatus resultingStat = null;
     FSPermissionChecker pc = getPermissionChecker();
@@ -8156,7 +8411,7 @@ public class FSNamesystem implements Nam
     try {
       checkOperation(OperationCategory.WRITE);
       checkNameNodeSafeMode("Cannot modify ACL entries on " + src);
-      src = FSDirectory.resolvePath(src, pathComponents, dir);
+      src = resolvePath(src, pathComponents);
       checkOwner(pc, src);
       List<AclEntry> newAcl = dir.modifyAclEntries(src, aclSpec);
       getEditLog().logSetAcl(src, newAcl);
@@ -8165,10 +8420,12 @@ public class FSNamesystem implements Nam
       writeUnlock();
     }
     getEditLog().logSync();
-    logAuditEvent(true, "modifyAclEntries", src, null, resultingStat);
+    logAuditEvent(true, "modifyAclEntries", srcArg, null, resultingStat);
   }
 
-  void removeAclEntries(String src, List<AclEntry> aclSpec) throws IOException {
+  void removeAclEntries(final String srcArg, List<AclEntry> aclSpec)
+      throws IOException {
+    String src = srcArg;
     nnConf.checkAclsConfigFlag();
     HdfsFileStatus resultingStat = null;
     FSPermissionChecker pc = getPermissionChecker();
@@ -8178,7 +8435,7 @@ public class FSNamesystem implements Nam
     try {
       checkOperation(OperationCategory.WRITE);
       checkNameNodeSafeMode("Cannot remove ACL entries on " + src);
-      src = FSDirectory.resolvePath(src, pathComponents, dir);
+      src = resolvePath(src, pathComponents);
       checkOwner(pc, src);
       List<AclEntry> newAcl = dir.removeAclEntries(src, aclSpec);
       getEditLog().logSetAcl(src, newAcl);
@@ -8187,10 +8444,11 @@ public class FSNamesystem implements Nam
       writeUnlock();
     }
     getEditLog().logSync();
-    logAuditEvent(true, "removeAclEntries", src, null, resultingStat);
+    logAuditEvent(true, "removeAclEntries", srcArg, null, resultingStat);
   }
 
-  void removeDefaultAcl(String src) throws IOException {
+  void removeDefaultAcl(final String srcArg) throws IOException {
+    String src = srcArg;
     nnConf.checkAclsConfigFlag();
     HdfsFileStatus resultingStat = null;
     FSPermissionChecker pc = getPermissionChecker();
@@ -8200,7 +8458,7 @@ public class FSNamesystem implements Nam
     try {
       checkOperation(OperationCategory.WRITE);
       checkNameNodeSafeMode("Cannot remove default ACL entries on " + src);
-      src = FSDirectory.resolvePath(src, pathComponents, dir);
+      src = resolvePath(src, pathComponents);
       checkOwner(pc, src);
       List<AclEntry> newAcl = dir.removeDefaultAcl(src);
       getEditLog().logSetAcl(src, newAcl);
@@ -8209,10 +8467,11 @@ public class FSNamesystem implements Nam
       writeUnlock();
     }
     getEditLog().logSync();
-    logAuditEvent(true, "removeDefaultAcl", src, null, resultingStat);
+    logAuditEvent(true, "removeDefaultAcl", srcArg, null, resultingStat);
   }
 
-  void removeAcl(String src) throws IOException {
+  void removeAcl(final String srcArg) throws IOException {
+    String src = srcArg;
     nnConf.checkAclsConfigFlag();
     HdfsFileStatus resultingStat = null;
     FSPermissionChecker pc = getPermissionChecker();
@@ -8222,7 +8481,7 @@ public class FSNamesystem implements Nam
     try {
       checkOperation(OperationCategory.WRITE);
       checkNameNodeSafeMode("Cannot remove ACL on " + src);
-      src = FSDirectory.resolvePath(src, pathComponents, dir);
+      src = resolvePath(src, pathComponents);
       checkOwner(pc, src);
       dir.removeAcl(src);
       getEditLog().logSetAcl(src, AclFeature.EMPTY_ENTRY_LIST);
@@ -8231,10 +8490,11 @@ public class FSNamesystem implements Nam
       writeUnlock();
     }
     getEditLog().logSync();
-    logAuditEvent(true, "removeAcl", src, null, resultingStat);
+    logAuditEvent(true, "removeAcl", srcArg, null, resultingStat);
   }
 
-  void setAcl(String src, List<AclEntry> aclSpec) throws IOException {
+  void setAcl(final String srcArg, List<AclEntry> aclSpec) throws IOException {
+    String src = srcArg;
     nnConf.checkAclsConfigFlag();
     HdfsFileStatus resultingStat = null;
     FSPermissionChecker pc = getPermissionChecker();
@@ -8244,7 +8504,7 @@ public class FSNamesystem implements Nam
     try {
       checkOperation(OperationCategory.WRITE);
       checkNameNodeSafeMode("Cannot set ACL on " + src);
-      src = FSDirectory.resolvePath(src, pathComponents, dir);
+      src = resolvePath(src, pathComponents);
       checkOwner(pc, src);
       List<AclEntry> newAcl = dir.setAcl(src, aclSpec);
       getEditLog().logSetAcl(src, newAcl);
@@ -8253,7 +8513,7 @@ public class FSNamesystem implements Nam
       writeUnlock();
     }
     getEditLog().logSync();
-    logAuditEvent(true, "setAcl", src, null, resultingStat);
+    logAuditEvent(true, "setAcl", srcArg, null, resultingStat);
   }
 
   AclStatus getAclStatus(String src) throws IOException {
@@ -8264,7 +8524,7 @@ public class FSNamesystem implements Nam
     readLock();
     try {
       checkOperation(OperationCategory.READ);
-      src = FSDirectory.resolvePath(src, pathComponents, dir);
+      src = resolvePath(src, pathComponents);
       if (isPermissionEnabled) {
         checkPermission(pc, src, false, null, null, null, null);
       }
@@ -8273,7 +8533,140 @@ public class FSNamesystem implements Nam
       readUnlock();
     }
   }
-  
+
+  /**
+   * Create an encryption zone on directory src using the specified key.
+   *
+   * @param src     the path of a directory which will be the root of the
+   *                encryption zone. The directory must be empty.
+   * @param keyName name of a key which must be present in the configured
+   *                KeyProvider.
+   * @throws AccessControlException  if the caller is not the superuser.
+   * @throws UnresolvedLinkException if the path can't be resolved.
+   * @throws SafeModeException       if the Namenode is in safe mode.
+   */
+  void createEncryptionZone(final String src, final String keyName)
+    throws IOException, UnresolvedLinkException,
+      SafeModeException, AccessControlException {
+    final CacheEntry cacheEntry = RetryCache.waitForCompletion(retryCache);
+    if (cacheEntry != null && cacheEntry.isSuccess()) {
+      return; // Return previous response
+    }
+
+    boolean success = false;
+    try {
+      if (provider == null) {
+        throw new IOException(
+            "Can't create an encryption zone for " + src +
+            " since no key provider is available.");
+      }
+      if (keyName == null || keyName.isEmpty()) {
+        throw new IOException("Must specify a key name when creating an " +
+            "encryption zone");
+      }
+      KeyVersion keyVersion = provider.getCurrentKey(keyName);
+      if (keyVersion == null) {
+        /*
+         * It would be nice if we threw something more specific than
+         * IOException when the key is not found, but the KeyProvider API
+         * doesn't provide for that. If that API is ever changed to throw
+         * something more specific (e.g. UnknownKeyException) then we can
+         * update this to match it, or better yet, just rethrow the
+         * KeyProvider's exception.
+         */
+        throw new IOException("Key " + keyName + " doesn't exist.");
+      }
+      createEncryptionZoneInt(src, keyName, cacheEntry != null);
+      success = true;
+    } catch (AccessControlException e) {
+      logAuditEvent(false, "createEncryptionZone", src);
+      throw e;
+    } finally {
+      RetryCache.setState(cacheEntry, success);
+    }
+  }
+
+  private void createEncryptionZoneInt(final String srcArg, String keyName,
+      final boolean logRetryCache) throws IOException {
+    String src = srcArg;
+    HdfsFileStatus resultingStat = null;
+    checkSuperuserPrivilege();
+    checkOperation(OperationCategory.WRITE);
+    final byte[][] pathComponents =
+      FSDirectory.getPathComponentsForReservedPath(src);
+    writeLock();
+    try {
+      checkSuperuserPrivilege();
+      checkOperation(OperationCategory.WRITE);
+      checkNameNodeSafeMode("Cannot create encryption zone on " + src);
+      src = resolvePath(src, pathComponents);
+
+      final XAttr ezXAttr = dir.createEncryptionZone(src, keyName);
+      List<XAttr> xAttrs = Lists.newArrayListWithCapacity(1);
+      xAttrs.add(ezXAttr);
+      getEditLog().logSetXAttrs(src, xAttrs, logRetryCache);
+      resultingStat = getAuditFileInfo(src, false);
+    } finally {
+      writeUnlock();
+    }
+    getEditLog().logSync();
+    logAuditEvent(true, "createEncryptionZone", srcArg, null, resultingStat);
+  }
+
+  /**
+   * Get the encryption zone for the specified path.
+   *
+   * @param srcArg the path of a file or directory to get the EZ for.
+   * @return the EZ for the path, or null if none.
+   * @throws AccessControlException  if the caller is not the superuser.
+   * @throws UnresolvedLinkException if the path can't be resolved.
+   */
+  EncryptionZoneWithId getEZForPath(final String srcArg)
+    throws AccessControlException, UnresolvedLinkException, IOException {
+    String src = srcArg;
+    HdfsFileStatus resultingStat = null;
+    final byte[][] pathComponents =
+        FSDirectory.getPathComponentsForReservedPath(src);
+    boolean success = false;
+    final FSPermissionChecker pc = getPermissionChecker();
+    checkOperation(OperationCategory.READ);
+    readLock();
+    try {
+      if (isPermissionEnabled) {
+        checkPathAccess(pc, src, FsAction.READ);
+      }
+      checkOperation(OperationCategory.READ);
+      src = resolvePath(src, pathComponents);
+      final INodesInPath iip = dir.getINodesInPath(src, true);
+      final EncryptionZoneWithId ret = dir.getEZForPath(iip);
+      resultingStat = getAuditFileInfo(src, false);
+      success = true;
+      return ret;
+    } finally {
+      readUnlock();
+      logAuditEvent(success, "getEZForPath", srcArg, null, resultingStat);
+    }
+  }
+
+  BatchedListEntries<EncryptionZoneWithId> listEncryptionZones(long prevId)
+      throws IOException {
+    boolean success = false;
+    checkSuperuserPrivilege();
+    checkOperation(OperationCategory.READ);
+    readLock();
+    try {
+      checkSuperuserPrivilege();
+      checkOperation(OperationCategory.READ);
+      final BatchedListEntries<EncryptionZoneWithId> ret =
+          dir.listEncryptionZones(prevId);
+      success = true;
+      return ret;
+    } finally {
+      readUnlock();
+      logAuditEvent(success, "listEncryptionZones", null);
+    }
+  }
+
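
From an administrator's point of view these three methods surface through
HdfsAdmin and the key shell. A sketch of the workflow as of this branch (the
cluster address and key name are placeholders; the key would first be created
with something like 'hadoop key create ezKey'):

    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.fs.RemoteIterator;
    import org.apache.hadoop.hdfs.client.HdfsAdmin;
    import org.apache.hadoop.hdfs.protocol.EncryptionZone;

    public class EzAdminSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        URI nn = new URI("hdfs://nn.example.com:8020");
        FileSystem fs = FileSystem.get(nn, conf);
        Path zone = new Path("/secure");
        fs.mkdirs(zone); // the zone root must exist and be empty
        HdfsAdmin admin = new HdfsAdmin(nn, conf);
        admin.createEncryptionZone(zone, "ezKey");
        RemoteIterator<EncryptionZone> it = admin.listEncryptionZones();
        while (it.hasNext()) {
          System.out.println(it.next());
        }
      }
    }
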
   /**
    * Set xattr for a file or directory.
    * 
@@ -8307,20 +8700,22 @@ public class FSNamesystem implements Nam
     }
   }
   
-  private void setXAttrInt(String src, XAttr xAttr, EnumSet<XAttrSetFlag> flag,
-      boolean logRetryCache) throws IOException {
+  private void setXAttrInt(final String srcArg, XAttr xAttr,
+      EnumSet<XAttrSetFlag> flag, boolean logRetryCache) throws IOException {
+    String src = srcArg;
     nnConf.checkXAttrsConfigFlag();
     checkXAttrSize(xAttr);
     HdfsFileStatus resultingStat = null;
     FSPermissionChecker pc = getPermissionChecker();
-    XAttrPermissionFilter.checkPermissionForApi(pc, xAttr);
+    XAttrPermissionFilter.checkPermissionForApi(pc, xAttr,
+        FSDirectory.isReservedRawName(src));
     checkOperation(OperationCategory.WRITE);
     byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
     writeLock();
     try {
       checkOperation(OperationCategory.WRITE);
       checkNameNodeSafeMode("Cannot set XAttr on " + src);
-      src = FSDirectory.resolvePath(src, pathComponents, dir);
+      src = resolvePath(src, pathComponents);
       checkXAttrChangeAccess(src, xAttr, pc);
       List<XAttr> xAttrs = Lists.newArrayListWithCapacity(1);
       xAttrs.add(xAttr);
@@ -8331,7 +8726,7 @@ public class FSNamesystem implements Nam
       writeUnlock();
     }
     getEditLog().logSync();
-    logAuditEvent(true, "setXAttr", src, null, resultingStat);
+    logAuditEvent(true, "setXAttr", srcArg, null, resultingStat);
   }
 
   /**
@@ -8354,15 +8749,18 @@ public class FSNamesystem implements Nam
     }
   }
   
-  List<XAttr> getXAttrs(String src, List<XAttr> xAttrs) throws IOException {
+  List<XAttr> getXAttrs(final String srcArg, List<XAttr> xAttrs)
+      throws IOException {
+    String src = srcArg;
     nnConf.checkXAttrsConfigFlag();
     FSPermissionChecker pc = getPermissionChecker();
+    final boolean isRawPath = FSDirectory.isReservedRawName(src);
     boolean getAll = xAttrs == null || xAttrs.isEmpty();
     if (!getAll) {
       try {
-        XAttrPermissionFilter.checkPermissionForApi(pc, xAttrs);
+        XAttrPermissionFilter.checkPermissionForApi(pc, xAttrs, isRawPath);
       } catch (AccessControlException e) {
-        logAuditEvent(false, "getXAttrs", src);
+        logAuditEvent(false, "getXAttrs", srcArg);
         throw e;
       }
     }
@@ -8370,14 +8768,14 @@ public class FSNamesystem implements Nam
     byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
     readLock();
     try {
-      src = FSDirectory.resolvePath(src, pathComponents, dir);
+      src = resolvePath(src, pathComponents);
       checkOperation(OperationCategory.READ);
       if (isPermissionEnabled) {
         checkPathAccess(pc, src, FsAction.READ);
       }
       List<XAttr> all = dir.getXAttrs(src);
       List<XAttr> filteredAll = XAttrPermissionFilter.
-          filterXAttrsForApi(pc, all);
+          filterXAttrsForApi(pc, all, isRawPath);
       if (getAll) {
         return filteredAll;
       } else {
@@ -8403,7 +8801,7 @@ public class FSNamesystem implements Nam
         return toGet;
       }
     } catch (AccessControlException e) {
-      logAuditEvent(false, "getXAttrs", src);
+      logAuditEvent(false, "getXAttrs", srcArg);
       throw e;
     } finally {
       readUnlock();
@@ -8413,11 +8811,12 @@ public class FSNamesystem implements Nam
   List<XAttr> listXAttrs(String src) throws IOException {
     nnConf.checkXAttrsConfigFlag();
     final FSPermissionChecker pc = getPermissionChecker();
+    final boolean isRawPath = FSDirectory.isReservedRawName(src);
     checkOperation(OperationCategory.READ);
     byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
     readLock();
     try {
-      src = FSDirectory.resolvePath(src, pathComponents, dir);
+      src = resolvePath(src, pathComponents);
       checkOperation(OperationCategory.READ);
       if (isPermissionEnabled) {
         /* To access xattr names, you need EXECUTE in the owning directory. */
@@ -8425,7 +8824,7 @@ public class FSNamesystem implements Nam
       }
       final List<XAttr> all = dir.getXAttrs(src);
       final List<XAttr> filteredAll = XAttrPermissionFilter.
-        filterXAttrsForApi(pc, all);
+        filterXAttrsForApi(pc, all, isRawPath);
       return filteredAll;
     } catch (AccessControlException e) {
       logAuditEvent(false, "listXAttrs", src);
@@ -8464,19 +8863,21 @@ public class FSNamesystem implements Nam
     }
   }
 
-  void removeXAttrInt(String src, XAttr xAttr, boolean logRetryCache)
+  void removeXAttrInt(final String srcArg, XAttr xAttr, boolean logRetryCache)
       throws IOException {
+    String src = srcArg;
     nnConf.checkXAttrsConfigFlag();
     HdfsFileStatus resultingStat = null;
     FSPermissionChecker pc = getPermissionChecker();
-      XAttrPermissionFilter.checkPermissionForApi(pc, xAttr);
+    XAttrPermissionFilter.checkPermissionForApi(pc, xAttr,
+        FSDirectory.isReservedRawName(src));
     checkOperation(OperationCategory.WRITE);
     byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
     writeLock();
     try {
       checkOperation(OperationCategory.WRITE);
       checkNameNodeSafeMode("Cannot remove XAttr entry on " + src);
-      src = FSDirectory.resolvePath(src, pathComponents, dir);
+      src = resolvePath(src, pathComponents);
       checkXAttrChangeAccess(src, xAttr, pc);
 
       List<XAttr> xAttrs = Lists.newArrayListWithCapacity(1);
@@ -8493,7 +8894,7 @@ public class FSNamesystem implements Nam
       writeUnlock();
     }
     getEditLog().logSync();
-    logAuditEvent(true, "removeXAttr", src, null, resultingStat);
+    logAuditEvent(true, "removeXAttr", srcArg, null, resultingStat);
   }
 
   private void checkXAttrChangeAccess(String src, XAttr xAttr,
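The listEncryptionZones implementation above returns one batch per RPC and
uses the numeric zone id as a pagination cursor. A minimal sketch of a caller
draining the listing, assuming the usual BatchedEntries accessors
(size/get/hasMore) and that EncryptionZoneWithId exposes getId() and
getPath(); the accessor names are illustrative, not taken from the patch:

    long prevId = -1;                  // cursor: last zone id seen
    boolean hasMore = true;
    while (hasMore) {
      BatchedEntries<EncryptionZoneWithId> batch =
          namenode.listEncryptionZones(prevId);
      for (int i = 0; i < batch.size(); i++) {
        EncryptionZoneWithId zone = batch.get(i);
        System.out.println(zone.getPath());
        prevId = zone.getId();         // advance the cursor
      }
      hasMore = batch.hasMore();       // false once the listing is drained
    }

Each call holds the namesystem read lock for only one batch, which is why
the listing is served incrementally.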

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java?rev=1619197&r1=1619196&r2=1619197&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java Wed Aug 20 18:39:03 2014
@@ -37,6 +37,7 @@ import java.util.Set;
 import org.apache.commons.logging.Log;
 import org.apache.hadoop.HadoopIllegalArgumentException;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.crypto.CipherSuite;
 import org.apache.hadoop.fs.BatchedRemoteIterator.BatchedEntries;
 import org.apache.hadoop.fs.CacheFlag;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
@@ -77,6 +78,7 @@ import org.apache.hadoop.hdfs.protocol.D
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.DirectoryListing;
+import org.apache.hadoop.hdfs.protocol.EncryptionZoneWithId;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.FSLimitException;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
@@ -535,7 +537,8 @@ class NameNodeRpcServer implements Namen
   @Override // ClientProtocol
   public HdfsFileStatus create(String src, FsPermission masked,
       String clientName, EnumSetWritable<CreateFlag> flag,
-      boolean createParent, short replication, long blockSize)
+      boolean createParent, short replication, long blockSize, 
+      List<CipherSuite> cipherSuites)
       throws IOException {
     String clientMachine = getClientMachine();
     if (stateChangeLog.isDebugEnabled()) {
@@ -549,7 +552,7 @@ class NameNodeRpcServer implements Namen
     HdfsFileStatus fileStatus = namesystem.startFile(src, new PermissionStatus(
         getRemoteUser().getShortUserName(), null, masked),
         clientName, clientMachine, flag.get(), createParent, replication,
-        blockSize);
+        blockSize, cipherSuites);
     metrics.incrFilesCreated();
     metrics.incrCreateFileOps();
     return fileStatus;
@@ -1424,6 +1427,24 @@ class NameNodeRpcServer implements Namen
   }
   
   @Override
+  public void createEncryptionZone(String src, String keyName)
+    throws IOException {
+    namesystem.createEncryptionZone(src, keyName);
+  }
+
+  @Override
+  public EncryptionZoneWithId getEZForPath(String src)
+    throws IOException {
+    return namesystem.getEZForPath(src);
+  }
+
+  @Override
+  public BatchedEntries<EncryptionZoneWithId> listEncryptionZones(
+      long prevId) throws IOException {
+    return namesystem.listEncryptionZones(prevId);
+  }
+
+  @Override
   public void setXAttr(String src, XAttr xAttr, EnumSet<XAttrSetFlag> flag)
       throws IOException {
     namesystem.setXAttr(src, xAttr, flag);
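The widened create() signature above lets the client advertise the cipher
suites it supports, so the NameNode can choose one for files created inside
an encryption zone. A hedged sketch of a ClientProtocol caller, with the
path, replication, and block size as placeholder values:

    List<CipherSuite> suites = new ArrayList<CipherSuite>();
    suites.add(CipherSuite.AES_CTR_NOPADDING);
    HdfsFileStatus stat = namenode.create(
        "/ez/file",                                // hypothetical path
        FsPermission.getFileDefault(),
        clientName,
        new EnumSetWritable<CreateFlag>(EnumSet.of(CreateFlag.CREATE)),
        true,                                      // createParent
        (short) 3,                                 // replication
        128L * 1024 * 1024,                        // blockSize
        suites);                                   // supported suites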

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/XAttrPermissionFilter.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/XAttrPermissionFilter.java?rev=1619197&r1=1619196&r2=1619197&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/XAttrPermissionFilter.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/XAttrPermissionFilter.java Wed Aug 20 18:39:03 2014
@@ -47,15 +47,27 @@ import com.google.common.base.Preconditi
  * <br>
  * SYSTEM - extended system attributes: these are used by the HDFS
  * core and are not available through admin/user API.
+ * <br>
+ * RAW - extended system attributes: these are used for internal system
+ *   attributes that sometimes need to be exposed. Like SYSTEM namespace
+ *   attributes they are not visible to the user except when getXAttr/getXAttrs
+ *   is called on a file or directory in the /.reserved/raw HDFS directory
+ *   hierarchy. These attributes can only be accessed by the superuser.
+ * <br>
  */
 @InterfaceAudience.Private
 public class XAttrPermissionFilter {
   
-  static void checkPermissionForApi(FSPermissionChecker pc, XAttr xAttr) 
+  static void checkPermissionForApi(FSPermissionChecker pc, XAttr xAttr,
+      boolean isRawPath)
       throws AccessControlException {
+    final boolean isSuperUser = pc.isSuperUser();
     if (xAttr.getNameSpace() == XAttr.NameSpace.USER || 
-        (xAttr.getNameSpace() == XAttr.NameSpace.TRUSTED && 
-        pc.isSuperUser())) {
+        (xAttr.getNameSpace() == XAttr.NameSpace.TRUSTED && isSuperUser)) {
+      return;
+    }
+    if (xAttr.getNameSpace() == XAttr.NameSpace.RAW &&
+        isRawPath && isSuperUser) {
       return;
     }
     throw new AccessControlException("User doesn't have permission for xattr: "
@@ -63,30 +75,34 @@ public class XAttrPermissionFilter {
   }
 
   static void checkPermissionForApi(FSPermissionChecker pc,
-                                    List<XAttr> xAttrs) throws AccessControlException {
+      List<XAttr> xAttrs, boolean isRawPath) throws AccessControlException {
     Preconditions.checkArgument(xAttrs != null);
     if (xAttrs.isEmpty()) {
       return;
     }
 
     for (XAttr xAttr : xAttrs) {
-      checkPermissionForApi(pc, xAttr);
+      checkPermissionForApi(pc, xAttr, isRawPath);
     }
   }
 
   static List<XAttr> filterXAttrsForApi(FSPermissionChecker pc,
-      List<XAttr> xAttrs) {
+      List<XAttr> xAttrs, boolean isRawPath) {
     assert xAttrs != null : "xAttrs can not be null";
     if (xAttrs == null || xAttrs.isEmpty()) {
       return xAttrs;
     }
     
     List<XAttr> filteredXAttrs = Lists.newArrayListWithCapacity(xAttrs.size());
+    final boolean isSuperUser = pc.isSuperUser();
     for (XAttr xAttr : xAttrs) {
       if (xAttr.getNameSpace() == XAttr.NameSpace.USER) {
         filteredXAttrs.add(xAttr);
       } else if (xAttr.getNameSpace() == XAttr.NameSpace.TRUSTED && 
-          pc.isSuperUser()) {
+          isSuperUser) {
+        filteredXAttrs.add(xAttr);
+      } else if (xAttr.getNameSpace() == XAttr.NameSpace.RAW &&
+          isSuperUser && isRawPath) {
         filteredXAttrs.add(xAttr);
       }
     }
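Taken together, checkPermissionForApi and filterXAttrsForApi above implement
one visibility rule per namespace. A compact restatement of that rule, not
code from the patch:

    // Visibility of an xattr namespace to a caller, per the filter above.
    static boolean isVisible(XAttr.NameSpace ns, boolean isSuperUser,
        boolean isRawPath) {
      switch (ns) {
        case USER:    return true;                     // gated by file perms
        case TRUSTED: return isSuperUser;
        case RAW:     return isSuperUser && isRawPath; // /.reserved/raw only
        default:      return false;                    // SYSTEM, SECURITY
      }
    }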

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java?rev=1619197&r1=1619196&r2=1619197&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java Wed Aug 20 18:39:03 2014
@@ -252,7 +252,8 @@ public class JsonUtil {
             : childrenNumLong.intValue();
     return new HdfsFileStatus(len, type == PathType.DIRECTORY, replication,
         blockSize, mTime, aTime, permission, owner, group,
-        symlink, DFSUtil.string2Bytes(localName), fileId, childrenNum);
+        symlink, DFSUtil.string2Bytes(localName), fileId, childrenNum,
+        null);
   }
 
   /** Convert an ExtendedBlock to a Json map. */
@@ -532,7 +533,7 @@ public class JsonUtil {
         (Map<?, ?>)m.get("lastLocatedBlock"));
     final boolean isLastBlockComplete = (Boolean)m.get("isLastBlockComplete");
     return new LocatedBlocks(fileLength, isUnderConstruction, locatedBlocks,
-        lastLocatedBlock, isLastBlockComplete);
+        lastLocatedBlock, isLastBlockComplete, null);
   }
 
   /** Convert a ContentSummary to a Json string. */

Propchange: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/native/
------------------------------------------------------------------------------
  Merged /hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/native:r1594376-1619194

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto?rev=1619197&r1=1619196&r2=1619197&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto Wed Aug 20 18:39:03 2014
@@ -32,6 +32,7 @@ import "Security.proto";
 import "hdfs.proto";
 import "acl.proto";
 import "xattr.proto";
+import "encryption.proto";
 
 /**
  * The ClientNamenodeProtocol Service defines the interface between a client 
@@ -73,6 +74,7 @@ message CreateRequestProto {
   required bool createParent = 5;
   required uint32 replication = 6; // Short: Only 16 bits used
   required uint64 blockSize = 7;
+  repeated CipherSuite cipherSuites = 8;
 }
 
 message CreateResponseProto {
@@ -793,4 +795,10 @@ service ClientNamenodeProtocol {
       returns(RemoveXAttrResponseProto);
   rpc checkAccess(CheckAccessRequestProto)
       returns(CheckAccessResponseProto);
+  rpc createEncryptionZone(CreateEncryptionZoneRequestProto)
+      returns(CreateEncryptionZoneResponseProto);
+  rpc listEncryptionZones(ListEncryptionZonesRequestProto)
+      returns(ListEncryptionZonesResponseProto);
+  rpc getEZForPath(GetEZForPathRequestProto)
+      returns(GetEZForPathResponseProto);
 }

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto?rev=1619197&r1=1619196&r2=1619197&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto Wed Aug 20 18:39:03 2014
@@ -200,6 +200,23 @@ message DataEncryptionKeyProto {
   optional string encryptionAlgorithm = 6;
 }
 
+/**
+ * Cipher suite.
+ */
+enum CipherSuite {
+    UNKNOWN = 1;
+    AES_CTR_NOPADDING = 2;
+}
+
+/**
+ * Encryption information for a file.
+ */
+message FileEncryptionInfoProto {
+  required CipherSuite suite = 1;
+  required bytes key = 2;
+  required bytes iv = 3;
+  required string ezKeyVersionName = 4;
+}
 
 /**
  * A set of file blocks and their locations.
@@ -210,9 +227,9 @@ message LocatedBlocksProto {
   required bool underConstruction = 3;
   optional LocatedBlockProto lastBlock = 4;
   required bool isLastBlockComplete = 5;
+  optional FileEncryptionInfoProto fileEncryptionInfo = 6;
 }
 
-
 /**
  * Status of a file, directory or symlink
  * Optionally includes a file's block locations if requested by client on the rpc call.
@@ -243,6 +260,9 @@ message HdfsFileStatusProto {
   // Optional field for fileId
   optional uint64 fileId = 13 [default = 0]; // default as an invalid id
   optional int32 childrenNum = 14 [default = -1];
+
+  // Optional field for file encryption
+  optional FileEncryptionInfoProto fileEncryptionInfo = 15;
 } 
 
 /**
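Protobuf generates the usual Java builders for the new messages, so
populating FileEncryptionInfoProto looks like the following sketch; the key
bytes, IV, and key version name are placeholders:

    FileEncryptionInfoProto proto = FileEncryptionInfoProto.newBuilder()
        .setSuite(CipherSuite.AES_CTR_NOPADDING)  // proto enum defined above
        .setKey(ByteString.copyFrom(edek))        // encrypted data key bytes
        .setIv(ByteString.copyFrom(iv))           // per-file IV
        .setEzKeyVersionName("myKey@0")           // hypothetical key version
        .build();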

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/xattr.proto
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/xattr.proto?rev=1619197&r1=1619196&r2=1619197&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/xattr.proto (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/xattr.proto Wed Aug 20 18:39:03 2014
@@ -27,6 +27,7 @@ message XAttrProto {
     TRUSTED   = 1;
     SECURITY  = 2;
     SYSTEM    = 3;
+    RAW       = 4;
   }
   
   required XAttrNamespaceProto namespace = 1;

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml?rev=1619197&r1=1619196&r2=1619197&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml Wed Aug 20 18:39:03 2014
@@ -2060,4 +2060,13 @@
     block layout (see HDFS-6482 for details on the layout).</description>
 </property>
 
+<property>
+  <name>dfs.namenode.list.encryption.zones.num.responses</name>
+  <value>100</value>
+  <description>When listing encryption zones, the maximum number of zones
+    that will be returned in a batch. Fetching the list incrementally in
+    batches improves namenode performance.
+  </description>
+</property>
+
 </configuration>
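The batch size is an ordinary int setting and can be overridden like any
other key; a larger value means fewer RPCs per full listing but more time
spent under the namesystem read lock per call. For example (value
illustrative):

    Configuration conf = new HdfsConfiguration();
    conf.setInt("dfs.namenode.list.encryption.zones.num.responses", 500);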

Propchange: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode/
------------------------------------------------------------------------------
  Merged /hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode:r1594376-1619194

Propchange: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/
------------------------------------------------------------------------------
  Merged /hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs:r1594376-1619194

Propchange: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/secondary/
------------------------------------------------------------------------------
  Merged /hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/secondary:r1594376-1619194

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/ExtendedAttributes.apt.vm
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/ExtendedAttributes.apt.vm?rev=1619197&r1=1619196&r2=1619197&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/ExtendedAttributes.apt.vm (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/ExtendedAttributes.apt.vm Wed Aug 20 18:39:03 2014
@@ -30,7 +30,7 @@ Extended Attributes in HDFS
 
 ** {Namespaces and Permissions}
 
-  In HDFS, as in Linux, there are four valid namespaces: <<<user>>>, <<<trusted>>>, <<<system>>>, and <<<security>>>. Each of these namespaces have different access restrictions.
+  In HDFS, there are five valid namespaces: <<<user>>>, <<<trusted>>>, <<<system>>>, <<<security>>>, and <<<raw>>>. Each of these namespaces has different access restrictions.
 
   The <<<user>>> namespace is the namespace that will commonly be used by client applications. Access to extended attributes in the user namespace is controlled by the corresponding file permissions.
 
@@ -40,6 +40,8 @@ Extended Attributes in HDFS
 
   The <<<security>>> namespace is reserved for internal HDFS use. This namespace is not accessible through userspace methods. It is currently unused.
 
+  The <<<raw>>> namespace is reserved for internal system attributes that sometimes need to be exposed. Like <<<system>>> namespace attributes, they are not visible to the user except when <<<getXAttr>>>/<<<getXAttrs>>> is called on a file or directory in the <<</.reserved/raw>>> HDFS directory hierarchy. These attributes can only be accessed by the superuser. An example of where <<<raw>>> namespace extended attributes are used is the <<<distcp>>> utility. Encryption zone metadata is stored in <<<raw.*>>> extended attributes, so as long as the administrator uses <<</.reserved/raw>>> pathnames in source and target, the encrypted files in the encryption zones are transparently copied.
+
 * {Interacting with extended attributes}
 
   The Hadoop shell has support for interacting with extended attributes via <<<hadoop fs -getfattr>>> and <<<hadoop fs -setfattr>>>. These commands are styled after the Linux {{{http://www.bestbits.at/acl/man/man1/getfattr.txt}getfattr(1)}} and {{{http://www.bestbits.at/acl/man/man1/setfattr.txt}setfattr(1)}} commands.
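In API terms, the raw restriction documented above means a raw.* attribute
is readable only by the superuser, and only through a /.reserved/raw path. A
hedged sketch, with a hypothetical attribute name and path:

    FileSystem fs = FileSystem.get(conf);
    // Throws AccessControlException for non-superusers or non-raw paths.
    byte[] value = fs.getXAttr(
        new Path("/.reserved/raw/zone/file"), "raw.demo.attr");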

Propchange: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/hdfs/
------------------------------------------------------------------------------
  Merged /hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/hdfs:r1594376-1619194

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestXAttr.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestXAttr.java?rev=1619197&r1=1619196&r2=1619197&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestXAttr.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestXAttr.java Wed Aug 20 18:39:03 2014
@@ -29,7 +29,7 @@ import org.junit.Test;
  * Tests for <code>XAttr</code> objects.
  */
 public class TestXAttr {
-  private static XAttr XATTR, XATTR1, XATTR2, XATTR3, XATTR4;
+  private static XAttr XATTR, XATTR1, XATTR2, XATTR3, XATTR4, XATTR5;
   
   @BeforeClass
   public static void setUp() throws Exception {
@@ -58,6 +58,11 @@ public class TestXAttr {
       .setName("name")
       .setValue(value)
       .build();
+    XATTR5 = new XAttr.Builder()
+      .setNameSpace(XAttr.NameSpace.RAW)
+      .setName("name")
+      .setValue(value)
+      .build();
   }
   
   @Test
@@ -65,14 +70,17 @@ public class TestXAttr {
     assertNotSame(XATTR1, XATTR2);
     assertNotSame(XATTR2, XATTR3);
     assertNotSame(XATTR3, XATTR4);
+    assertNotSame(XATTR4, XATTR5);
     assertEquals(XATTR, XATTR1);
     assertEquals(XATTR1, XATTR1);
     assertEquals(XATTR2, XATTR2);
     assertEquals(XATTR3, XATTR3);
     assertEquals(XATTR4, XATTR4);
+    assertEquals(XATTR5, XATTR5);
     assertFalse(XATTR1.equals(XATTR2));
     assertFalse(XATTR2.equals(XATTR3));
     assertFalse(XATTR3.equals(XATTR4));
+    assertFalse(XATTR4.equals(XATTR5));
   }
   
   @Test
@@ -81,5 +89,6 @@ public class TestXAttr {
     assertFalse(XATTR1.hashCode() == XATTR2.hashCode());
     assertFalse(XATTR2.hashCode() == XATTR3.hashCode());
     assertFalse(XATTR3.hashCode() == XATTR4.hashCode());
+    assertFalse(XATTR4.hashCode() == XATTR5.hashCode());
   }
 }

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java?rev=1619197&r1=1619196&r2=1619197&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java Wed Aug 20 18:39:03 2014
@@ -27,6 +27,7 @@ import org.apache.commons.io.FileUtils;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.crypto.key.KeyProvider;
 import org.apache.hadoop.fs.*;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FileSystem.Statistics;
@@ -78,6 +79,7 @@ import org.junit.Assume;
 import java.io.*;
 import java.net.*;
 import java.nio.ByteBuffer;
+import java.security.NoSuchAlgorithmException;
 import java.security.PrivilegedExceptionAction;
 import java.util.*;
 import java.util.concurrent.TimeoutException;
@@ -86,6 +88,7 @@ import static org.apache.hadoop.hdfs.DFS
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
 
 /** Utilities for HDFS tests */
 public class DFSTestUtil {
@@ -1305,6 +1308,71 @@ public class DFSTestUtil {
   }
 
   /**
+   * Verify that two files have the same contents.
+   *
+   * @param fs The file system containing the two files.
+   * @param p1 The path of the first file.
+   * @param p2 The path of the second file.
+   * @param len The length of the two files.
+   * @throws IOException
+   */
+  public static void verifyFilesEqual(FileSystem fs, Path p1, Path p2, int len)
+      throws IOException {
+    final FSDataInputStream in1 = fs.open(p1);
+    final FSDataInputStream in2 = fs.open(p2);
+    try {
+      for (int i = 0; i < len; i++) {
+        assertEquals("Mismatch at byte " + i, in1.read(), in2.read());
+      }
+    } finally {
+      // Close the streams even if an assertion fails, matching
+      // verifyFilesNotEqual below.
+      in1.close();
+      in2.close();
+    }
+  }
+
+  /**
+   * Verify that two files have different contents.
+   *
+   * @param fs The file system containing the two files.
+   * @param p1 The path of the first file.
+   * @param p2 The path of the second file.
+   * @param len The length of the two files.
+   * @throws IOException
+   */
+  public static void verifyFilesNotEqual(FileSystem fs, Path p1, Path p2,
+      int len)
+          throws IOException {
+    final FSDataInputStream in1 = fs.open(p1);
+    final FSDataInputStream in2 = fs.open(p2);
+    try {
+      for (int i = 0; i < len; i++) {
+        if (in1.read() != in2.read()) {
+          return;
+        }
+      }
+      fail("files are equal, but should not be");
+    } finally {
+      in1.close();
+      in2.close();
+    }
+  }
+
+  /**
+   * Helper function to create a key in the Key Provider.
+   *
+   * @param keyName The name of the key to create
+   * @param cluster The cluster to create it in
+   * @param conf Configuration to use
+   */
+  public static void createKey(String keyName, MiniDFSCluster cluster,
+                                Configuration conf)
+          throws NoSuchAlgorithmException, IOException {
+    KeyProvider provider = cluster.getNameNode().getNamesystem().getProvider();
+    final KeyProvider.Options options = KeyProvider.options(conf);
+    options.setDescription(keyName);
+    options.setBitLength(128);
+    provider.createKey(keyName, options);
+    provider.flush();
+  }
+
+  /**
    * @return the node which is expected to run the recovery of the
    * given block, which is known to be under construction inside the
   * given NameNode.
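A hedged usage sketch for the three helpers added above, from a test that
already has a MiniDFSCluster, a FileSystem handle, and an encryption zone
set up (key name and paths hypothetical):

    DFSTestUtil.createKey("testKey", cluster, conf);
    // ... create a zone backed by "testKey", then write the test files ...
    DFSTestUtil.verifyFilesEqual(fs, new Path("/ez/a"),
        new Path("/ez/aCopy"), 1024);
    DFSTestUtil.verifyFilesNotEqual(fs, new Path("/ez/a"),
        new Path("/plain/b"), 1024);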

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java?rev=1619197&r1=1619196&r2=1619197&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java Wed Aug 20 18:39:03 2014
@@ -24,6 +24,7 @@ import static org.junit.Assert.assertTru
 import static org.junit.Assert.fail;
 import static org.mockito.Matchers.any;
 import static org.mockito.Matchers.anyBoolean;
+import static org.mockito.Matchers.anyList;
 import static org.mockito.Matchers.anyLong;
 import static org.mockito.Matchers.anyObject;
 import static org.mockito.Matchers.anyShort;
@@ -51,6 +52,7 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.crypto.CipherSuite;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.fs.CreateFlag;
 import org.apache.hadoop.fs.FSDataInputStream;
@@ -253,16 +255,16 @@ public class TestDFSClientRetries {
     Mockito.doReturn(
             new HdfsFileStatus(0, false, 1, 1024, 0, 0, new FsPermission(
                 (short) 777), "owner", "group", new byte[0], new byte[0],
-                1010, 0)).when(mockNN).getFileInfo(anyString());
+                1010, 0, null)).when(mockNN).getFileInfo(anyString());
     
     Mockito.doReturn(
             new HdfsFileStatus(0, false, 1, 1024, 0, 0, new FsPermission(
                 (short) 777), "owner", "group", new byte[0], new byte[0],
-                1010, 0))
+                1010, 0, null))
         .when(mockNN)
         .create(anyString(), (FsPermission) anyObject(), anyString(),
             (EnumSetWritable<CreateFlag>) anyObject(), anyBoolean(),
-            anyShort(), anyLong());
+            anyShort(), anyLong(), (List<CipherSuite>) anyList());
 
     final DFSClient client = new DFSClient(null, mockNN, conf, null);
     OutputStream os = client.create("testfile", true);
@@ -494,7 +496,8 @@ public class TestDFSClientRetries {
       List<LocatedBlock> badBlocks = new ArrayList<LocatedBlock>();
       badBlocks.add(badLocatedBlock);
       return new LocatedBlocks(goodBlockList.getFileLength(), false,
-                               badBlocks, null, true);
+                               badBlocks, null, true,
+                               null);
     }
   }