Posted to hdfs-commits@hadoop.apache.org by wa...@apache.org on 2014/07/25 01:55:17 UTC

svn commit: r1613334 - in /hadoop/common/branches/fs-encryption/hadoop-hdfs-project: hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/mount/ hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/ hadoop-hdfs/ hadoop-hdfs/src/main/java/ ...

Author: wang
Date: Thu Jul 24 23:55:15 2014
New Revision: 1613334

URL: http://svn.apache.org/r1613334
Log:
Merge from trunk to branch

Added:
    hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/hadoop-0.23-reserved.tgz
      - copied unchanged from r1613333, hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/hadoop-0.23-reserved.tgz
    hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/hadoop-1-reserved.tgz
      - copied unchanged from r1613333, hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/hadoop-1-reserved.tgz
Removed:
    hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/DirectBufferPool.java
    hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestDirectBufferPool.java
Modified:
    hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/   (props changed)
    hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/mount/RpcProgramMountd.java
    hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java
    hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
    hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/   (props changed)
    hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderLocal.java
    hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderLocalLegacy.java
    hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
    hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/PacketReceiver.java
    hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceScanner.java
    hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
    hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
    hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java
    hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java
    hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
    hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/XAttrPermissionFilter.java
    hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
    hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/XAttrNameParam.java
    hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/   (props changed)
    hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html
    hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/index.html
    hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/secondary/   (props changed)
    hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/secondary/index.html
    hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java
    hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgradeFromImage.java
    hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSXAttrBaseTest.java
    hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNamenodeRetryCache.java
    hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRetryCacheWithHA.java
    hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java
    hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFSForHA.java
    hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/resources/TestParam.java
    hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored
    hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored.xml

Propchange: hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs:r1612916-1613333

Modified: hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/mount/RpcProgramMountd.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/mount/RpcProgramMountd.java?rev=1613334&r1=1613333&r2=1613334&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/mount/RpcProgramMountd.java (original)
+++ hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/mount/RpcProgramMountd.java Thu Jul 24 23:55:15 2014
@@ -104,6 +104,10 @@ public class RpcProgramMountd extends Rp
 
   @Override
   public XDR mnt(XDR xdr, XDR out, int xid, InetAddress client) {
+    if (hostsMatcher == null) {
+      return MountResponse.writeMNTResponse(Nfs3Status.NFS3ERR_ACCES, out, xid,
+          null);
+    }
     AccessPrivilege accessPrivilege = hostsMatcher.getAccessPrivilege(client);
     if (accessPrivilege == AccessPrivilege.NONE) {
       return MountResponse.writeMNTResponse(Nfs3Status.NFS3ERR_ACCES, out, xid,
@@ -208,16 +212,23 @@ public class RpcProgramMountd extends Rp
     } else if (mntproc == MNTPROC.UMNTALL) {
       umntall(out, xid, client);
     } else if (mntproc == MNTPROC.EXPORT) {
-      // Currently only support one NFS export 
+      // Currently only support one NFS export
       List<NfsExports> hostsMatchers = new ArrayList<NfsExports>();
-      hostsMatchers.add(hostsMatcher);
-      out = MountResponse.writeExportList(out, xid, exports, hostsMatchers);
+      if (hostsMatcher != null) {
+        hostsMatchers.add(hostsMatcher);
+        out = MountResponse.writeExportList(out, xid, exports, hostsMatchers);
+      } else {
+        // This means there are no valid exports provided.
+        RpcAcceptedReply.getInstance(xid,
+          RpcAcceptedReply.AcceptState.PROC_UNAVAIL, new VerifierNone()).write(
+          out);
+      }
     } else {
       // Invalid procedure
       RpcAcceptedReply.getInstance(xid,
           RpcAcceptedReply.AcceptState.PROC_UNAVAIL, new VerifierNone()).write(
           out);
-    }  
+    }
     ChannelBuffer buf = ChannelBuffers.wrappedBuffer(out.asReadOnlyWrap().buffer());
     RpcResponse rsp = new RpcResponse(buf, info.remoteAddress());
     RpcUtil.sendRpcResponse(ctx, rsp);

Modified: hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java?rev=1613334&r1=1613333&r2=1613334&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java (original)
+++ hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java Thu Jul 24 23:55:15 2014
@@ -2123,8 +2123,11 @@ public class RpcProgramNfs3 extends RpcP
     if (!doPortMonitoring(remoteAddress)) {
       return false;
     }
-    
+
     // Check export table
+    if (exports == null) {
+        return false;
+    }
     InetAddress client = ((InetSocketAddress) remoteAddress).getAddress();
     AccessPrivilege access = exports.getAccessPrivilege(client);
     if (access == AccessPrivilege.NONE) {
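
Both hunks above close the same hole: when nfs.exports.allowed.hosts cannot be parsed (see HDFS-6455 in the CHANGES.txt section below), no exports matcher is ever constructed, so hostsMatcher/exports arrive here as null and the daemon would die on a NullPointerException instead of refusing the request. A minimal standalone sketch of the guard pattern, using stand-in types rather than the real Hadoop NFS classes:

    import java.net.InetAddress;

    public class ExportGuardSketch {
      enum AccessPrivilege { NONE, READ_ONLY, READ_WRITE }

      /** Stand-in for the parsed nfs.exports.allowed.hosts matcher. */
      interface NfsExports {
        AccessPrivilege getAccessPrivilege(InetAddress client);
      }

      private final NfsExports hostsMatcher; // null when the config is invalid

      ExportGuardSketch(NfsExports hostsMatcher) {
        this.hostsMatcher = hostsMatcher;
      }

      /** True only when an export exists and the client is allowed in. */
      boolean allowAccess(InetAddress client) {
        if (hostsMatcher == null) {
          // No valid exports: refuse instead of hitting an NPE below.
          return false;
        }
        return hostsMatcher.getAccessPrivilege(client) != AccessPrivilege.NONE;
      }
    }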

Modified: hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt?rev=1613334&r1=1613333&r2=1613334&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt (original)
+++ hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt Thu Jul 24 23:55:15 2014
@@ -252,6 +252,9 @@ Trunk (Unreleased)
     HDFS-5794. Fix the inconsistency of layout version number of 
     ADD_DATANODE_AND_STORAGE_UUIDS between trunk and branch-2. (jing9)
 
+    HDFS-6657. Remove link to 'Legacy UI' in trunk's Namenode UI.
+    (Vinayakumar B via wheat9)
+
 Release 2.6.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES
@@ -357,6 +360,16 @@ Release 2.6.0 - UNRELEASED
     HDFS-6731. Run "hdfs zkfc-formatZK" on a server in a non-namenode will cause
     a null pointer exception. (Masatake Iwasaki via brandonli)
 
+    HDFS-6114. Block Scan log rolling will never happen if blocks are written
+    continuously, leading to a huge dncp_block_verification.log.curr.
+    (vinayakumarb via cmccabe)
+
+    HDFS-6455. NFS: Exception should be added in NFS log for invalid separator in
+    nfs.exports.allowed.hosts. (Abhiraj Butala via brandonli)
+
+    HDFS-6715. Webhdfs won't fail over when it gets java.io.IOException: Namenode
+    is in startup mode. (jing9)
+
 Release 2.5.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES
@@ -892,6 +905,12 @@ Release 2.5.0 - UNRELEASED
     HDFS-6703. NFS: Files can be deleted from a read-only mount
     (Srikanth Upputuri via brandonli)
 
+    HDFS-6422. getfattr in CLI doesn't throw exception or return non-0 return code 
+    when xattr doesn't exist. (Charles Lamb via umamahesh)
+
+    HDFS-6696. Name node cannot start if the path of a file under
+    construction contains ".snapshot". (wang)
+
   BREAKDOWN OF HDFS-2006 SUBTASKS AND RELATED JIRAS
 
     HDFS-6299. Protobuf for XAttr and client-side implementation. (Yi Liu via umamahesh)
@@ -970,6 +989,9 @@ Release 2.5.0 - UNRELEASED
     HDFS-6622. Rename and AddBlock may race and produce invalid edits (kihwal
     via cmccabe)
 
+    HDFS-6723. New NN webUI no longer displays decommissioned state for dead node.
+    (Ming Ma via wheat9)
+
 Release 2.4.1 - 2014-06-23 
 
   INCOMPATIBLE CHANGES

Propchange: hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java:r1612916-1613333

Modified: hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderLocal.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderLocal.java?rev=1613334&r1=1613333&r2=1613334&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderLocal.java (original)
+++ hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderLocal.java Thu Jul 24 23:55:15 2014
@@ -31,7 +31,7 @@ import org.apache.hadoop.hdfs.server.dat
 import org.apache.hadoop.hdfs.server.datanode.CachingStrategy;
 import org.apache.hadoop.hdfs.shortcircuit.ClientMmap;
 import org.apache.hadoop.hdfs.shortcircuit.ShortCircuitReplica;
-import org.apache.hadoop.hdfs.util.DirectBufferPool;
+import org.apache.hadoop.util.DirectBufferPool;
 import org.apache.hadoop.util.DataChecksum;
 
 import com.google.common.annotations.VisibleForTesting;

Modified: hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderLocalLegacy.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderLocalLegacy.java?rev=1613334&r1=1613333&r2=1613334&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderLocalLegacy.java (original)
+++ hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderLocalLegacy.java Thu Jul 24 23:55:15 2014
@@ -40,7 +40,7 @@ import org.apache.hadoop.hdfs.protocol.E
 import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
 import org.apache.hadoop.hdfs.server.datanode.BlockMetadataHeader;
 import org.apache.hadoop.hdfs.shortcircuit.ClientMmap;
-import org.apache.hadoop.hdfs.util.DirectBufferPool;
+import org.apache.hadoop.util.DirectBufferPool;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.security.UserGroupInformation;

Modified: hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java?rev=1613334&r1=1613333&r2=1613334&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java (original)
+++ hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java Thu Jul 24 23:55:15 2014
@@ -1358,6 +1358,6 @@ public interface ClientProtocol {
    * @param xAttr <code>XAttr</code> to remove
    * @throws IOException
    */
-  @Idempotent
+  @AtMostOnce
   public void removeXAttr(String src, XAttr xAttr) throws IOException;
 }
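
Re-annotating removeXAttr from @Idempotent to @AtMostOnce is the client-visible half of the retry-cache work further down: once removing a missing xattr is an error (see the FSNamesystem hunk below), replaying the call after a lost reply would fail even though the first attempt succeeded. A toy standalone illustration of the lost-reply hazard, with a plain map standing in for the xattr store (all names here are hypothetical):

    import java.util.HashMap;
    import java.util.Map;

    public class RemoveXAttrRetrySketch {
      private final Map<String, byte[]> xattrs = new HashMap<String, byte[]>();

      /** Mirrors the new server behavior: removing a missing xattr fails. */
      void removeXAttr(String name) {
        if (xattrs.remove(name) == null) {
          throw new IllegalStateException(
              "No matching attributes found for remove operation");
        }
      }

      public static void main(String[] args) {
        RemoveXAttrRetrySketch fs = new RemoveXAttrRetrySketch();
        fs.xattrs.put("user.a1", new byte[] { 1 });
        fs.removeXAttr("user.a1"); // succeeds, but suppose the reply is lost
        try {
          fs.removeXAttr("user.a1"); // a blind retry of the same call
        } catch (IllegalStateException e) {
          // Without @AtMostOnce and the retry cache, the client would see
          // this spurious failure for an operation that actually succeeded.
          System.out.println("retry failed: " + e.getMessage());
        }
      }
    }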

Modified: hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/PacketReceiver.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/PacketReceiver.java?rev=1613334&r1=1613333&r2=1613334&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/PacketReceiver.java (original)
+++ hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/PacketReceiver.java Thu Jul 24 23:55:15 2014
@@ -27,7 +27,7 @@ import java.nio.channels.ReadableByteCha
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.hdfs.util.DirectBufferPool;
+import org.apache.hadoop.util.DirectBufferPool;
 import org.apache.hadoop.io.IOUtils;
 
 import com.google.common.base.Preconditions;
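
These three hunks (BlockReaderLocal, BlockReaderLocalLegacy, PacketReceiver) are a pure package move: DirectBufferPool now lives in hadoop-common as org.apache.hadoop.util.DirectBufferPool, and the HDFS copy is deleted (see the Removed list at the top). Only the import changes for callers. A minimal usage sketch, assuming hadoop-common is on the classpath and the pool keeps its getBuffer/returnBuffer accessors:

    import java.nio.ByteBuffer;

    import org.apache.hadoop.util.DirectBufferPool; // was o.a.h.hdfs.util

    public class DirectBufferPoolSketch {
      public static void main(String[] args) {
        DirectBufferPool pool = new DirectBufferPool();
        ByteBuffer buf = pool.getBuffer(64 * 1024); // pooled direct buffer
        try {
          buf.putInt(42); // ... fill and drain as PacketReceiver does ...
        } finally {
          buf.clear();
          pool.returnBuffer(buf); // recycle rather than waiting on GC
        }
      }
    }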

Modified: hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceScanner.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceScanner.java?rev=1613334&r1=1613333&r2=1613334&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceScanner.java (original)
+++ hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceScanner.java Thu Jul 24 23:55:15 2014
@@ -84,6 +84,10 @@ class BlockPoolSliceScanner {
   
   private final SortedSet<BlockScanInfo> blockInfoSet
       = new TreeSet<BlockScanInfo>(BlockScanInfo.LAST_SCAN_TIME_COMPARATOR);
+
+  private final SortedSet<BlockScanInfo> newBlockInfoSet =
+      new TreeSet<BlockScanInfo>(BlockScanInfo.LAST_SCAN_TIME_COMPARATOR);
+
   private final GSet<Block, BlockScanInfo> blockMap
       = new LightWeightGSet<Block, BlockScanInfo>(
           LightWeightGSet.computeCapacity(0.5, "BlockMap"));
@@ -195,7 +199,7 @@ class BlockPoolSliceScanner {
       BlockScanInfo info = new BlockScanInfo( block );
       info.lastScanTime = scanTime--; 
       //still keep 'info.lastScanType' to NONE.
-      addBlockInfo(info);
+      addBlockInfo(info, false);
     }
 
     RollingLogs rollingLogs = null;
@@ -221,25 +225,42 @@ class BlockPoolSliceScanner {
     // Should we change throttler bandwidth every time bytesLeft changes?
     // not really required.
   }
-  
-  private synchronized void addBlockInfo(BlockScanInfo info) {
-    boolean added = blockInfoSet.add(info);
+
+  /**
+   * Add the BlockScanInfo to sorted set of blockScanInfo
+   * @param info BlockScanInfo to be added
+   * @param isNewBlock true if the block is the new Block, false if
+   *          BlockScanInfo is being updated with new scanTime
+   */
+  private synchronized void addBlockInfo(BlockScanInfo info,
+      boolean isNewBlock) {
+    boolean added = false;
+    if (isNewBlock) {
+      // check whether the block already present
+      boolean exists = blockInfoSet.contains(info);
+      added = !exists && newBlockInfoSet.add(info);
+    } else {
+      added = blockInfoSet.add(info);
+    }
     blockMap.put(info);
     
     if (added) {
       updateBytesToScan(info.getNumBytes(), info.lastScanTime);
     }
   }
-  
+
   private synchronized void delBlockInfo(BlockScanInfo info) {
     boolean exists = blockInfoSet.remove(info);
+    if (!exists){
+      exists = newBlockInfoSet.remove(info);
+    }
     blockMap.remove(info);
 
     if (exists) {
       updateBytesToScan(-info.getNumBytes(), info.lastScanTime);
     }
   }
-  
+
   /** Update blockMap by the given LogEntry */
   private synchronized void updateBlockInfo(LogEntry e) {
     BlockScanInfo info = blockMap.get(new Block(e.blockId, 0, e.genStamp));
@@ -249,7 +270,7 @@ class BlockPoolSliceScanner {
       delBlockInfo(info);
       info.lastScanTime = e.verificationTime;
       info.lastScanType = ScanType.VERIFICATION_SCAN;
-      addBlockInfo(info);
+      addBlockInfo(info, false);
     }
   }
 
@@ -275,14 +296,14 @@ class BlockPoolSliceScanner {
     info = new BlockScanInfo(block.getLocalBlock());    
     info.lastScanTime = getNewBlockScanTime();
     
-    addBlockInfo(info);
+    addBlockInfo(info, true);
     adjustThrottler();
   }
   
   /** Deletes the block from internal structures */
   synchronized void deleteBlock(Block block) {
     BlockScanInfo info = blockMap.get(block);
-    if ( info != null ) {
+    if (info != null) {
       delBlockInfo(info);
     }
   }
@@ -319,7 +340,7 @@ class BlockPoolSliceScanner {
     info.lastScanType = type;
     info.lastScanTime = now;
     info.lastScanOk = scanOk;
-    addBlockInfo(info);
+    addBlockInfo(info, false);
         
     // Don't update meta data if the verification failed.
     if (!scanOk) {
@@ -578,7 +599,7 @@ class BlockPoolSliceScanner {
           delBlockInfo(info);        
           info.lastScanTime = lastScanTime;
           lastScanTime += verifyInterval;
-          addBlockInfo(info);
+          addBlockInfo(info, false);
         }
       }
     }
@@ -674,12 +695,21 @@ class BlockPoolSliceScanner {
       throw e;
     } finally {
       rollVerificationLogs();
+      rollNewBlocksInfo();
       if (LOG.isDebugEnabled()) {
         LOG.debug("Done scanning block pool: " + blockPoolId);
       }
     }
   }
-  
+
+  // add new blocks to scan in next iteration
+  private synchronized void rollNewBlocksInfo() {
+    for (BlockScanInfo newBlock : newBlockInfoSet) {
+      blockInfoSet.add(newBlock);
+    }
+    newBlockInfoSet.clear();
+  }
+
   private synchronized void rollVerificationLogs() {
     if (verificationLog != null) {
       try {
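
The scanner change (HDFS-6114) introduces a staging set: blocks added while an iteration is running land in newBlockInfoSet and are merged into blockInfoSet only by rollNewBlocksInfo(), after the verification log rolls. Without the split, a steady stream of newly written blocks keeps the iteration perpetually incomplete, so dncp_block_verification.log.curr never rolls and grows without bound. A condensed standalone sketch of the two-set pattern (stand-in types, not the real BlockScanInfo/GSet):

    import java.util.Comparator;
    import java.util.SortedSet;
    import java.util.TreeSet;

    public class StagedScanQueueSketch {
      /** Stand-in for BlockScanInfo: block id plus last scan time. */
      static class Info {
        final long id;
        final long lastScanTime;
        Info(long id, long lastScanTime) {
          this.id = id;
          this.lastScanTime = lastScanTime;
        }
      }

      /** Oldest-first, matching LAST_SCAN_TIME_COMPARATOR's intent. */
      private static final Comparator<Info> OLDEST_FIRST =
          new Comparator<Info>() {
            @Override
            public int compare(Info a, Info b) {
              int c = Long.compare(a.lastScanTime, b.lastScanTime);
              return c != 0 ? c : Long.compare(a.id, b.id);
            }
          };

      private final SortedSet<Info> scanQueue = new TreeSet<Info>(OLDEST_FIRST);
      private final SortedSet<Info> stagedNewBlocks =
          new TreeSet<Info>(OLDEST_FIRST);

      /** New blocks are staged so they cannot stall the running iteration. */
      synchronized void addBlockInfo(Info info, boolean isNewBlock) {
        if (isNewBlock) {
          if (!scanQueue.contains(info)) {
            stagedNewBlocks.add(info);
          }
        } else {
          scanQueue.add(info); // rescheduling a block that was just verified
        }
        // (the real code also updates blockMap and the bytes-to-scan counter)
      }

      synchronized void delBlockInfo(Info info) {
        if (!scanQueue.remove(info)) {
          stagedNewBlocks.remove(info); // it may still be staged
        }
      }

      /** Called once per iteration, right after the verification log rolls. */
      synchronized void rollNewBlocksInfo() {
        scanQueue.addAll(stagedNewBlocks);
        stagedNewBlocks.clear();
      }
    }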

Modified: hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java?rev=1613334&r1=1613333&r2=1613334&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java (original)
+++ hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java Thu Jul 24 23:55:15 2014
@@ -1074,10 +1074,11 @@ public class FSEditLog implements LogsPu
     logEdit(op);
   }
   
-  void logRemoveXAttrs(String src, List<XAttr> xAttrs) {
+  void logRemoveXAttrs(String src, List<XAttr> xAttrs, boolean toLogRpcIds) {
     final RemoveXAttrOp op = RemoveXAttrOp.getInstance();
     op.src = src;
     op.xAttrs = xAttrs;
+    logRpcIds(op, toLogRpcIds);
     logEdit(op);
   }
 

Modified: hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java?rev=1613334&r1=1613333&r2=1613334&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java (original)
+++ hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java Thu Jul 24 23:55:15 2014
@@ -821,6 +821,10 @@ public class FSEditLogLoader {
       RemoveXAttrOp removeXAttrOp = (RemoveXAttrOp) op;
       fsDir.unprotectedRemoveXAttrs(removeXAttrOp.src,
           removeXAttrOp.xAttrs);
+      if (toAddRetryCache) {
+        fsNamesys.addCacheEntry(removeXAttrOp.rpcClientId,
+            removeXAttrOp.rpcCallId);
+      }
       break;
     }
     default:
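
This is the replay half of the retry-cache work: when edits are applied with toAddRetryCache set (on a standby, or while loading edits at startup), the clientId/callId logged with each RemoveXAttrOp is fed back into the retry cache, so a client retry that lands after a failover is still recognized as a duplicate. A minimal sketch of the replay step, with simplified stand-in types:

    import java.util.HashSet;
    import java.util.Set;

    public class ReplayRetryCacheSketch {
      /** Stand-in for the retry cache: remembers (clientId, callId) keys. */
      private final Set<String> seen = new HashSet<String>();

      /** Stand-in for a logged op carrying its originating RPC identity. */
      static class RemoveXAttrOp {
        final String rpcClientId;
        final int rpcCallId;
        RemoveXAttrOp(String rpcClientId, int rpcCallId) {
          this.rpcClientId = rpcClientId;
          this.rpcCallId = rpcCallId;
        }
      }

      void apply(RemoveXAttrOp op, boolean toAddRetryCache) {
        // ... apply the removal to the namespace ...
        if (toAddRetryCache) {
          // Re-seed the cache so a client retry after failover is a no-op.
          seen.add(op.rpcClientId + ":" + op.rpcCallId);
        }
      }
    }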

Modified: hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java?rev=1613334&r1=1613333&r2=1613334&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java (original)
+++ hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java Thu Jul 24 23:55:15 2014
@@ -3551,6 +3551,7 @@ public abstract class FSEditLogOp {
       XAttrEditLogProto p = XAttrEditLogProto.parseDelimitedFrom(in);
       src = p.getSrc();
       xAttrs = PBHelper.convertXAttrs(p.getXAttrsList());
+      readRpcIds(in, logVersion);
     }
 
     @Override
@@ -3561,18 +3562,22 @@ public abstract class FSEditLogOp {
       }
       b.addAllXAttrs(PBHelper.convertXAttrProto(xAttrs));
       b.build().writeDelimitedTo(out);
+      // clientId and callId
+      writeRpcIds(rpcClientId, rpcCallId, out);
     }
 
     @Override
     protected void toXml(ContentHandler contentHandler) throws SAXException {
       XMLUtils.addSaxString(contentHandler, "SRC", src);
       appendXAttrsToXml(contentHandler, xAttrs);
+      appendRpcIdsToXml(contentHandler, rpcClientId, rpcCallId);
     }
 
     @Override
     void fromXml(Stanza st) throws InvalidXmlException {
       src = st.getValue("SRC");
       xAttrs = readXAttrsFromXml(st);
+      readRpcIdsFromXml(st);
     }
   }
   

Modified: hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java?rev=1613334&r1=1613333&r2=1613334&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java (original)
+++ hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java Thu Jul 24 23:55:15 2014
@@ -614,6 +614,16 @@ public class FSImageFormat {
     INodeDirectory parentINode = fsDir.rootDir;
     for (long i = 0; i < numFiles; i++) {
       pathComponents = FSImageSerialization.readPathComponents(in);
+      for (int j=0; j < pathComponents.length; j++) {
+        byte[] newComponent = renameReservedComponentOnUpgrade
+            (pathComponents[j], getLayoutVersion());
+        if (!Arrays.equals(newComponent, pathComponents[j])) {
+          String oldPath = DFSUtil.byteArray2PathString(pathComponents);
+          pathComponents[j] = newComponent;
+          String newPath = DFSUtil.byteArray2PathString(pathComponents);
+          LOG.info("Renaming reserved path " + oldPath + " to " + newPath);
+        }
+      }
       final INode newNode = loadINode(
           pathComponents[pathComponents.length-1], false, in, counter);
 
@@ -926,6 +936,7 @@ public class FSImageFormat {
           oldnode = namesystem.dir.getInode(cons.getId()).asFile();
           inSnapshot = true;
         } else {
+          path = renameReservedPathsOnUpgrade(path, getLayoutVersion());
           final INodesInPath iip = fsDir.getLastINodeInPath(path);
           oldnode = INodeFile.valueOf(iip.getINode(0), path);
         }
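
Both hunks route old-image path components through the reserved-name renamer, now covering files under construction as well (HDFS-6696 in CHANGES.txt above): a pre-snapshots image that legitimately contains ".snapshot" components is rewritten on upgrade instead of aborting NameNode startup. A simplified standalone sketch of the component-rewrite loop; renameReservedComponentOnUpgrade here is a stand-in for the real layout-version-gated logic:

    import java.nio.charset.StandardCharsets;
    import java.util.Arrays;

    public class ReservedRenameSketch {
      /** Simplified stand-in: rename ".snapshot" to the configured value. */
      static byte[] renameReservedComponentOnUpgrade(byte[] component) {
        String s = new String(component, StandardCharsets.UTF_8);
        return ".snapshot".equals(s)
            ? ".user-snapshot".getBytes(StandardCharsets.UTF_8)
            : component;
      }

      static String joinPath(byte[][] components) {
        StringBuilder sb = new StringBuilder();
        for (byte[] c : components) {
          sb.append('/').append(new String(c, StandardCharsets.UTF_8));
        }
        return sb.toString();
      }

      public static void main(String[] args) {
        byte[][] pathComponents = {
            "dir1".getBytes(StandardCharsets.UTF_8),
            ".snapshot".getBytes(StandardCharsets.UTF_8),
            "open".getBytes(StandardCharsets.UTF_8),
        };
        for (int j = 0; j < pathComponents.length; j++) {
          byte[] renamed = renameReservedComponentOnUpgrade(pathComponents[j]);
          if (!Arrays.equals(renamed, pathComponents[j])) {
            String oldPath = joinPath(pathComponents);
            pathComponents[j] = renamed;
            System.out.println("Renaming reserved path " + oldPath
                + " to " + joinPath(pathComponents));
          }
        }
      }
    }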

Modified: hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java?rev=1613334&r1=1613333&r2=1613334&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java (original)
+++ hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java Thu Jul 24 23:55:15 2014
@@ -8658,11 +8658,12 @@ public class FSNamesystem implements Nam
     nnConf.checkXAttrsConfigFlag();
     FSPermissionChecker pc = getPermissionChecker();
     boolean getAll = xAttrs == null || xAttrs.isEmpty();
-    List<XAttr> filteredXAttrs = null;
     if (!getAll) {
-      filteredXAttrs = XAttrPermissionFilter.filterXAttrsForApi(pc, xAttrs);
-      if (filteredXAttrs.isEmpty()) {
-        return filteredXAttrs;
+      try {
+        XAttrPermissionFilter.checkPermissionForApi(pc, xAttrs);
+      } catch (AccessControlException e) {
+        logAuditEvent(false, "getXAttrs", src);
+        throw e;
       }
     }
     checkOperation(OperationCategory.READ);
@@ -8681,15 +8682,21 @@ public class FSNamesystem implements Nam
         if (filteredAll == null || filteredAll.isEmpty()) {
           return null;
         }
-        List<XAttr> toGet = Lists.newArrayListWithCapacity(filteredXAttrs.size());
-        for (XAttr xAttr : filteredXAttrs) {
+        List<XAttr> toGet = Lists.newArrayListWithCapacity(xAttrs.size());
+        for (XAttr xAttr : xAttrs) {
+          boolean foundIt = false;
           for (XAttr a : filteredAll) {
             if (xAttr.getNameSpace() == a.getNameSpace()
                 && xAttr.getName().equals(a.getName())) {
               toGet.add(a);
+              foundIt = true;
               break;
             }
           }
+          if (!foundIt) {
+            throw new IOException(
+                "At least one of the attributes provided was not found.");
+          }
         }
         return toGet;
       }
@@ -8723,17 +8730,42 @@ public class FSNamesystem implements Nam
       readUnlock();
     }
   }
-  
+
+  /**
+   * Remove an xattr for a file or directory.
+   *
+   * @param src
+   *          - path to remove the xattr from
+   * @param xAttr
+   *          - xAttr to remove
+   * @throws AccessControlException
+   * @throws SafeModeException
+   * @throws UnresolvedLinkException
+   * @throws IOException
+   */
   void removeXAttr(String src, XAttr xAttr) throws IOException {
-    nnConf.checkXAttrsConfigFlag();
-    HdfsFileStatus resultingStat = null;
-    FSPermissionChecker pc = getPermissionChecker();
+    CacheEntry cacheEntry = RetryCache.waitForCompletion(retryCache);
+    if (cacheEntry != null && cacheEntry.isSuccess()) {
+      return; // Return previous response
+    }
+    boolean success = false;
     try {
-      XAttrPermissionFilter.checkPermissionForApi(pc, xAttr);
+      removeXAttrInt(src, xAttr, cacheEntry != null);
+      success = true;
     } catch (AccessControlException e) {
       logAuditEvent(false, "removeXAttr", src);
       throw e;
+    } finally {
+      RetryCache.setState(cacheEntry, success);
     }
+  }
+
+  void removeXAttrInt(String src, XAttr xAttr, boolean logRetryCache)
+      throws IOException {
+    nnConf.checkXAttrsConfigFlag();
+    HdfsFileStatus resultingStat = null;
+    FSPermissionChecker pc = getPermissionChecker();
+      XAttrPermissionFilter.checkPermissionForApi(pc, xAttr);
     checkOperation(OperationCategory.WRITE);
     byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
     writeLock();
@@ -8747,12 +8779,12 @@ public class FSNamesystem implements Nam
       xAttrs.add(xAttr);
       List<XAttr> removedXAttrs = dir.removeXAttrs(src, xAttrs);
       if (removedXAttrs != null && !removedXAttrs.isEmpty()) {
-        getEditLog().logRemoveXAttrs(src, removedXAttrs);
+        getEditLog().logRemoveXAttrs(src, removedXAttrs, logRetryCache);
+      } else {
+        throw new IOException(
+            "No matching attributes found for remove operation");
       }
       resultingStat = getAuditFileInfo(src, false);
-    } catch (AccessControlException e) {
-      logAuditEvent(false, "removeXAttr", src);
-      throw e;
     } finally {
       writeUnlock();
     }
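
The removeXAttr rewrite wraps the operation in the NameNode's standard retry-cache envelope: waitForCompletion short-circuits a retry whose first attempt already succeeded, the real work moves into removeXAttrInt (which now also throws when nothing matched), and setState records the outcome in a finally block. A condensed standalone sketch of the envelope; the map below is a deliberately simplified stand-in for org.apache.hadoop.ipc.RetryCache and does not model waiting on in-flight attempts:

    import java.util.Map;
    import java.util.concurrent.ConcurrentHashMap;

    public class RetryCacheEnvelopeSketch {
      /** Simplified retry cache keyed by "clientId:callId". */
      private final Map<String, Boolean> retryCache =
          new ConcurrentHashMap<String, Boolean>();

      void removeXAttr(String rpcKey, String src, String xAttrName) {
        if (Boolean.TRUE.equals(retryCache.get(rpcKey))) {
          return; // previous attempt succeeded: replay, do not re-execute
        }
        boolean success = false;
        try {
          removeXAttrInt(src, xAttrName); // the single-execution work
          success = true;
        } finally {
          retryCache.put(rpcKey, success); // record outcome for retries
        }
      }

      private void removeXAttrInt(String src, String xAttrName) {
        // ... permission check, edit-log entry, actual removal ...
      }
    }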

Modified: hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/XAttrPermissionFilter.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/XAttrPermissionFilter.java?rev=1613334&r1=1613333&r2=1613334&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/XAttrPermissionFilter.java (original)
+++ hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/XAttrPermissionFilter.java Thu Jul 24 23:55:15 2014
@@ -26,6 +26,7 @@ import org.apache.hadoop.hdfs.XAttrHelpe
 import org.apache.hadoop.security.AccessControlException;
 
 import com.google.common.collect.Lists;
+import com.google.common.base.Preconditions;
 
 /**
  * There are four types of extended attributes <XAttr> defined by the
@@ -60,8 +61,20 @@ public class XAttrPermissionFilter {
     throw new AccessControlException("User doesn't have permission for xattr: "
         + XAttrHelper.getPrefixName(xAttr));
   }
-  
-  static List<XAttr> filterXAttrsForApi(FSPermissionChecker pc, 
+
+  static void checkPermissionForApi(FSPermissionChecker pc,
+                                    List<XAttr> xAttrs) throws AccessControlException {
+    Preconditions.checkArgument(xAttrs != null);
+    if (xAttrs.isEmpty()) {
+      return;
+    }
+
+    for (XAttr xAttr : xAttrs) {
+      checkPermissionForApi(pc, xAttr);
+    }
+  }
+
+  static List<XAttr> filterXAttrsForApi(FSPermissionChecker pc,
       List<XAttr> xAttrs) {
     assert xAttrs != null : "xAttrs can not be null";
     if (xAttrs == null || xAttrs.isEmpty()) {

Modified: hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java?rev=1613334&r1=1613333&r2=1613334&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java (original)
+++ hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java Thu Jul 24 23:55:15 2014
@@ -113,6 +113,7 @@ import org.apache.hadoop.hdfs.web.resour
 import org.apache.hadoop.hdfs.web.resources.XAttrSetFlagParam;
 import org.apache.hadoop.hdfs.web.resources.XAttrValueParam;
 import org.apache.hadoop.io.Text;
+import org.apache.hadoop.ipc.RetriableException;
 import org.apache.hadoop.ipc.Server;
 import org.apache.hadoop.net.NetworkTopology.InvalidTopologyException;
 import org.apache.hadoop.net.Node;
@@ -190,7 +191,7 @@ public class NamenodeWebHdfsMethods {
       throws IOException {
      final NamenodeProtocols np = namenode.getRpcServer();
      if (np == null) {
-       throw new IOException("Namenode is in startup mode");
+       throw new RetriableException("Namenode is in startup mode");
      }
      return np;
   }
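
Replacing the bare IOException with RetriableException (HDFS-6715) lets WebHDFS retry and failover policies treat "Namenode is in startup mode" as transient rather than fatal. A toy standalone loop showing the distinction; the types and helper below are hypothetical stand-ins, not the real Hadoop retry framework:

    import java.io.IOException;

    public class StartupRetrySketch {
      /** Stand-in for org.apache.hadoop.ipc.RetriableException. */
      static class RetriableException extends IOException {
        RetriableException(String msg) { super(msg); }
      }

      interface Namenode {
        String getFileStatus(String path) throws IOException;
      }

      /** Try each NameNode in turn; only retriable errors fail over. */
      static String getWithFailover(Namenode[] namenodes, String path)
          throws IOException {
        IOException last = null;
        for (Namenode nn : namenodes) {
          try {
            return nn.getFileStatus(path);
          } catch (RetriableException e) {
            last = e; // startup mode: try the next NameNode
          }
          // any non-retriable IOException propagates out immediately,
          // which a bare IOException could never signal
        }
        if (last == null) {
          throw new IOException("no namenodes configured");
        }
        throw last;
      }
    }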

Modified: hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/XAttrNameParam.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/XAttrNameParam.java?rev=1613334&r1=1613333&r2=1613334&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/XAttrNameParam.java (original)
+++ hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/XAttrNameParam.java Thu Jul 24 23:55:15 2014
@@ -25,8 +25,8 @@ public class XAttrNameParam extends Stri
   /** Default parameter value. **/
   public static final String DEFAULT = "";
   
-  private static Domain DOMAIN = new Domain(NAME, 
-      Pattern.compile("^(user\\.|trusted\\.|system\\.|security\\.).+"));
+  private static Domain DOMAIN = new Domain(NAME,
+      Pattern.compile(".*"));
   
   public XAttrNameParam(final String str) {
     super(DOMAIN, str == null || str.equals(DEFAULT) ? null : str);

Propchange: hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs:r1612403-1613333

Modified: hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html?rev=1613334&r1=1613333&r2=1613334&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html (original)
+++ hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html Thu Jul 24 23:55:15 2014
@@ -66,7 +66,6 @@
 <div class="row">
   <hr />
   <div class="col-xs-2"><p>Hadoop, 2014.</p></div>
-  <div class="col-xs-1 pull-right"><a style="color: #ddd" href="dfshealth.jsp">Legacy UI</a></div>
 </div>
 </div>
 
@@ -283,7 +282,7 @@
   <tr class="danger">
     <td>{name} ({xferaddr})</td>
     <td>{lastContact}</td>
-    <td>Dead{?decomissioned}, Decomissioned{/decomissioned}</td>
+    <td>Dead{?decommissioned}, Decommissioned{/decommissioned}</td>
     <td>-</td>
     <td>-</td>
     <td>-</td>

Modified: hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/index.html
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/index.html?rev=1613334&r1=1613333&r2=1613334&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/index.html (original)
+++ hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/index.html Thu Jul 24 23:55:15 2014
@@ -18,18 +18,7 @@
     "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">
 <html xmlns="http://www.w3.org/1999/xhtml">
 <head>
-<meta http-equiv="REFRESH" content="1;url=dfshealth.jsp" />
+<meta http-equiv="REFRESH" content="0;url=dfshealth.html" />
 <title>Hadoop Administration</title>
 </head>
-<body>
-<script type="text/javascript">
-//<![CDATA[
-window.location.href='dfshealth.html';
-//]]>
-</script>
-<h1>Hadoop Administration</h1>
-<ul>
-<li><a href="dfshealth.jsp">DFS Health/Status</a></li>
-</ul>
-</body>
 </html>

Propchange: hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/secondary/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/secondary:r1602934-1613333

Modified: hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/secondary/index.html
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/secondary/index.html?rev=1613334&r1=1613333&r2=1613334&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/secondary/index.html (original)
+++ hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/secondary/index.html Thu Jul 24 23:55:15 2014
@@ -21,15 +21,4 @@
   <meta http-equiv="REFRESH" content="0;url=status.html" />
   <title>Hadoop Administration</title>
 </head>
-<body>
-<script type="text/javascript">
-//<![CDATA[
-window.location.href='status.html';
-//]]>
-</script>
-<h1>Hadoop Administration</h1>
-<ul>
-  <li><a href="status.jsp">Status</a></li>
-</ul>
-</body>
 </html>
\ No newline at end of file

Modified: hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java?rev=1613334&r1=1613333&r2=1613334&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java (original)
+++ hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java Thu Jul 24 23:55:15 2014
@@ -2653,6 +2653,75 @@ public class TestDFSShell {
     }
   }
 
+  /*
+   * 1. Test that CLI throws an exception and returns non-0 when user does
+   * not have permission to read an xattr.
+   * 2. Test that CLI throws an exception and returns non-0 when a non-existent
+   * xattr is requested.
+   */
+  @Test (timeout = 120000)
+  public void testGetFAttrErrors() throws Exception {
+    final UserGroupInformation user = UserGroupInformation.
+        createUserForTesting("user", new String[] {"mygroup"});
+    MiniDFSCluster cluster = null;
+    PrintStream bakErr = null;
+    try {
+      final Configuration conf = new HdfsConfiguration();
+      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
+      cluster.waitActive();
+
+      final FileSystem fs = cluster.getFileSystem();
+      final Path p = new Path("/foo");
+      fs.mkdirs(p);
+      bakErr = System.err;
+
+      final FsShell fshell = new FsShell(conf);
+      final ByteArrayOutputStream out = new ByteArrayOutputStream();
+      System.setErr(new PrintStream(out));
+
+      // No permission for "other".
+      fs.setPermission(p, new FsPermission((short) 0700));
+
+      {
+        final int ret = ToolRunner.run(fshell, new String[] {
+            "-setfattr", "-n", "user.a1", "-v", "1234", "/foo"});
+        assertEquals("Returned should be 0", 0, ret);
+        out.reset();
+      }
+
+      user.doAs(new PrivilegedExceptionAction<Object>() {
+          @Override
+          public Object run() throws Exception {
+            int ret = ToolRunner.run(fshell, new String[] {
+                "-getfattr", "-n", "user.a1", "/foo"});
+            String str = out.toString();
+            assertTrue("xattr value was incorrectly returned",
+                str.indexOf("1234") == -1);
+            out.reset();
+            return null;
+          }
+        });
+
+      {
+        final int ret = ToolRunner.run(fshell, new String[]{
+            "-getfattr", "-n", "user.nonexistent", "/foo"});
+        String str = out.toString();
+        assertTrue("xattr value was incorrectly returned",
+          str.indexOf(
+              "getfattr: At least one of the attributes provided was not found")
+               >= 0);
+        out.reset();
+      }
+    } finally {
+      if (bakErr != null) {
+        System.setErr(bakErr);
+      }
+      if (cluster != null) {
+        cluster.shutdown();
+      }
+    }
+  }
+
   /**
    * Test that the server trash configuration is respected when
    * the client configuration is not set.

Modified: hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgradeFromImage.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgradeFromImage.java?rev=1613334&r1=1613333&r2=1613334&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgradeFromImage.java (original)
+++ hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgradeFromImage.java Thu Jul 24 23:55:15 2014
@@ -70,6 +70,9 @@ public class TestDFSUpgradeFromImage {
   private static final String HADOOP_DFS_DIR_TXT = "hadoop-dfs-dir.txt";
   private static final String HADOOP22_IMAGE = "hadoop-22-dfs-dir.tgz";
   private static final String HADOOP1_BBW_IMAGE = "hadoop1-bbw.tgz";
+  private static final String HADOOP1_RESERVED_IMAGE = "hadoop-1-reserved.tgz";
+  private static final String HADOOP023_RESERVED_IMAGE =
+      "hadoop-0.23-reserved.tgz";
   private static final String HADOOP2_RESERVED_IMAGE = "hadoop-2-reserved.tgz";
 
   private static class ReferenceFileInfo {
@@ -326,6 +329,140 @@ public class TestDFSUpgradeFromImage {
   }
 
   /**
+   * Test upgrade from a branch-1.2 image with reserved paths
+   */
+  @Test
+  public void testUpgradeFromRel1ReservedImage() throws Exception {
+    unpackStorage(HADOOP1_RESERVED_IMAGE);
+    MiniDFSCluster cluster = null;
+    // Try it once without setting the upgrade flag to ensure it fails
+    final Configuration conf = new Configuration();
+    // Try it again with a custom rename string
+    try {
+      FSImageFormat.setRenameReservedPairs(
+          ".snapshot=.user-snapshot," +
+              ".reserved=.my-reserved");
+      cluster =
+          new MiniDFSCluster.Builder(conf)
+              .format(false)
+              .startupOption(StartupOption.UPGRADE)
+              .numDataNodes(0).build();
+      DistributedFileSystem dfs = cluster.getFileSystem();
+      // Make sure the paths were renamed as expected
+      // Also check that paths are present after a restart, checks that the
+      // upgraded fsimage has the same state.
+      final String[] expected = new String[] {
+          "/.my-reserved",
+          "/.user-snapshot",
+          "/.user-snapshot/.user-snapshot",
+          "/.user-snapshot/open",
+          "/dir1",
+          "/dir1/.user-snapshot",
+          "/dir2",
+          "/dir2/.user-snapshot",
+          "/user",
+          "/user/andrew",
+          "/user/andrew/.user-snapshot",
+      };
+      for (int i=0; i<2; i++) {
+        // Restart the second time through this loop
+        if (i==1) {
+          cluster.finalizeCluster(conf);
+          cluster.restartNameNode(true);
+        }
+        ArrayList<Path> toList = new ArrayList<Path>();
+        toList.add(new Path("/"));
+        ArrayList<String> found = new ArrayList<String>();
+        while (!toList.isEmpty()) {
+          Path p = toList.remove(0);
+          FileStatus[] statuses = dfs.listStatus(p);
+          for (FileStatus status: statuses) {
+            final String path = status.getPath().toUri().getPath();
+            System.out.println("Found path " + path);
+            found.add(path);
+            if (status.isDirectory()) {
+              toList.add(status.getPath());
+            }
+          }
+        }
+        for (String s: expected) {
+          assertTrue("Did not find expected path " + s, found.contains(s));
+        }
+        assertEquals("Found an unexpected path while listing filesystem",
+            found.size(), expected.length);
+      }
+    } finally {
+      if (cluster != null) {
+        cluster.shutdown();
+      }
+    }
+  }
+
+  /**
+   * Test upgrade from a 0.23.11 image with reserved paths
+   */
+  @Test
+  public void testUpgradeFromRel023ReservedImage() throws Exception {
+    unpackStorage(HADOOP023_RESERVED_IMAGE);
+    MiniDFSCluster cluster = null;
+    // Try it once without setting the upgrade flag to ensure it fails
+    final Configuration conf = new Configuration();
+    // Try it again with a custom rename string
+    try {
+      FSImageFormat.setRenameReservedPairs(
+          ".snapshot=.user-snapshot," +
+              ".reserved=.my-reserved");
+      cluster =
+          new MiniDFSCluster.Builder(conf)
+              .format(false)
+              .startupOption(StartupOption.UPGRADE)
+              .numDataNodes(0).build();
+      DistributedFileSystem dfs = cluster.getFileSystem();
+      // Make sure the paths were renamed as expected
+      // Also check that paths are present after a restart, checks that the
+      // upgraded fsimage has the same state.
+      final String[] expected = new String[] {
+          "/.user-snapshot",
+          "/dir1",
+          "/dir1/.user-snapshot",
+          "/dir2",
+          "/dir2/.user-snapshot"
+      };
+      for (int i=0; i<2; i++) {
+        // Restart the second time through this loop
+        if (i==1) {
+          cluster.finalizeCluster(conf);
+          cluster.restartNameNode(true);
+        }
+        ArrayList<Path> toList = new ArrayList<Path>();
+        toList.add(new Path("/"));
+        ArrayList<String> found = new ArrayList<String>();
+        while (!toList.isEmpty()) {
+          Path p = toList.remove(0);
+          FileStatus[] statuses = dfs.listStatus(p);
+          for (FileStatus status: statuses) {
+            final String path = status.getPath().toUri().getPath();
+            System.out.println("Found path " + path);
+            found.add(path);
+            if (status.isDirectory()) {
+              toList.add(status.getPath());
+            }
+          }
+        }
+        for (String s: expected) {
+          assertTrue("Did not find expected path " + s, found.contains(s));
+        }
+        assertEquals("Found an unexpected path while listing filesystem",
+            found.size(), expected.length);
+      }
+    } finally {
+      if (cluster != null) {
+        cluster.shutdown();
+      }
+    }
+  }
+
+  /**
    * Test upgrade from 2.0 image with a variety of .snapshot and .reserved
    * paths to test renaming on upgrade
    */

Modified: hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSXAttrBaseTest.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSXAttrBaseTest.java?rev=1613334&r1=1613333&r2=1613334&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSXAttrBaseTest.java (original)
+++ hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSXAttrBaseTest.java Thu Jul 24 23:55:15 2014
@@ -19,7 +19,6 @@ package org.apache.hadoop.hdfs.server.na
 
 import java.io.FileNotFoundException;
 import java.io.IOException;
-import java.io.FileNotFoundException;
 import java.security.PrivilegedExceptionAction;
 import java.util.EnumSet;
 import java.util.List;
@@ -46,6 +45,7 @@ import static org.apache.hadoop.fs.permi
 import static org.apache.hadoop.fs.permission.FsAction.ALL;
 import static org.apache.hadoop.fs.permission.FsAction.READ;
 import static org.apache.hadoop.hdfs.server.namenode.AclTestHelpers.aclEntry;
+import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 import org.junit.After;
@@ -261,11 +261,12 @@ public class FSXAttrBaseTest {
       fs.setXAttr(path, "user.", value1, EnumSet.of(XAttrSetFlag.CREATE, 
           XAttrSetFlag.REPLACE));
       Assert.fail("Setting xattr with empty name should fail.");
+    } catch (RemoteException e) {
+      assertEquals("Unexpected RemoteException: " + e, e.getClassName(),
+          HadoopIllegalArgumentException.class.getCanonicalName());
+      GenericTestUtils.assertExceptionContains("XAttr name cannot be empty", e);
     } catch (HadoopIllegalArgumentException e) {
       GenericTestUtils.assertExceptionContains("XAttr name cannot be empty", e);
-    } catch (IllegalArgumentException e) {
-      GenericTestUtils.assertExceptionContains("Invalid value: \"user.\" does " + 
-          "not belong to the domain ^(user\\.|trusted\\.|system\\.|security\\.).+", e);
     }
     
     // Set xattr with invalid name: "a1"
@@ -274,11 +275,12 @@ public class FSXAttrBaseTest {
           XAttrSetFlag.REPLACE));
       Assert.fail("Setting xattr with invalid name prefix or without " +
           "name prefix should fail.");
+    } catch (RemoteException e) {
+      assertEquals("Unexpected RemoteException: " + e, e.getClassName(),
+          HadoopIllegalArgumentException.class.getCanonicalName());
+      GenericTestUtils.assertExceptionContains("XAttr name must be prefixed", e);
     } catch (HadoopIllegalArgumentException e) {
       GenericTestUtils.assertExceptionContains("XAttr name must be prefixed", e);
-    } catch (IllegalArgumentException e) {
-      GenericTestUtils.assertExceptionContains("Invalid value: \"a1\" does " + 
-          "not belong to the domain ^(user\\.|trusted\\.|system\\.|security\\.).+", e);
     }
     
     // Set xattr without XAttrSetFlag
@@ -341,9 +343,18 @@ public class FSXAttrBaseTest {
   }
   
   /**
-   * Tests for getting xattr
-   * 1. To get xattr which does not exist.
-   * 2. To get multiple xattrs.
+   * getxattr tests. Test that getxattr throws an exception if any of
+   * the following are true:
+   * an xattr that was requested doesn't exist
+   * the caller specifies an unknown namespace
+   * the caller doesn't have access to the namespace
+   * the caller doesn't have permission to get the value of the xattr
+   * the caller does not have search access to the parent directory
+   * the caller has only read access to the owning directory
+   * the caller has only search access to the owning directory and
+   * execute/search access to the actual entity
+   * the caller does not have search access to the owning directory and read
+   * access to the actual entity
    */
   @Test(timeout = 120000)
   public void testGetXAttrs() throws Exception {
@@ -351,21 +362,159 @@ public class FSXAttrBaseTest {
     fs.setXAttr(path, name1, value1, EnumSet.of(XAttrSetFlag.CREATE));
     fs.setXAttr(path, name2, value2, EnumSet.of(XAttrSetFlag.CREATE));
     
-    // XAttr does not exist.
-    byte[] value = fs.getXAttr(path, name3);
-    Assert.assertEquals(value, null);
-    
-    List<String> names = Lists.newArrayList();
-    names.add(name1);
-    names.add(name2);
-    names.add(name3);
-    Map<String, byte[]> xattrs = fs.getXAttrs(path, names);
-    Assert.assertEquals(xattrs.size(), 2);
-    Assert.assertArrayEquals(value1, xattrs.get(name1));
-    Assert.assertArrayEquals(value2, xattrs.get(name2));
+    /* An XAttr that was requested does not exist. */
+    try {
+      final byte[] value = fs.getXAttr(path, name3);
+      Assert.fail("expected IOException");
+    } catch (IOException e) {
+      GenericTestUtils.assertExceptionContains(
+          "At least one of the attributes provided was not found.", e);
+    }
+    
+    /* Throw an exception if an xattr that was requested does not exist. */
+    {
+      final List<String> names = Lists.newArrayList();
+      names.add(name1);
+      names.add(name2);
+      names.add(name3);
+      try {
+        final Map<String, byte[]> xattrs = fs.getXAttrs(path, names);
+        Assert.fail("expected IOException");
+      } catch (IOException e) {
+        GenericTestUtils.assertExceptionContains(
+            "At least one of the attributes provided was not found.", e);
+      }
+    }
     
     fs.removeXAttr(path, name1);
     fs.removeXAttr(path, name2);
+
+    /* Unknown namespace should throw an exception. */
+    try {
+      final byte[] xattr = fs.getXAttr(path, "wackynamespace.foo");
+      Assert.fail("expected IOException");
+    } catch (Exception e) {
+      GenericTestUtils.assertExceptionContains(
+          "An XAttr name must be prefixed with user/trusted/security/system, " +
+          "followed by a '.'",
+          e);
+    }
+
+    /*
+     * The 'trusted' namespace should not be accessible and should throw an
+     * exception.
+     */
+    final UserGroupInformation user = UserGroupInformation.
+        createUserForTesting("user", new String[] {"mygroup"});
+    fs.setXAttr(path, "trusted.foo", "1234".getBytes());
+    try {
+      user.doAs(new PrivilegedExceptionAction<Object>() {
+          @Override
+          public Object run() throws Exception {
+            final FileSystem userFs = dfsCluster.getFileSystem();
+            final byte[] xattr = userFs.getXAttr(path, "trusted.foo");
+            return null;
+          }
+        });
+      Assert.fail("expected IOException");
+    } catch (IOException e) {
+      GenericTestUtils.assertExceptionContains("User doesn't have permission", e);
+    }
+
+    fs.setXAttr(path, name1, "1234".getBytes());
+
+    /*
+     * Test that an exception is thrown if the caller doesn't have permission to
+     * get the value of the xattr.
+     */
+
+    /* Set access so that only the owner has access. */
+    fs.setPermission(path, new FsPermission((short) 0700));
+    try {
+      user.doAs(new PrivilegedExceptionAction<Object>() {
+          @Override
+          public Object run() throws Exception {
+            final FileSystem userFs = dfsCluster.getFileSystem();
+            final byte[] xattr = userFs.getXAttr(path, name1);
+            return null;
+          }
+        });
+      Assert.fail("expected IOException");
+    } catch (IOException e) {
+      GenericTestUtils.assertExceptionContains("Permission denied", e);
+    }
+
+    /*
+     * The caller must have search access to the parent directory.
+     */
+    final Path childDir = new Path(path, "child" + pathCount);
+    /* Set access to parent so that only the owner has access. */
+    FileSystem.mkdirs(fs, childDir, FsPermission.createImmutable((short)0700));
+    fs.setXAttr(childDir, name1, "1234".getBytes());
+    try {
+      user.doAs(new PrivilegedExceptionAction<Object>() {
+          @Override
+          public Object run() throws Exception {
+            final FileSystem userFs = dfsCluster.getFileSystem();
+            final byte[] xattr = userFs.getXAttr(childDir, name1);
+            return null;
+          }
+        });
+      Assert.fail("expected IOException");
+    } catch (IOException e) {
+      GenericTestUtils.assertExceptionContains("Permission denied", e);
+    }
+
+    /* Check that read access to the owning directory is not good enough. */
+    fs.setPermission(path, new FsPermission((short) 0704));
+    try {
+      user.doAs(new PrivilegedExceptionAction<Object>() {
+          @Override
+          public Object run() throws Exception {
+            final FileSystem userFs = dfsCluster.getFileSystem();
+            final byte[] xattr = userFs.getXAttr(childDir, name1);
+            return null;
+          }
+        });
+      Assert.fail("expected IOException");
+    } catch (IOException e) {
+      GenericTestUtils.assertExceptionContains("Permission denied", e);
+    }
+
+    /*
+     * Check that search access to the owning directory and search/execute
+     * access to the actual entity with extended attributes is not good enough.
+     */
+    fs.setPermission(path, new FsPermission((short) 0701));
+    fs.setPermission(childDir, new FsPermission((short) 0701));
+    try {
+      user.doAs(new PrivilegedExceptionAction<Object>() {
+          @Override
+          public Object run() throws Exception {
+            final FileSystem userFs = dfsCluster.getFileSystem();
+            final byte[] xattr = userFs.getXAttr(childDir, name1);
+            return null;
+          }
+        });
+      Assert.fail("expected IOException");
+    } catch (IOException e) {
+      GenericTestUtils.assertExceptionContains("Permission denied", e);
+    }
+
+    /*
+     * Check that search access to the owning directory and read access to
+     * the actual entity with the extended attribute is good enough.
+     */
+    fs.setPermission(path, new FsPermission((short) 0701));
+    fs.setPermission(childDir, new FsPermission((short) 0704));
+    user.doAs(new PrivilegedExceptionAction<Object>() {
+        @Override
+        public Object run() throws Exception {
+          final FileSystem userFs = dfsCluster.getFileSystem();
+          final byte[] xattr = userFs.getXAttr(childDir, name1);
+          return null;
+        }
+      });
   }
   
   /**
@@ -402,6 +551,166 @@ public class FSXAttrBaseTest {
     fs.removeXAttr(path, name3);
   }
 
+  /**
+   * removexattr tests. Test that removexattr throws an exception if any of
+   * the following are true:
+   * - an xattr that was requested doesn't exist
+   * - the caller specifies an unknown namespace
+   * - the caller doesn't have access to the namespace
+   * - the caller doesn't have permission to remove the xattr
+   * - the caller does not have "execute" (scan) access to the parent directory
+   * - the caller has only read access to the owning directory
+   * - the caller has only execute access to the owning directory and execute
+   *   access to the actual entity
+   * Also verify that removexattr succeeds when the caller has execute access
+   * to the owning directory and write access to the actual entity.
+   */
+  @Test(timeout = 120000)
+  public void testRemoveXAttrPermissions() throws Exception {
+    FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short)0750));
+    fs.setXAttr(path, name1, value1, EnumSet.of(XAttrSetFlag.CREATE));
+    fs.setXAttr(path, name2, value2, EnumSet.of(XAttrSetFlag.CREATE));
+    fs.setXAttr(path, name3, null, EnumSet.of(XAttrSetFlag.CREATE));
+
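+    /* The second removal of name2 targets an xattr that no longer exists. */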
+    try {
+      fs.removeXAttr(path, name2);
+      fs.removeXAttr(path, name2);
+      Assert.fail("expected IOException");
+    } catch (IOException e) {
+      GenericTestUtils.assertExceptionContains("No matching attributes found", e);
+    }
+
+    /* Unknown namespace should throw an exception. */
+    final String expectedExceptionString = "An XAttr name must be prefixed " +
+        "with user/trusted/security/system, followed by a '.'";
+    try {
+      fs.removeXAttr(path, "wackynamespace.foo");
+      Assert.fail("expected IOException");
+    } catch (RemoteException e) {
+      assertEquals("Unexpected RemoteException: " + e, e.getClassName(),
+          HadoopIllegalArgumentException.class.getCanonicalName());
+      GenericTestUtils.assertExceptionContains(expectedExceptionString, e);
+    } catch (HadoopIllegalArgumentException e) {
+      GenericTestUtils.assertExceptionContains(expectedExceptionString, e);
+    }
+
+    /*
+     * The 'trusted' namespace should not be accessible and should throw an
+     * exception.
+     */
+    final UserGroupInformation user = UserGroupInformation.
+        createUserForTesting("user", new String[] {"mygroup"});
+    fs.setXAttr(path, "trusted.foo", "1234".getBytes());
+    try {
+      user.doAs(new PrivilegedExceptionAction<Object>() {
+          @Override
+          public Object run() throws Exception {
+            final FileSystem userFs = dfsCluster.getFileSystem();
+            userFs.removeXAttr(path, "trusted.foo");
+            return null;
+          }
+        });
+      Assert.fail("expected IOException");
+    } catch (IOException e) {
+      GenericTestUtils.assertExceptionContains("User doesn't have permission", e);
+    } finally {
+      fs.removeXAttr(path, "trusted.foo");
+    }
+
+    /*
+     * Test that an exception is thrown if the caller doesn't have permission to
+     * get the value of the xattr.
+     */
+
+    /* Set access so that only the owner has access. */
+    fs.setPermission(path, new FsPermission((short) 0700));
+    try {
+      user.doAs(new PrivilegedExceptionAction<Object>() {
+          @Override
+          public Object run() throws Exception {
+            final FileSystem userFs = dfsCluster.getFileSystem();
+            userFs.removeXAttr(path, name1);
+            return null;
+          }
+        });
+      Assert.fail("expected IOException");
+    } catch (IOException e) {
+      GenericTestUtils.assertExceptionContains("Permission denied", e);
+    }
+
+    /*
+     * The caller must have "execute" (scan) access to the parent directory.
+     */
+    final Path childDir = new Path(path, "child" + pathCount);
+    /* Set access to parent so that only the owner has access. */
+    FileSystem.mkdirs(fs, childDir, FsPermission.createImmutable((short)0700));
+    fs.setXAttr(childDir, name1, "1234".getBytes());
+    try {
+      user.doAs(new PrivilegedExceptionAction<Object>() {
+          @Override
+          public Object run() throws Exception {
+            final FileSystem userFs = dfsCluster.getFileSystem();
+            userFs.removeXAttr(childDir, name1);
+            return null;
+          }
+        });
+      Assert.fail("expected IOException");
+    } catch (IOException e) {
+      GenericTestUtils.assertExceptionContains("Permission denied", e);
+    }
+
+    /* Check that read access to the owning directory is not good enough. */
+    fs.setPermission(path, new FsPermission((short) 0704));
+    try {
+      user.doAs(new PrivilegedExceptionAction<Object>() {
+          @Override
+          public Object run() throws Exception {
+            final FileSystem userFs = dfsCluster.getFileSystem();
+            userFs.removeXAttr(childDir, name1);
+            return null;
+          }
+        });
+      Assert.fail("expected IOException");
+    } catch (IOException e) {
+      GenericTestUtils.assertExceptionContains("Permission denied", e);
+    }
+
+    /*
+     * Check that execute access to the owning directory and scan access to
+     * the actual entity with extended attributes is not good enough.
+     */
+    fs.setPermission(path, new FsPermission((short) 0701));
+    fs.setPermission(childDir, new FsPermission((short) 0701));
+    try {
+      user.doAs(new PrivilegedExceptionAction<Object>() {
+          @Override
+          public Object run() throws Exception {
+            final FileSystem userFs = dfsCluster.getFileSystem();
+            userFs.removeXAttr(childDir, name1);
+            return null;
+          }
+        });
+      Assert.fail("expected IOException");
+    } catch (IOException e) {
+      GenericTestUtils.assertExceptionContains("Permission denied", e);
+    }
+
+    /*
+     * Check that execute access to the owning directory and write access to
+     * the actual entity with extended attributes is good enough.
+     */
+    fs.setPermission(path, new FsPermission((short) 0701));
+    fs.setPermission(childDir, new FsPermission((short) 0706));
+    user.doAs(new PrivilegedExceptionAction<Object>() {
+        @Override
+        public Object run() throws Exception {
+          final FileSystem userFs = dfsCluster.getFileSystem();
+          userFs.removeXAttr(childDir, name1);
+          return null;
+        }
+      });
+  }
+
   @Test(timeout = 120000)
   public void testRenameFileWithXAttr() throws Exception {
     FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short)0750));

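The permission cases added to FSXAttrBaseTest above all share one shape: run the xattr call as an unprivileged user via UserGroupInformation.doAs and assert that an IOException containing "Permission denied" escapes. A minimal sketch of that shape factored into a helper, assuming the fixtures of the class above (dfsCluster, Assert, GenericTestUtils); the FileSystemOp interface and the expectPermissionDenied name are illustrative, not part of this change:

  /* Illustrative only: the repeated doAs/expect-failure pattern above. */
  private interface FileSystemOp {
    void run(FileSystem userFs) throws Exception;
  }

  private void expectPermissionDenied(UserGroupInformation user,
      final FileSystemOp op) throws Exception {
    try {
      user.doAs(new PrivilegedExceptionAction<Object>() {
        @Override
        public Object run() throws Exception {
          // Fetch a fresh FileSystem so the call executes with the test
          // user's credentials rather than the superuser's.
          op.run(dfsCluster.getFileSystem());
          return null;
        }
      });
      Assert.fail("expected IOException");
    } catch (IOException e) {
      GenericTestUtils.assertExceptionContains("Permission denied", e);
    }
  }

With such a helper, each denied case above collapses to a single call whose body invokes userFs.getXAttr or userFs.removeXAttr.
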
Modified: hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNamenodeRetryCache.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNamenodeRetryCache.java?rev=1613334&r1=1613333&r2=1613334&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNamenodeRetryCache.java (original)
+++ hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNamenodeRetryCache.java Thu Jul 24 23:55:15 2014
@@ -416,7 +416,7 @@ public class TestNamenodeRetryCache {
     
     LightWeightCache<CacheEntry, CacheEntry> cacheSet = 
         (LightWeightCache<CacheEntry, CacheEntry>) namesystem.getRetryCache().getCacheSet();
-    assertEquals(22, cacheSet.size());
+    assertEquals(23, cacheSet.size());
     
     Map<CacheEntry, CacheEntry> oldEntries = 
         new HashMap<CacheEntry, CacheEntry>();
@@ -435,7 +435,7 @@ public class TestNamenodeRetryCache {
     assertTrue(namesystem.hasRetryCache());
     cacheSet = (LightWeightCache<CacheEntry, CacheEntry>) namesystem
         .getRetryCache().getCacheSet();
-    assertEquals(22, cacheSet.size());
+    assertEquals(23, cacheSet.size());
     iter = cacheSet.iterator();
     while (iter.hasNext()) {
       CacheEntry entry = iter.next();

Modified: hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRetryCacheWithHA.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRetryCacheWithHA.java?rev=1613334&r1=1613333&r2=1613334&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRetryCacheWithHA.java (original)
+++ hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRetryCacheWithHA.java Thu Jul 24 23:55:15 2014
@@ -160,7 +160,7 @@ public class TestRetryCacheWithHA {
     FSNamesystem fsn0 = cluster.getNamesystem(0);
     LightWeightCache<CacheEntry, CacheEntry> cacheSet = 
         (LightWeightCache<CacheEntry, CacheEntry>) fsn0.getRetryCache().getCacheSet();
-    assertEquals(22, cacheSet.size());
+    assertEquals(23, cacheSet.size());
     
     Map<CacheEntry, CacheEntry> oldEntries = 
         new HashMap<CacheEntry, CacheEntry>();
@@ -181,7 +181,7 @@ public class TestRetryCacheWithHA {
     FSNamesystem fsn1 = cluster.getNamesystem(1);
     cacheSet = (LightWeightCache<CacheEntry, CacheEntry>) fsn1
         .getRetryCache().getCacheSet();
-    assertEquals(22, cacheSet.size());
+    assertEquals(23, cacheSet.size());
     iter = cacheSet.iterator();
     while (iter.hasNext()) {
       CacheEntry entry = iter.next();
@@ -1047,6 +1047,49 @@ public class TestRetryCacheWithHA {
     }
   }
 
+  /** removeXAttr */
+  class RemoveXAttrOp extends AtMostOnceOp {
+    private final String src;
+
+    RemoveXAttrOp(DFSClient client, String src) {
+      super("removeXAttr", client);
+      this.src = src;
+    }
+
+    @Override
+    void prepare() throws Exception {
+      Path p = new Path(src);
+      if (!dfs.exists(p)) {
+        DFSTestUtil.createFile(dfs, p, BlockSize, DataNodes, 0);
+        client.setXAttr(src, "user.key", "value".getBytes(),
+          EnumSet.of(XAttrSetFlag.CREATE));
+      }
+    }
+
+    @Override
+    void invoke() throws Exception {
+      client.removeXAttr(src, "user.key");
+    }
+
+    @Override
+    boolean checkNamenodeBeforeReturn() throws Exception {
+      for (int i = 0; i < CHECKTIMES; i++) {
+        Map<String, byte[]> iter = dfs.getXAttrs(new Path(src));
+        Set<String> keySet = iter.keySet();
+        if (!keySet.contains("user.key")) {
+          return true;
+        }
+        Thread.sleep(1000);
+      }
+      return false;
+    }
+
+    @Override
+    Object getResult() {
+      return null;
+    }
+  }
+
   @Test (timeout=60000)
   public void testCreateSnapshot() throws Exception {
     final DFSClient client = genClientWithDummyHandler();
@@ -1183,6 +1226,13 @@ public class TestRetryCacheWithHA {
     testClientRetryWithFailover(op);
   }
 
+  @Test (timeout=60000)
+  public void testRemoveXAttr() throws Exception {
+    DFSClient client = genClientWithDummyHandler();
+    AtMostOnceOp op = new RemoveXAttrOp(client, "/removexattr");
+    testClientRetryWithFailover(op);
+  }
+
   /**
   * When NN failover happens, if the client does not receive the response and
   * sends a retry request to the other NN, the same response should be received

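checkNamenodeBeforeReturn in RemoveXAttrOp above uses the suite's convergence idiom: poll the NameNode up to CHECKTIMES times, sleeping one second between attempts, and report failure if the state never converges. The idiom in isolation, as a sketch; the Condition interface and the waitFor name are illustrative, not part of this commit:

  /* Illustrative only: the poll-until-true idiom used by the ops. */
  interface Condition {
    boolean met() throws Exception;
  }

  static boolean waitFor(Condition c, int attempts, long sleepMs)
      throws Exception {
    for (int i = 0; i < attempts; i++) {
      if (c.met()) {
        return true;   // condition observed before attempts ran out
      }
      Thread.sleep(sleepMs);
    }
    return false;      // the caller treats this as a verification failure
  }
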
Modified: hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java?rev=1613334&r1=1613333&r2=1613334&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java (original)
+++ hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java Thu Jul 24 23:55:15 2014
@@ -39,14 +39,18 @@ import org.apache.hadoop.hdfs.DFSConfigK
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.TestDFSClientRetries;
+import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotTestHelper;
 import org.apache.hadoop.hdfs.server.namenode.web.resources.NamenodeWebHdfsMethods;
-import org.apache.hadoop.hdfs.TestDFSClientRetries;
+import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
+import org.apache.hadoop.ipc.RetriableException;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.log4j.Level;
 import org.junit.Assert;
 import org.junit.Test;
+import org.mockito.internal.util.reflection.Whitebox;
 
 /** Test WebHDFS */
 public class TestWebHDFS {
@@ -445,4 +449,37 @@ public class TestWebHDFS {
       }
     }
   }
+
+  /**
+   * Make sure a RetriableException is thrown when rpcServer is null in
+   * NamenodeWebHdfsMethods.
+   */
+  @Test
+  public void testRaceWhileNNStartup() throws Exception {
+    MiniDFSCluster cluster = null;
+    final Configuration conf = WebHdfsTestUtil.createConf();
+    try {
+      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
+      cluster.waitActive();
+      final NameNode namenode = cluster.getNameNode();
+      final NamenodeProtocols rpcServer = namenode.getRpcServer();
+      Whitebox.setInternalState(namenode, "rpcServer", null);
+
+      final Path foo = new Path("/foo");
+      final FileSystem webHdfs = WebHdfsTestUtil.getWebHdfsFileSystem(conf,
+          WebHdfsFileSystem.SCHEME);
+      try {
+        webHdfs.mkdirs(foo);
+        fail("Expected RetriableException");
+      } catch (RetriableException e) {
+        GenericTestUtils.assertExceptionContains("Namenode is in startup mode",
+            e);
+      }
+      Whitebox.setInternalState(namenode, "rpcServer", rpcServer);
+    } finally {
+      if (cluster != null) {
+        cluster.shutdown();
+      }
+    }
+  }
 }

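testRaceWhileNNStartup above simulates the startup window by nulling the NameNode's private rpcServer field through Mockito's Whitebox and putting it back after the expected RetriableException is observed. One hedged refinement, not what the commit does: restoring the field in its own try/finally keeps the NameNode usable even if an assertion in between throws, since the outer finally only shuts the cluster down:

  Whitebox.setInternalState(namenode, "rpcServer", null);
  try {
    // Exercise WebHDFS while the RPC server appears unavailable.
    webHdfs.mkdirs(foo);
    fail("Expected RetriableException");
  } catch (RetriableException e) {
    GenericTestUtils.assertExceptionContains("Namenode is in startup mode", e);
  } finally {
    // Restore the field even if the assertion above throws, so later
    // operations against the same NameNode see a functional RPC server.
    Whitebox.setInternalState(namenode, "rpcServer", rpcServer);
  }
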
Modified: hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFSForHA.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFSForHA.java?rev=1613334&r1=1613333&r2=1613334&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFSForHA.java (original)
+++ hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFSForHA.java Thu Jul 24 23:55:15 2014
@@ -18,6 +18,15 @@
 
 package org.apache.hadoop.hdfs.web;
 
+import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY;
+import static org.mockito.Mockito.spy;
+import static org.mockito.Mockito.verify;
+
+import java.io.IOException;
+import java.net.URI;
+import java.util.HashMap;
+import java.util.Map;
+
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FSDataOutputStream;
@@ -29,18 +38,14 @@ import org.apache.hadoop.hdfs.DFSTestUti
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.MiniDFSNNTopology;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
+import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.ha.HATestUtil;
+import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.security.token.Token;
 import org.junit.Assert;
 import org.junit.Test;
-
-import java.io.IOException;
-import java.net.URI;
-
-import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY;
-import static org.mockito.Mockito.spy;
-import static org.mockito.Mockito.verify;
+import org.mockito.internal.util.reflection.Whitebox;
 
 public class TestWebHDFSForHA {
   private static final String LOGICAL_NAME = "minidfs";
@@ -182,4 +187,61 @@ public class TestWebHDFSForHA {
       }
     }
   }
+
+  /**
+   * Make sure the WebHdfsFileSystem will retry based on RetriableException when
+   * rpcServer is null in NamenodeWebHdfsMethods while NameNode starts up.
+   */
+  @Test (timeout=120000)
+  public void testRetryWhileNNStartup() throws Exception {
+    final Configuration conf = DFSTestUtil.newHAConfiguration(LOGICAL_NAME);
+    MiniDFSCluster cluster = null;
+    final Map<String, Boolean> resultMap = new HashMap<String, Boolean>();
+
+    try {
+      cluster = new MiniDFSCluster.Builder(conf).nnTopology(topo)
+          .numDataNodes(0).build();
+      HATestUtil.setFailoverConfigurations(cluster, conf, LOGICAL_NAME);
+      cluster.waitActive();
+      cluster.transitionToActive(0);
+
+      final NameNode namenode = cluster.getNameNode(0);
+      final NamenodeProtocols rpcServer = namenode.getRpcServer();
+      Whitebox.setInternalState(namenode, "rpcServer", null);
+
+      new Thread() {
+        @Override
+        public void run() {
+          boolean result = false;
+          FileSystem fs = null;
+          try {
+            fs = FileSystem.get(WEBHDFS_URI, conf);
+            final Path dir = new Path("/test");
+            result = fs.mkdirs(dir);
+          } catch (IOException e) {
+            result = false;
+          } finally {
+            IOUtils.cleanup(null, fs);
+          }
+          synchronized (TestWebHDFSForHA.this) {
+            resultMap.put("mkdirs", result);
+            TestWebHDFSForHA.this.notifyAll();
+          }
+        }
+      }.start();
+
+      Thread.sleep(1000);
+      Whitebox.setInternalState(namenode, "rpcServer", rpcServer);
+      synchronized (this) {
+        while (!resultMap.containsKey("mkdirs")) {
+          this.wait();
+        }
+        Assert.assertTrue(resultMap.get("mkdirs"));
+      }
+    } finally {
+      if (cluster != null) {
+        cluster.shutdown();
+      }
+    }
+  }
 }

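testRetryWhileNNStartup above hands the worker thread's result back through a synchronized HashMap plus wait/notifyAll. Under the same assumptions, a CountDownLatch paired with an AtomicBoolean is a more compact handoff; this is a sketch of an alternative, not what the commit uses, and it assumes imports of java.util.concurrent.CountDownLatch and java.util.concurrent.atomic.AtomicBoolean:

  final CountDownLatch done = new CountDownLatch(1);
  final AtomicBoolean mkdirs = new AtomicBoolean(false);

  new Thread() {
    @Override
    public void run() {
      FileSystem fs = null;
      try {
        fs = FileSystem.get(WEBHDFS_URI, conf);
        mkdirs.set(fs.mkdirs(new Path("/test")));
      } catch (IOException e) {
        // Leave mkdirs false; the main thread's assert will then fail.
      } finally {
        IOUtils.cleanup(null, fs);
        done.countDown();  // wake the waiting main thread exactly once
      }
    }
  }.start();

  Thread.sleep(1000);
  Whitebox.setInternalState(namenode, "rpcServer", rpcServer);
  done.await();
  Assert.assertTrue(mkdirs.get());
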
Modified: hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/resources/TestParam.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/resources/TestParam.java?rev=1613334&r1=1613333&r2=1613334&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/resources/TestParam.java (original)
+++ hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/resources/TestParam.java Thu Jul 24 23:55:15 2014
@@ -355,12 +355,6 @@ public class TestParam {
   public void testXAttrNameParam() {
     final XAttrNameParam p = new XAttrNameParam("user.a1");
     Assert.assertEquals(p.getXAttrName(), "user.a1");
-    try {
-      new XAttrNameParam("a1");
-      Assert.fail();
-    } catch (IllegalArgumentException e) {
-      LOG.info("EXPECTED: " + e);
-    }
   }
   
   @Test

Modified: hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored?rev=1613334&r1=1613333&r2=1613334&view=diff
==============================================================================
Binary files - no diff available.

Modified: hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored.xml
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored.xml?rev=1613334&r1=1613333&r2=1613334&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored.xml (original)
+++ hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored.xml Thu Jul 24 23:55:15 2014
@@ -986,6 +986,8 @@
         <NAMESPACE>USER</NAMESPACE>
         <NAME>a2</NAME>
       </XATTR>
+      <RPC_CLIENTID>e03f4a52-3d85-4e05-8942-286185e639bd</RPC_CLIENTID>
+      <RPC_CALLID>82</RPC_CALLID>
     </DATA>
   </RECORD>
   <RECORD>