Posted to common-commits@hadoop.apache.org by vi...@apache.org on 2015/09/03 22:59:09 UTC

[1/3] hadoop git commit: HDFS-7609. Avoid retry cache collision when Standby NameNode loading edits. Contributed by Ming Ma.

Repository: hadoop
Updated Branches:
  refs/heads/branch-2.6.1 563dbd29e -> ae0fac3ef


HDFS-7609. Avoid retry cache collision when Standby NameNode loading edits. Contributed by Ming Ma.

(cherry picked from commit 7817674a3a4d097b647dd77f1345787dd376d5ea)
(cherry picked from commit 17fb442a4c4e43105374c97fccd68dd966729a19)

Conflicts:
	hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
	hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java

(cherry picked from commit fad2a062ddbb955a42dd5a90d64781617287f8df)
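
The collision comes from ordering: in several FSNamesystem methods the
retry-cache lookup (RetryCache.waitForCompletion) ran before
checkOperation could reject the call, so a request that reached the
Standby NameNode left a failed entry in its retry cache; when the
standby later applied the active's edit for the same (clientId, callId),
the replayed entry collided with it. The patch makes the ordering
uniform by dropping the checkOperation(OperationCategory.WRITE) calls
from FSNamesystem and performing the check in NameNodeRpcServer, ahead
of any retry-cache access. A condensed sketch of the resulting flow,
using deleteSnapshot as the example (simplified from the diffs below,
not the verbatim source):

    // NameNodeRpcServer (RPC layer)
    public void deleteSnapshot(String snapshotRoot, String snapshotName)
        throws IOException {
      checkNNStartup();
      // Throws StandbyException on a standby *before* the namesystem
      // can touch the retry cache.
      namesystem.checkOperation(OperationCategory.WRITE);
      namesystem.deleteSnapshot(snapshotRoot, snapshotName);
    }

    // FSNamesystem (checkOperation removed from here)
    void deleteSnapshot(String snapshotRoot, String snapshotName)
        throws IOException {
      // Only an active NameNode reaches this point, so the entry created
      // here cannot collide with one replayed from the edit log.
      CacheEntry cacheEntry = RetryCache.waitForCompletion(retryCache);
      if (cacheEntry != null && cacheEntry.isSuccess()) {
        return; // replay the previous, already-successful response
      }
      boolean success = false;
      try {
        // ... perform the deletion and log the edit ...
        success = true;
      } finally {
        RetryCache.setState(cacheEntry, success);
      }
    }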


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/46b9393c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/46b9393c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/46b9393c

Branch: refs/heads/branch-2.6.1
Commit: 46b9393cababeec8d26fc5cf55f96e502357b422
Parents: 563dbd2
Author: Jing Zhao <ji...@apache.org>
Authored: Fri May 29 11:05:13 2015 -0700
Committer: Vinod Kumar Vavilapalli <vi...@apache.org>
Committed: Thu Sep 3 13:40:45 2015 -0700

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt     |  3 ++
 .../hdfs/server/namenode/FSNamesystem.java      | 18 ----------
 .../hdfs/server/namenode/NameNodeRpcServer.java | 20 +++++++++++
 .../namenode/ha/TestRetryCacheWithHA.java       | 37 ++++++++++++++++++--
 4 files changed, 58 insertions(+), 20 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/46b9393c/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 7d88967..68adff8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -147,6 +147,9 @@ Release 2.6.1 - UNRELEASED
     HDFS-8431. hdfs crypto class not found in Windows.
     (Anu Engineer via cnauroth)
 
+    HDFS-7609. Avoid retry cache collision when Standby NameNode loading edits.
+    (Ming Ma via jing9)
+
 Release 2.6.0 - 2014-11-18
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/46b9393c/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index 2c6a65d..19edbb5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -2003,7 +2003,6 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
 
     HdfsFileStatus resultingStat = null;
     FSPermissionChecker pc = getPermissionChecker();
-    checkOperation(OperationCategory.WRITE);
     waitForLoadingFSImage();
     writeLock();
     try {
@@ -2563,7 +2562,6 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
     boolean skipSync = false;
     HdfsFileStatus stat = null;
     FSPermissionChecker pc = getPermissionChecker();
-    checkOperation(OperationCategory.WRITE);
     if (blockSize < minBlockSize) {
       throw new IOException("Specified block size is less than configured" +
           " minimum value (" + DFSConfigKeys.DFS_NAMENODE_MIN_BLOCK_SIZE_KEY
@@ -3137,7 +3135,6 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
 
     LocatedBlock lb = null;
     FSPermissionChecker pc = getPermissionChecker();
-    checkOperation(OperationCategory.WRITE);
     byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
     writeLock();
     try {
@@ -3806,7 +3803,6 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
       throw new IOException("Invalid name: " + dst);
     }
     FSPermissionChecker pc = getPermissionChecker();
-    checkOperation(OperationCategory.WRITE);
     byte[][] srcComponents = FSDirectory.getPathComponentsForReservedPath(src);
     byte[][] dstComponents = FSDirectory.getPathComponentsForReservedPath(dst);
     boolean status = false;
@@ -3879,7 +3875,6 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
     }
     final FSPermissionChecker pc = getPermissionChecker();
     
-    checkOperation(OperationCategory.WRITE);
     CacheEntry cacheEntry = RetryCache.waitForCompletion(retryCache);
     if (cacheEntry != null && cacheEntry.isSuccess()) {
       return; // Return previous response
@@ -4003,7 +3998,6 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
     BlocksMapUpdateInfo collectedBlocks = new BlocksMapUpdateInfo();
     List<INode> removedINodes = new ChunkedArrayList<INode>();
     FSPermissionChecker pc = getPermissionChecker();
-    checkOperation(OperationCategory.WRITE);
     byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
     boolean ret = false;
 
@@ -7048,7 +7042,6 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
   void updatePipeline(String clientName, ExtendedBlock oldBlock, 
       ExtendedBlock newBlock, DatanodeID[] newNodes, String[] newStorageIDs)
       throws IOException {
-    checkOperation(OperationCategory.WRITE);
     CacheEntry cacheEntry = RetryCache.waitForCompletion(retryCache);
     if (cacheEntry != null && cacheEntry.isSuccess()) {
       return; // Return previous response
@@ -8141,7 +8134,6 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
    */
   void renameSnapshot(String path, String snapshotOldName,
       String snapshotNewName) throws SafeModeException, IOException {
-    checkOperation(OperationCategory.WRITE);
     final FSPermissionChecker pc = getPermissionChecker();
     CacheEntry cacheEntry = RetryCache.waitForCompletion(retryCache);
     if (cacheEntry != null && cacheEntry.isSuccess()) {
@@ -8255,7 +8247,6 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
    */
   void deleteSnapshot(String snapshotRoot, String snapshotName)
       throws SafeModeException, IOException {
-    checkOperation(OperationCategory.WRITE);
     final FSPermissionChecker pc = getPermissionChecker();
     
     CacheEntry cacheEntry = RetryCache.waitForCompletion(retryCache);
@@ -8501,7 +8492,6 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
 
   long addCacheDirective(CacheDirectiveInfo directive, EnumSet<CacheFlag> flags)
       throws IOException {
-    checkOperation(OperationCategory.WRITE);
     final FSPermissionChecker pc = isPermissionEnabled ?
         getPermissionChecker() : null;
     CacheEntryWithPayload cacheEntry =
@@ -8548,7 +8538,6 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
 
   void modifyCacheDirective(CacheDirectiveInfo directive,
       EnumSet<CacheFlag> flags) throws IOException {
-    checkOperation(OperationCategory.WRITE);
     final FSPermissionChecker pc = isPermissionEnabled ?
         getPermissionChecker() : null;
     boolean success = false;
@@ -8584,7 +8573,6 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
   }
 
   void removeCacheDirective(Long id) throws IOException {
-    checkOperation(OperationCategory.WRITE);
     final FSPermissionChecker pc = isPermissionEnabled ?
         getPermissionChecker() : null;
     CacheEntry cacheEntry = RetryCache.waitForCompletion(retryCache);
@@ -8639,7 +8627,6 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
   }
 
   public void addCachePool(CachePoolInfo req) throws IOException {
-    checkOperation(OperationCategory.WRITE);
     final FSPermissionChecker pc = isPermissionEnabled ?
         getPermissionChecker() : null;
     CacheEntry cacheEntry = RetryCache.waitForCompletion(retryCache);
@@ -8674,7 +8661,6 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
   }
 
   public void modifyCachePool(CachePoolInfo req) throws IOException {
-    checkOperation(OperationCategory.WRITE);
     final FSPermissionChecker pc =
         isPermissionEnabled ? getPermissionChecker() : null;
     CacheEntry cacheEntry = RetryCache.waitForCompletion(retryCache);
@@ -8708,7 +8694,6 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
   }
 
   public void removeCachePool(String cachePoolName) throws IOException {
-    checkOperation(OperationCategory.WRITE);
     final FSPermissionChecker pc =
         isPermissionEnabled ? getPermissionChecker() : null;
     CacheEntry cacheEntry = RetryCache.waitForCompletion(retryCache);
@@ -8975,7 +8960,6 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
     String src = srcArg;
     HdfsFileStatus resultingStat = null;
     checkSuperuserPrivilege();
-    checkOperation(OperationCategory.WRITE);
     final byte[][] pathComponents =
       FSDirectory.getPathComponentsForReservedPath(src);
     writeLock();
@@ -9098,7 +9082,6 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
     FSPermissionChecker pc = getPermissionChecker();
     XAttrPermissionFilter.checkPermissionForApi(pc, xAttr,
         FSDirectory.isReservedRawName(src));
-    checkOperation(OperationCategory.WRITE);
     byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
     writeLock();
     try {
@@ -9260,7 +9243,6 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
     FSPermissionChecker pc = getPermissionChecker();
     XAttrPermissionFilter.checkPermissionForApi(pc, xAttr,
         FSDirectory.isReservedRawName(src));
-    checkOperation(OperationCategory.WRITE);
     byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
     writeLock();
     try {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/46b9393c/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
index d7d65db..690d7e1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
@@ -576,6 +576,7 @@ class NameNodeRpcServer implements NamenodeProtocols {
       throw new IOException("create: Pathname too long.  Limit "
           + MAX_PATH_LENGTH + " characters, " + MAX_PATH_DEPTH + " levels.");
     }
+    namesystem.checkOperation(OperationCategory.WRITE);
     HdfsFileStatus fileStatus = namesystem.startFile(src, new PermissionStatus(
         getRemoteUser().getShortUserName(), null, masked),
         clientName, clientMachine, flag.get(), createParent, replication,
@@ -594,6 +595,7 @@ class NameNodeRpcServer implements NamenodeProtocols {
       stateChangeLog.debug("*DIR* NameNode.append: file "
           +src+" for "+clientName+" at "+clientMachine);
     }
+    namesystem.checkOperation(OperationCategory.WRITE);
     LocatedBlock info = namesystem.appendFile(src, clientName, clientMachine);
     metrics.incrFilesAppended();
     return info;
@@ -749,6 +751,7 @@ class NameNodeRpcServer implements NamenodeProtocols {
       ExtendedBlock newBlock, DatanodeID[] newNodes, String[] newStorageIDs)
       throws IOException {
     checkNNStartup();
+    namesystem.checkOperation(OperationCategory.WRITE);
     namesystem.updatePipeline(clientName, oldBlock, newBlock, newNodes, newStorageIDs);
   }
   
@@ -781,6 +784,7 @@ class NameNodeRpcServer implements NamenodeProtocols {
       throw new IOException("rename: Pathname too long.  Limit "
           + MAX_PATH_LENGTH + " characters, " + MAX_PATH_DEPTH + " levels.");
     }
+    namesystem.checkOperation(OperationCategory.WRITE);
     boolean ret = namesystem.renameTo(src, dst);
     if (ret) {
       metrics.incrFilesRenamed();
@@ -791,6 +795,7 @@ class NameNodeRpcServer implements NamenodeProtocols {
   @Override // ClientProtocol
   public void concat(String trg, String[] src) throws IOException {
     checkNNStartup();
+    namesystem.checkOperation(OperationCategory.WRITE);
     namesystem.concat(trg, src);
   }
   
@@ -805,6 +810,7 @@ class NameNodeRpcServer implements NamenodeProtocols {
       throw new IOException("rename: Pathname too long.  Limit "
           + MAX_PATH_LENGTH + " characters, " + MAX_PATH_DEPTH + " levels.");
     }
+    namesystem.checkOperation(OperationCategory.WRITE);
     namesystem.renameTo(src, dst, options);
     metrics.incrFilesRenamed();
   }
@@ -816,6 +822,7 @@ class NameNodeRpcServer implements NamenodeProtocols {
       stateChangeLog.debug("*DIR* Namenode.delete: src=" + src
           + ", recursive=" + recursive);
     }
+    namesystem.checkOperation(OperationCategory.WRITE);
     boolean ret = namesystem.delete(src, recursive);
     if (ret) 
       metrics.incrDeleteFileOps();
@@ -1088,6 +1095,7 @@ class NameNodeRpcServer implements NamenodeProtocols {
   public void createSymlink(String target, String link, FsPermission dirPerms,
       boolean createParent) throws IOException {
     checkNNStartup();
+    namesystem.checkOperation(OperationCategory.WRITE);
     metrics.incrCreateSymlinkOps();
     /* We enforce the MAX_PATH_LENGTH limit even though a symlink target 
      * URI may refer to a non-HDFS file system. 
@@ -1408,6 +1416,7 @@ class NameNodeRpcServer implements NamenodeProtocols {
       throw new IOException("createSnapshot: Pathname too long.  Limit "
           + MAX_PATH_LENGTH + " characters, " + MAX_PATH_DEPTH + " levels.");
     }
+    namesystem.checkOperation(OperationCategory.WRITE);
     metrics.incrCreateSnapshotOps();
     return namesystem.createSnapshot(snapshotRoot, snapshotName);
   }
@@ -1416,6 +1425,7 @@ class NameNodeRpcServer implements NamenodeProtocols {
   public void deleteSnapshot(String snapshotRoot, String snapshotName)
       throws IOException {
     checkNNStartup();
+    namesystem.checkOperation(OperationCategory.WRITE);
     metrics.incrDeleteSnapshotOps();
     namesystem.deleteSnapshot(snapshotRoot, snapshotName);
   }
@@ -1444,6 +1454,7 @@ class NameNodeRpcServer implements NamenodeProtocols {
     if (snapshotNewName == null || snapshotNewName.isEmpty()) {
       throw new IOException("The new snapshot name is null or empty.");
     }
+    namesystem.checkOperation(OperationCategory.WRITE);
     metrics.incrRenameSnapshotOps();
     namesystem.renameSnapshot(snapshotRoot, snapshotOldName, snapshotNewName);
   }
@@ -1472,6 +1483,7 @@ class NameNodeRpcServer implements NamenodeProtocols {
   public long addCacheDirective(
       CacheDirectiveInfo path, EnumSet<CacheFlag> flags) throws IOException {
     checkNNStartup();
+    namesystem.checkOperation(OperationCategory.WRITE);
     return namesystem.addCacheDirective(path, flags);
   }
 
@@ -1479,12 +1491,14 @@ class NameNodeRpcServer implements NamenodeProtocols {
   public void modifyCacheDirective(
       CacheDirectiveInfo directive, EnumSet<CacheFlag> flags) throws IOException {
     checkNNStartup();
+    namesystem.checkOperation(OperationCategory.WRITE);
     namesystem.modifyCacheDirective(directive, flags);
   }
 
   @Override // ClientProtocol
   public void removeCacheDirective(long id) throws IOException {
     checkNNStartup();
+    namesystem.checkOperation(OperationCategory.WRITE);
     namesystem.removeCacheDirective(id);
   }
 
@@ -1501,18 +1515,21 @@ class NameNodeRpcServer implements NamenodeProtocols {
   @Override //ClientProtocol
   public void addCachePool(CachePoolInfo info) throws IOException {
     checkNNStartup();
+    namesystem.checkOperation(OperationCategory.WRITE);
     namesystem.addCachePool(info);
   }
 
   @Override // ClientProtocol
   public void modifyCachePool(CachePoolInfo info) throws IOException {
     checkNNStartup();
+    namesystem.checkOperation(OperationCategory.WRITE);
     namesystem.modifyCachePool(info);
   }
 
   @Override // ClientProtocol
   public void removeCachePool(String cachePoolName) throws IOException {
     checkNNStartup();
+    namesystem.checkOperation(OperationCategory.WRITE);
     namesystem.removeCachePool(cachePoolName);
   }
 
@@ -1565,6 +1582,7 @@ class NameNodeRpcServer implements NamenodeProtocols {
   public void createEncryptionZone(String src, String keyName)
     throws IOException {
     checkNNStartup();
+    namesystem.checkOperation(OperationCategory.WRITE);
     namesystem.createEncryptionZone(src, keyName);
   }
 
@@ -1586,6 +1604,7 @@ class NameNodeRpcServer implements NamenodeProtocols {
   public void setXAttr(String src, XAttr xAttr, EnumSet<XAttrSetFlag> flag)
       throws IOException {
     checkNNStartup();
+    namesystem.checkOperation(OperationCategory.WRITE);
     namesystem.setXAttr(src, xAttr, flag);
   }
   
@@ -1605,6 +1624,7 @@ class NameNodeRpcServer implements NamenodeProtocols {
   @Override // ClientProtocol
   public void removeXAttr(String src, XAttr xAttr) throws IOException {
     checkNNStartup();
+    namesystem.checkOperation(OperationCategory.WRITE);
     namesystem.removeXAttr(src, xAttr);
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/46b9393c/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRetryCacheWithHA.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRetryCacheWithHA.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRetryCacheWithHA.java
index bf889f3..7155d60 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRetryCacheWithHA.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRetryCacheWithHA.java
@@ -213,7 +213,8 @@ public class TestRetryCacheWithHA {
   abstract class AtMostOnceOp {
     private final String name;
     final DFSClient client;
-    
+    int expectedUpdateCount = 0;
+
     AtMostOnceOp(String name, DFSClient client) {
       this.name = name;
       this.client = client;
@@ -223,6 +224,9 @@ public class TestRetryCacheWithHA {
     abstract void invoke() throws Exception;
     abstract boolean checkNamenodeBeforeReturn() throws Exception;
     abstract Object getResult();
+    int getExpectedCacheUpdateCount() {
+      return expectedUpdateCount;
+    }
   }
   
   /** createSnapshot operation */
@@ -601,7 +605,7 @@ public class TestRetryCacheWithHA {
   class DeleteOp extends AtMostOnceOp {
     private final String target;
     private boolean deleted;
-    
+
     DeleteOp(DFSClient client, String target) {
       super("delete", client);
       this.target = target;
@@ -611,12 +615,14 @@ public class TestRetryCacheWithHA {
     void prepare() throws Exception {
       Path p = new Path(target);
       if (!dfs.exists(p)) {
+        expectedUpdateCount++;
         DFSTestUtil.createFile(dfs, p, BlockSize, DataNodes, 0);
       }
     }
 
     @Override
     void invoke() throws Exception {
+      expectedUpdateCount++;
       deleted = client.delete(target, true);
     }
 
@@ -652,12 +658,14 @@ public class TestRetryCacheWithHA {
     void prepare() throws Exception {
       Path p = new Path(target);
       if (!dfs.exists(p)) {
+        expectedUpdateCount++;
         DFSTestUtil.createFile(dfs, p, BlockSize, DataNodes, 0);
       }
     }
 
     @Override
     void invoke() throws Exception {
+      expectedUpdateCount++;
       client.createSymlink(target, link, false);
     }
 
@@ -769,11 +777,13 @@ public class TestRetryCacheWithHA {
 
     @Override
     void prepare() throws Exception {
+      expectedUpdateCount++;
       dfs.addCachePool(new CachePoolInfo(directive.getPool()));
     }
 
     @Override
     void invoke() throws Exception {
+      expectedUpdateCount++;
       result = client.addCacheDirective(directive, EnumSet.of(CacheFlag.FORCE));
     }
 
@@ -815,12 +825,15 @@ public class TestRetryCacheWithHA {
 
     @Override
     void prepare() throws Exception {
+      expectedUpdateCount++;
       dfs.addCachePool(new CachePoolInfo(directive.getPool()));
+      expectedUpdateCount++;
       id = client.addCacheDirective(directive, EnumSet.of(CacheFlag.FORCE));
     }
 
     @Override
     void invoke() throws Exception {
+      expectedUpdateCount++;
       client.modifyCacheDirective(
           new CacheDirectiveInfo.Builder().
               setId(id).
@@ -871,12 +884,15 @@ public class TestRetryCacheWithHA {
 
     @Override
     void prepare() throws Exception {
+      expectedUpdateCount++;
       dfs.addCachePool(new CachePoolInfo(directive.getPool()));
+      expectedUpdateCount++;
       id = dfs.addCacheDirective(directive, EnumSet.of(CacheFlag.FORCE));
     }
 
     @Override
     void invoke() throws Exception {
+      expectedUpdateCount++;
       client.removeCacheDirective(id);
     }
 
@@ -918,6 +934,7 @@ public class TestRetryCacheWithHA {
 
     @Override
     void invoke() throws Exception {
+      expectedUpdateCount++;
       client.addCachePool(new CachePoolInfo(pool));
     }
 
@@ -950,11 +967,13 @@ public class TestRetryCacheWithHA {
 
     @Override
     void prepare() throws Exception {
+      expectedUpdateCount++;
       client.addCachePool(new CachePoolInfo(pool).setLimit(10l));
     }
 
     @Override
     void invoke() throws Exception {
+      expectedUpdateCount++;
       client.modifyCachePool(new CachePoolInfo(pool).setLimit(99l));
     }
 
@@ -987,11 +1006,13 @@ public class TestRetryCacheWithHA {
 
     @Override
     void prepare() throws Exception {
+      expectedUpdateCount++;
       client.addCachePool(new CachePoolInfo(pool));
     }
 
     @Override
     void invoke() throws Exception {
+      expectedUpdateCount++;
       client.removeCachePool(pool);
     }
 
@@ -1026,12 +1047,14 @@ public class TestRetryCacheWithHA {
     void prepare() throws Exception {
       Path p = new Path(src);
       if (!dfs.exists(p)) {
+        expectedUpdateCount++;
         DFSTestUtil.createFile(dfs, p, BlockSize, DataNodes, 0);
       }
     }
 
     @Override
     void invoke() throws Exception {
+      expectedUpdateCount++;
       client.setXAttr(src, "user.key", "value".getBytes(),
           EnumSet.of(XAttrSetFlag.CREATE));
     }
@@ -1068,7 +1091,9 @@ public class TestRetryCacheWithHA {
     void prepare() throws Exception {
       Path p = new Path(src);
       if (!dfs.exists(p)) {
+        expectedUpdateCount++;
         DFSTestUtil.createFile(dfs, p, BlockSize, DataNodes, 0);
+        expectedUpdateCount++;
         client.setXAttr(src, "user.key", "value".getBytes(),
           EnumSet.of(XAttrSetFlag.CREATE));
       }
@@ -1076,6 +1101,7 @@ public class TestRetryCacheWithHA {
 
     @Override
     void invoke() throws Exception {
+      expectedUpdateCount++;
       client.removeXAttr(src, "user.key");
     }
 
@@ -1312,6 +1338,13 @@ public class TestRetryCacheWithHA {
     assertTrue("CacheUpdated on NN0: " + updatedNN0, updatedNN0 > 0);
     // Cache updated metrics on NN1 should be >0 since NN1 applied the editlog
     assertTrue("CacheUpdated on NN1: " + updatedNN1, updatedNN1 > 0);
+    long expectedUpdateCount = op.getExpectedCacheUpdateCount();
+    if (expectedUpdateCount > 0) {
+      assertEquals("CacheUpdated on NN0: " + updatedNN0, expectedUpdateCount,
+          updatedNN0);
+      assertEquals("CacheUpdated on NN0: " + updatedNN1, expectedUpdateCount,
+          updatedNN1);
+    }
   }
 
   /**


[2/3] hadoop git commit: YARN-2900. Application (Attempt and Container) Not Found in AHS results in Internal Server Error (500). Contributed by Zhijie Shen and Mit Desai

Posted by vi...@apache.org.
YARN-2900. Application (Attempt and Container) Not Found in AHS results
in Internal Server Error (500). Contributed by Zhijie Shen and Mit Desai

(cherry picked from commit 06f8e9cabaf3c05cd7d16215cff47265ea773f39)
(cherry picked from commit 4fee8b320276bac86278e1ae0a3397592a78aa18)
(cherry picked from commit 6c7b625138ce3b262a8c8aa28077074b553638ed)
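
The 500s came from WebServices wrapping every non-authorization failure
in a bare WebApplicationException. The fix deduplicates the unwrap logic
and routes the three not-found exception types to NotFoundException,
which maps to HTTP 404. A condensed sketch of the resulting helpers
(simplified from the diff below):

    // Unwrap reflective proxy wrappers once, then map the cause to a
    // specific HTTP status instead of defaulting everything to 500.
    private static void rewrapAndThrowException(Exception e) {
      if (e instanceof UndeclaredThrowableException) {
        rewrapAndThrowThrowable(e.getCause());
      } else {
        rewrapAndThrowThrowable(e);
      }
    }

    private static void rewrapAndThrowThrowable(Throwable t) {
      if (t instanceof AuthorizationException) {
        throw new ForbiddenException(t);         // 403
      } else if (t instanceof ApplicationNotFoundException
          || t instanceof ApplicationAttemptNotFoundException
          || t instanceof ContainerNotFoundException) {
        throw new NotFoundException(t);          // 404 instead of 500
      } else {
        throw new WebApplicationException(t);    // everything else stays 500
      }
    }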


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1c6a287b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1c6a287b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1c6a287b

Branch: refs/heads/branch-2.6.1
Commit: 1c6a287bf53c87a769a4d0a992a82c648a2c77dd
Parents: 46b9393
Author: Xuan <xg...@apache.org>
Authored: Sat May 30 21:56:19 2015 -0700
Committer: Vinod Kumar Vavilapalli <vi...@apache.org>
Committed: Thu Sep 3 13:45:06 2015 -0700

----------------------------------------------------------------------
 hadoop-yarn-project/CHANGES.txt                 |  3 +
 .../TestApplicationHistoryClientService.java    | 70 +++++++++++++++++++-
 .../hadoop/yarn/server/webapp/WebServices.java  | 27 +++++---
 3 files changed, 89 insertions(+), 11 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1c6a287b/hadoop-yarn-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 328a0ed..b8d1f53 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -147,6 +147,9 @@ Release 2.6.1 - UNRELEASED
     YARN-3700. Made generic history service load a number of latest applications
     according to the parameter or the configuration. (Xuan Gong via zjshen)
 
+    YARN-2900. Application (Attempt and Container) Not Found in AHS results
+    in Internal Server Error (500). (Zhijie Shen and Mit Desai via xgong)
+
 Release 2.6.0 - 2014-11-18
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1c6a287b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/TestApplicationHistoryClientService.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/TestApplicationHistoryClientService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/TestApplicationHistoryClientService.java
index 3a6214f..feab8c1 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/TestApplicationHistoryClientService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/TestApplicationHistoryClientService.java
@@ -41,6 +41,9 @@ import org.apache.hadoop.yarn.api.records.ApplicationReport;
 import org.apache.hadoop.yarn.api.records.ContainerId;
 import org.apache.hadoop.yarn.api.records.ContainerReport;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.exceptions.ApplicationAttemptNotFoundException;
+import org.apache.hadoop.yarn.exceptions.ApplicationNotFoundException;
+import org.apache.hadoop.yarn.exceptions.ContainerNotFoundException;
 import org.apache.hadoop.yarn.exceptions.YarnException;
 import org.apache.hadoop.yarn.server.security.ApplicationACLsManager;
 import org.apache.hadoop.yarn.server.timeline.TimelineDataManager;
@@ -54,12 +57,13 @@ public class TestApplicationHistoryClientService {
 
   private static ApplicationHistoryClientService clientService;
   private static TimelineDataManager dataManager;
+  private final static int MAX_APPS = 2;
 
   @BeforeClass
   public static void setup() throws Exception {
     Configuration conf = new YarnConfiguration();
     TimelineStore store =
-        TestApplicationHistoryManagerOnTimelineStore.createStore(2);
+        TestApplicationHistoryManagerOnTimelineStore.createStore(MAX_APPS);
     TimelineACLsManager aclsManager = new TimelineACLsManager(conf);
     dataManager =
         new TimelineDataManager(store, aclsManager);
@@ -72,6 +76,70 @@ public class TestApplicationHistoryClientService {
   }
 
   @Test
+  public void testApplicationNotFound() throws IOException, YarnException {
+    ApplicationId appId = null;
+    appId = ApplicationId.newInstance(0, MAX_APPS + 1);
+    GetApplicationReportRequest request =
+        GetApplicationReportRequest.newInstance(appId);
+    try {
+      @SuppressWarnings("unused")
+      GetApplicationReportResponse response =
+          clientService.getApplicationReport(request);
+      Assert.fail("Exception should have been thrown before we reach here.");
+    } catch (ApplicationNotFoundException e) {
+      //This exception is expected.
+      Assert.assertTrue(e.getMessage().contains(
+          "doesn't exist in the timeline store"));
+    } catch (Exception e) {
+      Assert.fail("Undesired exception caught");
+    }
+  }
+
+  @Test
+  public void testApplicationAttemptNotFound() throws IOException, YarnException {
+    ApplicationId appId = ApplicationId.newInstance(0, 1);
+    ApplicationAttemptId appAttemptId =
+        ApplicationAttemptId.newInstance(appId, MAX_APPS + 1);
+    GetApplicationAttemptReportRequest request =
+        GetApplicationAttemptReportRequest.newInstance(appAttemptId);
+    try {
+      @SuppressWarnings("unused")
+      GetApplicationAttemptReportResponse response =
+          clientService.getApplicationAttemptReport(request);
+      Assert.fail("Exception should have been thrown before we reach here.");
+    } catch (ApplicationAttemptNotFoundException e) {
+      //This Exception is expected
+      System.out.println(e.getMessage());
+      Assert.assertTrue(e.getMessage().contains(
+          "doesn't exist in the timeline store"));
+    } catch (Exception e) {
+      Assert.fail("Undesired exception caught");
+    }
+  }
+
+  @Test
+  public void testContainerNotFound() throws IOException, YarnException {
+   ApplicationId appId = ApplicationId.newInstance(0, 1);
+   ApplicationAttemptId appAttemptId =
+       ApplicationAttemptId.newInstance(appId, 1);
+   ContainerId containerId = ContainerId.newContainerId(appAttemptId,
+       MAX_APPS + 1);
+   GetContainerReportRequest request =
+       GetContainerReportRequest.newInstance(containerId);
+   try {
+   @SuppressWarnings("unused")
+   GetContainerReportResponse response =
+       clientService.getContainerReport(request);
+   } catch (ContainerNotFoundException e) {
+     //This exception is expected
+     Assert.assertTrue(e.getMessage().contains(
+         "doesn't exist in the timeline store"));
+   }  catch (Exception e) {
+      Assert.fail("Undesired exception caught");
+   }
+ }
+
+  @Test
   public void testApplicationReport() throws IOException, YarnException {
     ApplicationId appId = null;
     appId = ApplicationId.newInstance(0, 1);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1c6a287b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/WebServices.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/WebServices.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/WebServices.java
index 0d6f166..b8d7752 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/WebServices.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/WebServices.java
@@ -46,6 +46,9 @@ import org.apache.hadoop.yarn.api.records.ContainerId;
 import org.apache.hadoop.yarn.api.records.ContainerReport;
 import org.apache.hadoop.yarn.api.records.FinalApplicationStatus;
 import org.apache.hadoop.yarn.api.records.YarnApplicationState;
+import org.apache.hadoop.yarn.exceptions.ApplicationAttemptNotFoundException;
+import org.apache.hadoop.yarn.exceptions.ApplicationNotFoundException;
+import org.apache.hadoop.yarn.exceptions.ContainerNotFoundException;
 import org.apache.hadoop.yarn.server.webapp.dao.AppAttemptInfo;
 import org.apache.hadoop.yarn.server.webapp.dao.AppAttemptsInfo;
 import org.apache.hadoop.yarn.server.webapp.dao.AppInfo;
@@ -475,17 +478,21 @@ public class WebServices {
 
   private static void rewrapAndThrowException(Exception e) {
     if (e instanceof UndeclaredThrowableException) {
-      if (e.getCause() instanceof AuthorizationException) {
-        throw new ForbiddenException(e.getCause());
-      } else {
-        throw new WebApplicationException(e.getCause());
-      }
+      rewrapAndThrowThrowable(e.getCause());
     } else {
-      if (e instanceof AuthorizationException) {
-        throw new ForbiddenException(e);
-      } else {
-        throw new WebApplicationException(e);
-      }
+      rewrapAndThrowThrowable(e);
+    }
+  }
+
+  private static void rewrapAndThrowThrowable(Throwable t) {
+    if (t instanceof AuthorizationException) {
+      throw new ForbiddenException(t);
+    } if (t instanceof ApplicationNotFoundException ||
+        t instanceof ApplicationAttemptNotFoundException ||
+        t instanceof ContainerNotFoundException) {
+      throw new NotFoundException(t);
+    } else {
+      throw new WebApplicationException(t);
     }
   }
 


[3/3] hadoop git commit: YARN-3725. App submission via REST API is broken in secure mode due to Timeline DT service address is empty. (Zhijie Shen via wangda)

Posted by vi...@apache.org.
YARN-3725. App submission via REST API is broken in secure mode due to Timeline DT service address is empty. (Zhijie Shen via wangda)

(cherry picked from commit 5cc3fced957a8471733e0e9490878bd68429fe24)
(cherry picked from commit a3734f67d35e714690ecdf21d80bce8a355381e3)
(cherry picked from commit 9ccc22e2ac89990f3e7997f1d89594523c66e76a)
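
A Timeline delegation token handed out over REST can carry an empty
service field, leaving renewDelegationToken and cancelDelegationToken
with no address to contact. The change below derives the target URI from
the token's service address when one is present and falls back to the
client's configured resURI otherwise. A condensed sketch of the fallback
as it applies to cancellation (names follow the diff; resURI is the
configured timeline service URI):

    // Does the token carry a service address (host:port) at all?
    final boolean isTokenServiceAddrEmpty =
        timelineDT.getService().toString().isEmpty();
    final String scheme = isTokenServiceAddrEmpty ? null
        : (YarnConfiguration.useHttps(getConfig()) ? "https" : "http");
    final InetSocketAddress address = isTokenServiceAddrEmpty ? null
        : SecurityUtil.getTokenServiceAddr(timelineDT);
    // Prefer the address embedded in the token; if it is empty, fall
    // back to the configured timeline service URI.
    final URI serviceURI = isTokenServiceAddrEmpty ? resURI
        : new URI(scheme, null, address.getHostName(), address.getPort(),
            RESOURCE_URI_STR, null, null);
    authUrl.cancelDelegationToken(serviceURI.toURL(), token, doAsUser);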


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ae0fac3e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ae0fac3e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ae0fac3e

Branch: refs/heads/branch-2.6.1
Commit: ae0fac3efa9a86b6582cc9721c857d8ff36a8d10
Parents: 1c6a287
Author: Wangda Tan <wa...@apache.org>
Authored: Sun May 31 16:30:34 2015 -0700
Committer: Vinod Kumar Vavilapalli <vi...@apache.org>
Committed: Thu Sep 3 13:50:14 2015 -0700

----------------------------------------------------------------------
 hadoop-yarn-project/CHANGES.txt                 |  3 +++
 .../client/api/impl/TimelineClientImpl.java     | 26 +++++++++++++++++++-
 .../TestTimelineAuthenticationFilter.java       | 11 +++++++++
 3 files changed, 39 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ae0fac3e/hadoop-yarn-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index b8d1f53..4ce9f9f 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -150,6 +150,9 @@ Release 2.6.1 - UNRELEASED
     YARN-2900. Application (Attempt and Container) Not Found in AHS results
     in Internal Server Error (500). (Zhijie Shen and Mit Desai via xgong)
 
+    YARN-3725. App submission via REST API is broken in secure mode due to
+    Timeline DT service address is empty. (Zhijie Shen via wangda)
+
 Release 2.6.0 - 2014-11-18
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ae0fac3e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/TimelineClientImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/TimelineClientImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/TimelineClientImpl.java
index f5c85c1..6bf858a 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/TimelineClientImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/TimelineClientImpl.java
@@ -23,6 +23,7 @@ import java.io.IOException;
 import java.lang.reflect.UndeclaredThrowableException;
 import java.net.ConnectException;
 import java.net.HttpURLConnection;
+import java.net.InetSocketAddress;
 import java.net.URI;
 import java.net.URL;
 import java.net.URLConnection;
@@ -44,6 +45,7 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience.Private;
 import org.apache.hadoop.classification.InterfaceStability.Unstable;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.security.SecurityUtil;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.authentication.client.AuthenticationException;
 import org.apache.hadoop.security.authentication.client.ConnectionConfigurator;
@@ -362,6 +364,12 @@ public class TimelineClientImpl extends TimelineClient {
   public long renewDelegationToken(
       final Token<TimelineDelegationTokenIdentifier> timelineDT)
           throws IOException, YarnException {
+    final boolean isTokenServiceAddrEmpty =
+        timelineDT.getService().toString().isEmpty();
+    final String scheme = isTokenServiceAddrEmpty ? null
+        : (YarnConfiguration.useHttps(this.getConfig()) ? "https" : "http");
+    final InetSocketAddress address = isTokenServiceAddrEmpty ? null
+        : SecurityUtil.getTokenServiceAddr(timelineDT);
     PrivilegedExceptionAction<Long> renewDTAction =
         new PrivilegedExceptionAction<Long>() {
 
@@ -377,6 +385,11 @@ public class TimelineClientImpl extends TimelineClient {
             DelegationTokenAuthenticatedURL authUrl =
                 new DelegationTokenAuthenticatedURL(authenticator,
                     connConfigurator);
+            // If the token service address is not available, fall back to use
+            // the configured service address.
+            final URI serviceURI = isTokenServiceAddrEmpty ? resURI
+                : new URI(scheme, null, address.getHostName(),
+                address.getPort(), RESOURCE_URI_STR, null, null);
             return authUrl
                 .renewDelegationToken(resURI.toURL(), token, doAsUser);
           }
@@ -389,6 +402,12 @@ public class TimelineClientImpl extends TimelineClient {
   public void cancelDelegationToken(
       final Token<TimelineDelegationTokenIdentifier> timelineDT)
           throws IOException, YarnException {
+    final boolean isTokenServiceAddrEmpty =
+        timelineDT.getService().toString().isEmpty();
+    final String scheme = isTokenServiceAddrEmpty ? null
+        : (YarnConfiguration.useHttps(this.getConfig()) ? "https" : "http");
+    final InetSocketAddress address = isTokenServiceAddrEmpty ? null
+        : SecurityUtil.getTokenServiceAddr(timelineDT);
     PrivilegedExceptionAction<Void> cancelDTAction =
         new PrivilegedExceptionAction<Void>() {
 
@@ -404,7 +423,12 @@ public class TimelineClientImpl extends TimelineClient {
             DelegationTokenAuthenticatedURL authUrl =
                 new DelegationTokenAuthenticatedURL(authenticator,
                     connConfigurator);
-            authUrl.cancelDelegationToken(resURI.toURL(), token, doAsUser);
+            // If the token service address is not available, fall back to use
+            // the configured service address.
+            final URI serviceURI = isTokenServiceAddrEmpty ? resURI
+                : new URI(scheme, null, address.getHostName(),
+                address.getPort(), RESOURCE_URI_STR, null, null);
+            authUrl.cancelDelegationToken(serviceURI.toURL(), token, doAsUser);
             return null;
           }
         };

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ae0fac3e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/timeline/security/TestTimelineAuthenticationFilter.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/timeline/security/TestTimelineAuthenticationFilter.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/timeline/security/TestTimelineAuthenticationFilter.java
index c93e8f2..063f512 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/timeline/security/TestTimelineAuthenticationFilter.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/timeline/security/TestTimelineAuthenticationFilter.java
@@ -240,12 +240,21 @@ public class TestTimelineAuthenticationFilter {
     Assert.assertEquals(new Text(HTTP_USER), tDT.getOwner());
 
     // Renew token
+    Assert.assertFalse(token.getService().toString().isEmpty());
+    // Renew the token from the token service address
     long renewTime1 = httpUserClient.renewDelegationToken(token);
     Thread.sleep(100);
+    token.setService(new Text());
+    Assert.assertTrue(token.getService().toString().isEmpty());
+    // If the token service address is not available, it can still be renewed
+    // from the configured address
     long renewTime2 = httpUserClient.renewDelegationToken(token);
     Assert.assertTrue(renewTime1 < renewTime2);
 
     // Cancel token
+    Assert.assertTrue(token.getService().toString().isEmpty());
+    // If the token service address is not available, it can still be canceled
+    // from the configured address
     httpUserClient.cancelDelegationToken(token);
     // Renew should not be successful because the token is canceled
     try {
@@ -280,6 +289,8 @@ public class TestTimelineAuthenticationFilter {
     Assert.assertTrue(renewTime1 < renewTime2);
 
     // Cancel token
+    Assert.assertFalse(tokenToRenew.getService().toString().isEmpty());
+    // Cancel the token from the token service address
     fooUserClient.cancelDelegationToken(tokenToRenew);
 
     // Renew should not be successful because the token is canceled