Posted to hdfs-commits@hadoop.apache.org by su...@apache.org on 2013/07/30 10:01:01 UTC

svn commit: r1508336 [2/2] - in /hadoop/common/branches/branch-2.1-beta/hadoop-hdfs-project/hadoop-hdfs: ./ src/main/java/org/apache/hadoop/hdfs/ src/main/java/org/apache/hadoop/hdfs/protocol/ src/main/java/org/apache/hadoop/hdfs/server/namenode/ src/m...

Modified: hadoop/common/branches/branch-2.1-beta/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2.1-beta/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java?rev=1508336&r1=1508335&r2=1508336&view=diff
==============================================================================
--- hadoop/common/branches/branch-2.1-beta/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java (original)
+++ hadoop/common/branches/branch-2.1-beta/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java Tue Jul 30 08:01:00 2013
@@ -249,7 +249,7 @@ public class SecondaryNameNode implement
     checkpointImage.recoverCreate(commandLineOpts.shouldFormat());
     checkpointImage.deleteTempEdits();
     
-    namesystem = new FSNamesystem(conf, checkpointImage);
+    namesystem = new FSNamesystem(conf, checkpointImage, true);
 
     // Initialize other scheduling parameters from the configuration
     checkpointConf = new CheckpointConf(conf);
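
The extra boolean handed to FSNamesystem ties into this commit's retry-cache
work: a checkpointing SecondaryNameNode never answers retried client RPCs, so
it can tell the namesystem to skip retry-cache setup. A minimal sketch of the
constructor shape this implies, where the parameter name is an assumption
rather than the committed API:

    // Hedged sketch; "ignoreRetryCache" is an assumed name for the new flag.
    FSNamesystem(Configuration conf, FSImage fsImage) throws IOException {
      this(conf, fsImage, false);  // a regular NameNode keeps its retry cache
    }

    FSNamesystem(Configuration conf, FSImage fsImage, boolean ignoreRetryCache)
        throws IOException {
      // ...existing initialization...
      this.retryCache = ignoreRetryCache ? null : initRetryCache(conf);
    }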

Modified: hadoop/common/branches/branch-2.1-beta/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/ImageLoaderCurrent.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2.1-beta/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/ImageLoaderCurrent.java?rev=1508336&r1=1508335&r2=1508336&view=diff
==============================================================================
--- hadoop/common/branches/branch-2.1-beta/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/ImageLoaderCurrent.java (original)
+++ hadoop/common/branches/branch-2.1-beta/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/ImageLoaderCurrent.java Tue Jul 30 08:01:00 2013
@@ -126,7 +126,7 @@ class ImageLoaderCurrent implements Imag
                                       new SimpleDateFormat("yyyy-MM-dd HH:mm");
   private static int[] versions = { -16, -17, -18, -19, -20, -21, -22, -23,
       -24, -25, -26, -27, -28, -30, -31, -32, -33, -34, -35, -36, -37, -38, -39,
-      -40, -41, -42, -43, -44, -45, -46 };
+      -40, -41, -42, -43, -44, -45, -46, -47 };
   private int imageVersion = 0;
   
   private final Map<Long, String> subtreeMap = new HashMap<Long, String>();
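
Appending -47 registers the new image layout version with the offline image
viewer; without it the viewer would reject images written by this release. A
sketch of the membership check such a version list typically backs (the
method name here is an assumption):

    // Hedged sketch: gate image loading on the supported layout versions.
    static boolean isSupportedVersion(int version) {
      for (int v : versions) {
        if (v == version) {
          return true;
        }
      }
      return false;
    }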

Modified: hadoop/common/branches/branch-2.1-beta/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2.1-beta/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java?rev=1508336&r1=1508335&r2=1508336&view=diff
==============================================================================
--- hadoop/common/branches/branch-2.1-beta/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java (original)
+++ hadoop/common/branches/branch-2.1-beta/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java Tue Jul 30 08:01:00 2013
@@ -57,8 +57,11 @@ import org.apache.hadoop.fs.BlockLocatio
 import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileContext;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FileSystem.Statistics;
+import org.apache.hadoop.fs.Options.Rename;
+import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.MiniDFSCluster.NameNodeInfo;
 import org.apache.hadoop.hdfs.client.HdfsDataInputStream;
@@ -942,4 +945,102 @@ public class DFSTestUtil {
       return new DFSTestUtil(nFiles, maxLevels, maxSize, minSize);
     }
   }
+  
+  /**
+   * Run a set of operations so each exercised opcode is written to the edit log
+   */
+  public static void runOperations(MiniDFSCluster cluster,
+      DistributedFileSystem filesystem, Configuration conf, long blockSize, 
+      int nnIndex) throws IOException {
+    // create FileContext, needed for rename with options (rename2) and symlinks
+    FileContext fc = FileContext.getFileContext(cluster.getURI(0), conf);
+    
+    // OP_ADD 0
+    final Path pathFileCreate = new Path("/file_create");
+    FSDataOutputStream s = filesystem.create(pathFileCreate);
+    // OP_CLOSE 9
+    s.close();
+    // OP_RENAME_OLD 1
+    final Path pathFileMoved = new Path("/file_moved");
+    filesystem.rename(pathFileCreate, pathFileMoved);
+    // OP_DELETE 2
+    filesystem.delete(pathFileMoved, false);
+    // OP_MKDIR 3
+    Path pathDirectoryMkdir = new Path("/directory_mkdir");
+    filesystem.mkdirs(pathDirectoryMkdir);
+    // OP_ALLOW_SNAPSHOT 29
+    filesystem.allowSnapshot(pathDirectoryMkdir);
+    // OP_DISALLOW_SNAPSHOT 30
+    filesystem.disallowSnapshot(pathDirectoryMkdir);
+    // OP_CREATE_SNAPSHOT 26
+    String ssName = "snapshot1";
+    filesystem.allowSnapshot(pathDirectoryMkdir);
+    filesystem.createSnapshot(pathDirectoryMkdir, ssName);
+    // OP_RENAME_SNAPSHOT 28
+    String ssNewName = "snapshot2";
+    filesystem.renameSnapshot(pathDirectoryMkdir, ssName, ssNewName);
+    // OP_DELETE_SNAPSHOT 27
+    filesystem.deleteSnapshot(pathDirectoryMkdir, ssNewName);
+    // OP_SET_REPLICATION 4
+    s = filesystem.create(pathFileCreate);
+    s.close();
+    filesystem.setReplication(pathFileCreate, (short)1);
+    // OP_SET_PERMISSIONS 7
+    Short permission = 0777;
+    filesystem.setPermission(pathFileCreate, new FsPermission(permission));
+    // OP_SET_OWNER 8
+    filesystem.setOwner(pathFileCreate, "newOwner", null);
+    // OP_CLOSE 9 see above
+    // OP_SET_GENSTAMP 10 see above
+    // OP_SET_NS_QUOTA 11 obsolete
+    // OP_CLEAR_NS_QUOTA 12 obsolete
+    // OP_TIMES 13
+    long mtime = 1285195527000L; // Wed, 22 Sep 2010 22:45:27 GMT
+    long atime = mtime;
+    filesystem.setTimes(pathFileCreate, mtime, atime);
+    // OP_SET_QUOTA 14
+    filesystem.setQuota(pathDirectoryMkdir, 1000L, 
+        HdfsConstants.QUOTA_DONT_SET);
+    // OP_RENAME 15
+    fc.rename(pathFileCreate, pathFileMoved, Rename.NONE);
+    // OP_CONCAT_DELETE 16
+    Path   pathConcatTarget = new Path("/file_concat_target");
+    Path[] pathConcatFiles  = new Path[2];
+    pathConcatFiles[0]      = new Path("/file_concat_0");
+    pathConcatFiles[1]      = new Path("/file_concat_1");
+
+    long length = blockSize * 3; // multiple of blocksize for concat
+    short replication = 1;
+    long seed = 1;
+    DFSTestUtil.createFile(filesystem, pathConcatTarget, length, replication,
+        seed);
+    DFSTestUtil.createFile(filesystem, pathConcatFiles[0], length, replication,
+        seed);
+    DFSTestUtil.createFile(filesystem, pathConcatFiles[1], length, replication,
+        seed);
+    filesystem.concat(pathConcatTarget, pathConcatFiles);
+    
+    // OP_SYMLINK 17
+    Path pathSymlink = new Path("/file_symlink");
+    fc.createSymlink(pathConcatTarget, pathSymlink, false);
+    
+    // OP_REASSIGN_LEASE 22
+    String filePath = "/hard-lease-recovery-test";
+    byte[] bytes = "foo-bar-baz".getBytes();
+    DFSClientAdapter.stopLeaseRenewer(filesystem);
+    FSDataOutputStream leaseRecoveryPath = filesystem.create(new Path(filePath));
+    leaseRecoveryPath.write(bytes);
+    leaseRecoveryPath.hflush();
+    // Set the hard lease timeout to 1 second.
+    cluster.setLeasePeriod(60 * 1000, 1000, nnIndex);
+    // wait for lease recovery to complete
+    LocatedBlocks locatedBlocks;
+    do {
+      try {
+        Thread.sleep(1000);
+      } catch (InterruptedException e) {}
+      locatedBlocks = DFSClientAdapter.callGetBlockLocations(
+          cluster.getNameNodeRpc(nnIndex), filePath, 0L, bytes.length);
+    } while (locatedBlocks.isUnderConstruction());
+  }
 }
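
The new runOperations helper issues one of nearly every client-visible
operation, so a single call produces an edit log covering all the opcodes
annotated above, finishing with a hard lease recovery that it waits on. A
minimal usage sketch, assuming a freshly built single-NameNode mini cluster:

    // Hedged usage sketch: generate a full mix of edit log ops on NN 0.
    Configuration conf = new HdfsConfiguration();
    conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 512);
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
    cluster.waitActive();
    DistributedFileSystem fs = cluster.getFileSystem();
    DFSTestUtil.runOperations(cluster, fs, conf, 512L, 0);
    cluster.shutdown();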

Modified: hadoop/common/branches/branch-2.1-beta/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2.1-beta/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java?rev=1508336&r1=1508335&r2=1508336&view=diff
==============================================================================
--- hadoop/common/branches/branch-2.1-beta/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java (original)
+++ hadoop/common/branches/branch-2.1-beta/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java Tue Jul 30 08:01:00 2013
@@ -2038,6 +2038,10 @@ public class MiniDFSCluster {
     NameNodeAdapter.setLeasePeriod(getNamesystem(), soft, hard);
   }
   
+  public void setLeasePeriod(long soft, long hard, int nnIndex) {
+    NameNodeAdapter.setLeasePeriod(getNamesystem(nnIndex), soft, hard);
+  }
+  
   public void setWaitSafeMode(boolean wait) {
     this.waitSafeMode = wait;
   }
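
The overload simply routes the lease limits to a specific namesystem, which
runOperations above relies on when it shortens the hard lease limit on one
NameNode of a multi-NameNode cluster:

    // Hedged usage sketch: 60s soft limit, 1s hard limit on the first NN.
    cluster.setLeasePeriod(60 * 1000, 1000, 0);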

Modified: hadoop/common/branches/branch-2.1-beta/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/CreateEditsLog.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2.1-beta/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/CreateEditsLog.java?rev=1508336&r1=1508335&r2=1508336&view=diff
==============================================================================
--- hadoop/common/branches/branch-2.1-beta/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/CreateEditsLog.java (original)
+++ hadoop/common/branches/branch-2.1-beta/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/CreateEditsLog.java Tue Jul 30 08:01:00 2013
@@ -26,7 +26,6 @@ import org.apache.hadoop.hdfs.protocol.B
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
 import org.apache.hadoop.hdfs.server.common.GenerationStamp;
 import org.apache.hadoop.hdfs.server.common.Storage;
-import org.apache.hadoop.hdfs.server.namenode.INodeId;
 
 /**
  * 
@@ -97,8 +96,9 @@ public class CreateEditsLog {
         dirInode = new INodeDirectory(inodeId.nextValue(), null, p, 0L);
         editLog.logMkDir(currentDir, dirInode);
       }
-      editLog.logOpenFile(filePath, new INodeFileUnderConstruction(
-          inodeId.nextValue(), p, replication, 0, blockSize, "", "", null));
+      editLog.logOpenFile(filePath,
+          new INodeFileUnderConstruction(inodeId.nextValue(), p, replication,
+              0, blockSize, "", "", null), false);
       editLog.logCloseFile(filePath, inode);
 
       if (currentBlockId - bidAtSync >= 2000) { // sync every 2K blocks
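
logOpenFile now takes a third argument; given the retry-cache theme of the
commit, the flag presumably controls whether the caller's RPC ids are written
with the op, and this tool passes false because its synthetic edits have no
real client behind them. A sketch of the implied signature (the parameter
name toLogRpcIds is an assumption):

    // Hedged sketch of the new logging shape; toLogRpcIds is an assumed name.
    void logOpenFile(String path, INodeFileUnderConstruction newNode,
        boolean toLogRpcIds) {
      // build the OP_ADD record; when toLogRpcIds is true, also serialize the
      // RPC client id and call id so replay can repopulate the retry cache
    }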

Modified: hadoop/common/branches/branch-2.1-beta/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCommitBlockSynchronization.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2.1-beta/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCommitBlockSynchronization.java?rev=1508336&r1=1508335&r2=1508336&view=diff
==============================================================================
--- hadoop/common/branches/branch-2.1-beta/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCommitBlockSynchronization.java (original)
+++ hadoop/common/branches/branch-2.1-beta/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCommitBlockSynchronization.java Tue Jul 30 08:01:00 2013
@@ -18,9 +18,9 @@
 
 package org.apache.hadoop.hdfs.server.namenode;
 
-import static org.hamcrest.CoreMatchers.is;
-import static org.junit.Assert.*;
+import static org.junit.Assert.fail;
 import static org.mockito.Matchers.any;
+import static org.mockito.Matchers.anyBoolean;
 import static org.mockito.Mockito.doReturn;
 import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.spy;
@@ -28,7 +28,9 @@ import static org.mockito.Mockito.spy;
 import java.io.IOException;
 
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdfs.protocol.*;
+import org.apache.hadoop.hdfs.protocol.Block;
+import org.apache.hadoop.hdfs.protocol.DatanodeID;
+import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstruction;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
@@ -64,7 +66,7 @@ public class TestCommitBlockSynchronizat
         any(INodeFileUnderConstruction.class),
         any(BlockInfo.class));
     doReturn("").when(namesystemSpy).persistBlocks(
-        any(INodeFileUnderConstruction.class));
+        any(INodeFileUnderConstruction.class), anyBoolean());
     doReturn(mock(FSEditLog.class)).when(namesystemSpy).getEditLog();
 
     return namesystemSpy;
@@ -127,7 +129,6 @@ public class TestCommitBlockSynchronizat
     INodeFileUnderConstruction file = mock(INodeFileUnderConstruction.class);
     Block block = new Block(blockId, length, genStamp);
     FSNamesystem namesystemSpy = makeNameSystemSpy(block, file);
-    DatanodeDescriptor[] targets = new DatanodeDescriptor[0];
     DatanodeID[] newTargets = new DatanodeID[0];
 
     ExtendedBlock lastBlock = new ExtendedBlock();
@@ -148,7 +149,6 @@ public class TestCommitBlockSynchronizat
     INodeFileUnderConstruction file = mock(INodeFileUnderConstruction.class);
     Block block = new Block(blockId, length, genStamp);
     FSNamesystem namesystemSpy = makeNameSystemSpy(block, file);
-    DatanodeDescriptor[] targets = new DatanodeDescriptor[0];
     DatanodeID[] newTargets = new DatanodeID[0];
 
     ExtendedBlock lastBlock = new ExtendedBlock();
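
The stub changes because persistBlocks gained a second parameter: once a
Mockito matcher is used in a stubbed call, every argument needs one, so
anyBoolean() joins the existing any(...) matcher. For reference:

    // A matcher for every argument of the stubbed two-argument call.
    doReturn("").when(namesystemSpy).persistBlocks(
        any(INodeFileUnderConstruction.class), anyBoolean());
    // Mixing any(...) with a literal such as false would instead fail at
    // runtime with Mockito's InvalidUseOfMatchersException.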

Modified: hadoop/common/branches/branch-2.1-beta/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2.1-beta/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java?rev=1508336&r1=1508335&r2=1508336&view=diff
==============================================================================
--- hadoop/common/branches/branch-2.1-beta/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java (original)
+++ hadoop/common/branches/branch-2.1-beta/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java Tue Jul 30 08:01:00 2013
@@ -155,7 +155,7 @@ public class TestEditLog {
         INodeFileUnderConstruction inode = new INodeFileUnderConstruction(
             namesystem.allocateNewInodeId(), p, replication, blockSize, 0, "",
             "", null);
-        editLog.logOpenFile("/filename" + (startIndex + i), inode);
+        editLog.logOpenFile("/filename" + (startIndex + i), inode, false);
         editLog.logCloseFile("/filename" + (startIndex + i), inode);
         editLog.logSync();
       }
@@ -912,14 +912,14 @@ public class TestEditLog {
       log.setMetricsForTests(mockMetrics);
 
       for (int i = 0; i < 400; i++) {
-        log.logDelete(oneKB, 1L);
+        log.logDelete(oneKB, 1L, false);
       }
       // After ~400KB, we're still within the 512KB buffer size
       Mockito.verify(mockMetrics, Mockito.times(0)).addSync(Mockito.anyLong());
       
       // After ~400KB more, we should have done an automatic sync
       for (int i = 0; i < 400; i++) {
-        log.logDelete(oneKB, 1L);
+        log.logDelete(oneKB, 1L, false);
       }
       Mockito.verify(mockMetrics, Mockito.times(1)).addSync(Mockito.anyLong());
 
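The arithmetic behind the two loops: each logged delete carries a roughly
1 KB payload, so the first 400 ops stay under the 512 KB output buffer and
trigger no sync, while the next 400 push past it and should produce exactly
one automatic sync. As a sketch:

    // Hedged sketch of the buffer math the verifications rely on.
    int opSize = 1024;                  // ~1 KB per logged delete
    int autoSyncThreshold = 512 * 1024; // buffer size named in the comments
    assert 400 * opSize < autoSyncThreshold;  // first loop: no sync yet
    assert 800 * opSize >= autoSyncThreshold; // second loop: one autosync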

Modified: hadoop/common/branches/branch-2.1-beta/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSEditLogLoader.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2.1-beta/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSEditLogLoader.java?rev=1508336&r1=1508335&r2=1508336&view=diff
==============================================================================
--- hadoop/common/branches/branch-2.1-beta/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSEditLogLoader.java (original)
+++ hadoop/common/branches/branch-2.1-beta/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSEditLogLoader.java Tue Jul 30 08:01:00 2013
@@ -292,7 +292,7 @@ public class TestFSEditLogLoader {
         long thisTxId = spyLog.getLastWrittenTxId() + 1;
         offsetToTxId.put(trueOffset, thisTxId);
         System.err.println("txid " + thisTxId + " at offset " + trueOffset);
-        spyLog.logDelete("path" + i, i);
+        spyLog.logDelete("path" + i, i, false);
         spyLog.logSync();
       }
     } finally {
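
Here the extra false is just the new logDelete parameter threaded through;
the surrounding loop records, for each transaction, the byte offset at which
it starts, so the test can later corrupt the file at a known offset and
assert which txid is affected:

    // Hedged sketch of how the recorded offsets can be consulted afterwards.
    Long txId = offsetToTxId.get(trueOffset);
    System.err.println("offset " + trueOffset + " begins txid " + txId);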

Modified: hadoop/common/branches/branch-2.1-beta/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeRecovery.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2.1-beta/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeRecovery.java?rev=1508336&r1=1508335&r2=1508336&view=diff
==============================================================================
--- hadoop/common/branches/branch-2.1-beta/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeRecovery.java (original)
+++ hadoop/common/branches/branch-2.1-beta/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeRecovery.java Tue Jul 30 08:01:00 2013
@@ -30,8 +30,6 @@ import java.io.RandomAccessFile;
 import java.util.HashSet;
 import java.util.Set;
 
-import junit.framework.Assert;
-
 import org.apache.commons.io.FileUtils;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -273,7 +271,7 @@ public class TestNameNodeRecovery {
     } 
     
     public int getMaxOpSize() {
-      return 30;
+      return 36;
     }
   }
 
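The bumped getMaxOpSize gives this test's corruption machinery a safe upper
bound on how many bytes one serialized op may occupy: ops written under the
new layout can be a few bytes larger, since client-initiated ops now carry
RPC ids. A hedged illustration of the fields that can now appear in an op:

    // Illustrative only: sizes of the newly serialized per-op fields.
    final int RPC_CLIENT_ID_BYTES = 16;  // a UUID, as seen in editsStored.xml
    final int RPC_CALL_ID_BYTES = 4;     // a 32-bit call id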

Modified: hadoop/common/branches/branch-2.1-beta/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNamenodeRetryCache.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2.1-beta/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNamenodeRetryCache.java?rev=1508336&r1=1508335&r2=1508336&view=diff
==============================================================================
--- hadoop/common/branches/branch-2.1-beta/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNamenodeRetryCache.java (original)
+++ hadoop/common/branches/branch-2.1-beta/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNamenodeRetryCache.java Tue Jul 30 08:01:00 2013
@@ -19,12 +19,17 @@ package org.apache.hadoop.hdfs.server.na
 
 
 
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
 import java.io.IOException;
 import java.util.EnumSet;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.Map;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CreateFlag;
-import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Options.Rename;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.UnresolvedLinkException;
@@ -32,19 +37,21 @@ import org.apache.hadoop.fs.permission.F
 import org.apache.hadoop.fs.permission.PermissionStatus;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSTestUtil;
+import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
+import org.apache.hadoop.ipc.ClientId;
 import org.apache.hadoop.ipc.RPC.RpcKind;
-import org.apache.hadoop.ipc.RetryCache;
+import org.apache.hadoop.ipc.RetryCache.CacheEntry;
 import org.apache.hadoop.ipc.RpcConstants;
 import org.apache.hadoop.ipc.Server;
 import org.apache.hadoop.security.AccessControlException;
-import org.apache.hadoop.util.StringUtils;
+import org.apache.hadoop.util.LightWeightCache;
 import org.junit.After;
 import org.junit.Assert;
-import org.junit.BeforeClass;
+import org.junit.Before;
 import org.junit.Test;
 
 /**
@@ -61,19 +68,20 @@ import org.junit.Test;
  * request, a new callId is generated using {@link #newCall()}.
  */
 public class TestNamenodeRetryCache {
-  private static final byte[] CLIENT_ID = StringUtils.getUuidBytes();
+  private static final byte[] CLIENT_ID = ClientId.getClientId();
   private static MiniDFSCluster cluster;
   private static FSNamesystem namesystem;
   private static PermissionStatus perm = new PermissionStatus(
       "TestNamenodeRetryCache", null, FsPermission.getDefault());
-  private static FileSystem filesystem;
+  private static DistributedFileSystem filesystem;
   private static int callId = 100;
+  private static Configuration conf = new HdfsConfiguration();
+  private static final int BlockSize = 512;
   
   /** Start a cluster */
-  @BeforeClass
-  public static void setup() throws Exception {
-    Configuration conf = new HdfsConfiguration();
-    conf.set(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, "512");
+  @Before
+  public void setup() throws Exception {
+    conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BlockSize);
     conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ENABLE_RETRY_CACHE_KEY, true);
     cluster = new MiniDFSCluster.Builder(conf).build();
     cluster.waitActive();
@@ -109,8 +117,8 @@ public class TestNamenodeRetryCache {
   }
   
   private void concatSetup(String file1, String file2) throws Exception {
-    DFSTestUtil.createFile(filesystem, new Path(file1), 512, (short)1, 0L);
-    DFSTestUtil.createFile(filesystem, new Path(file2), 512, (short)1, 0L);
+    DFSTestUtil.createFile(filesystem, new Path(file1), BlockSize, (short)1, 0L);
+    DFSTestUtil.createFile(filesystem, new Path(file2), BlockSize, (short)1, 0L);
   }
   
   /**
@@ -192,19 +200,19 @@ public class TestNamenodeRetryCache {
     // Two retried calls succeed
     newCall();
     HdfsFileStatus status = namesystem.startFile(src, perm, "holder",
-        "clientmachine", EnumSet.of(CreateFlag.CREATE), true, (short) 1, 512);
+        "clientmachine", EnumSet.of(CreateFlag.CREATE), true, (short) 1, BlockSize);
     Assert.assertEquals(status, namesystem.startFile(src, perm, 
         "holder", "clientmachine", EnumSet.of(CreateFlag.CREATE), 
-        true, (short) 1, 512));
+        true, (short) 1, BlockSize));
     Assert.assertEquals(status, namesystem.startFile(src, perm, 
         "holder", "clientmachine", EnumSet.of(CreateFlag.CREATE), 
-        true, (short) 1, 512));
+        true, (short) 1, BlockSize));
     
     // A non-retried call fails
     newCall();
     try {
       namesystem.startFile(src, perm, "holder", "clientmachine",
-          EnumSet.of(CreateFlag.CREATE), true, (short) 1, 512);
+          EnumSet.of(CreateFlag.CREATE), true, (short) 1, BlockSize);
       Assert.fail("testCreate - expected exception is not thrown");
     } catch (IOException e) {
       // expected
@@ -352,4 +360,41 @@ public class TestNamenodeRetryCache {
     conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ENABLE_RETRY_CACHE_KEY, false);
     Assert.assertNull(FSNamesystem.initRetryCache(conf));
   }
+  
+  /**
+   * After running a set of operations, restart the NN and check whether the
+   * retry cache has been rebuilt from the edit log.
+   */
+  @Test
+  public void testRetryCacheRebuild() throws Exception {
+    DFSTestUtil.runOperations(cluster, filesystem, conf, BlockSize, 0);
+    
+    LightWeightCache<CacheEntry, CacheEntry> cacheSet = 
+        (LightWeightCache<CacheEntry, CacheEntry>) namesystem.getRetryCache().getCacheSet();
+    assertEquals(14, cacheSet.size());
+    
+    Map<CacheEntry, CacheEntry> oldEntries = 
+        new HashMap<CacheEntry, CacheEntry>();
+    Iterator<CacheEntry> iter = cacheSet.iterator();
+    while (iter.hasNext()) {
+      CacheEntry entry = iter.next();
+      oldEntries.put(entry, entry);
+    }
+    
+    // restart NameNode
+    cluster.restartNameNode();
+    cluster.waitActive();
+    namesystem = cluster.getNamesystem();
+    
+    // check retry cache
+    assertTrue(namesystem.hasRetryCache());
+    cacheSet = (LightWeightCache<CacheEntry, CacheEntry>) namesystem
+        .getRetryCache().getCacheSet();
+    assertEquals(14, cacheSet.size());
+    iter = cacheSet.iterator();
+    while (iter.hasNext()) {
+      CacheEntry entry = iter.next();
+      assertTrue(oldEntries.containsKey(entry));
+    }
+  }
 }
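
The new test exercises the full path: run the standard operation mix, capture
the 14 retry-cache entries created by the non-idempotent calls, restart the
NameNode so the namesystem is rebuilt purely from the edit log, and verify
the same entries reappear. Cache entries compare by RPC client id and call
id, so containment before and after the restart is the meaningful check; the
closing loop is equivalent to this sketch against the GSet interface:

    // Hedged sketch: every pre-restart entry must survive edit log replay.
    for (CacheEntry entry : oldEntries.keySet()) {
      assertTrue(cacheSet.contains(entry));
    }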

Modified: hadoop/common/branches/branch-2.1-beta/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2.1-beta/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored?rev=1508336&r1=1508335&r2=1508336&view=diff
==============================================================================
Binary files - no diff available.

Modified: hadoop/common/branches/branch-2.1-beta/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored.xml
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2.1-beta/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored.xml?rev=1508336&r1=1508335&r2=1508336&view=diff
==============================================================================
--- hadoop/common/branches/branch-2.1-beta/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored.xml (original)
+++ hadoop/common/branches/branch-2.1-beta/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored.xml Tue Jul 30 08:01:00 2013
@@ -1,6 +1,6 @@
 <?xml version="1.0" encoding="UTF-8"?>
 <EDITS>
-  <EDITS_VERSION>-46</EDITS_VERSION>
+  <EDITS_VERSION>-47</EDITS_VERSION>
   <RECORD>
     <OPCODE>OP_START_LOG_SEGMENT</OPCODE>
     <DATA>
@@ -13,8 +13,8 @@
       <TXID>2</TXID>
       <DELEGATION_KEY>
         <KEY_ID>1</KEY_ID>
-        <EXPIRY_DATE>1372798673941</EXPIRY_DATE>
-        <KEY>247c47b8bf6b89ec</KEY>
+        <EXPIRY_DATE>1375509063810</EXPIRY_DATE>
+        <KEY>4d47710649039b98</KEY>
       </DELEGATION_KEY>
     </DATA>
   </RECORD>
@@ -24,8 +24,8 @@
       <TXID>3</TXID>
       <DELEGATION_KEY>
         <KEY_ID>2</KEY_ID>
-        <EXPIRY_DATE>1372798673944</EXPIRY_DATE>
-        <KEY>ef1a35da6b4fc327</KEY>
+        <EXPIRY_DATE>1375509063812</EXPIRY_DATE>
+        <KEY>38cbb1d8fd90fcb2</KEY>
       </DELEGATION_KEY>
     </DATA>
   </RECORD>
@@ -37,16 +37,18 @@
       <INODEID>16386</INODEID>
       <PATH>/file_create_u\0001;F431</PATH>
       <REPLICATION>1</REPLICATION>
-      <MTIME>1372107474972</MTIME>
-      <ATIME>1372107474972</ATIME>
+      <MTIME>1374817864805</MTIME>
+      <ATIME>1374817864805</ATIME>
       <BLOCKSIZE>512</BLOCKSIZE>
-      <CLIENT_NAME>DFSClient_NONMAPREDUCE_-1834501254_1</CLIENT_NAME>
+      <CLIENT_NAME>DFSClient_NONMAPREDUCE_-1676409172_1</CLIENT_NAME>
       <CLIENT_MACHINE>127.0.0.1</CLIENT_MACHINE>
       <PERMISSION_STATUS>
-        <USERNAME>aagarwal</USERNAME>
+        <USERNAME>jing</USERNAME>
         <GROUPNAME>supergroup</GROUPNAME>
         <MODE>420</MODE>
       </PERMISSION_STATUS>
+      <RPC_CLIENTID>5245793a-984b-4264-8d7c-7890775547a0</RPC_CLIENTID>
+      <RPC_CALLID>8</RPC_CALLID>
     </DATA>
   </RECORD>
   <RECORD>
@@ -57,13 +59,13 @@
       <INODEID>0</INODEID>
       <PATH>/file_create_u\0001;F431</PATH>
       <REPLICATION>1</REPLICATION>
-      <MTIME>1372107474983</MTIME>
-      <ATIME>1372107474972</ATIME>
+      <MTIME>1374817864816</MTIME>
+      <ATIME>1374817864805</ATIME>
       <BLOCKSIZE>512</BLOCKSIZE>
       <CLIENT_NAME></CLIENT_NAME>
       <CLIENT_MACHINE></CLIENT_MACHINE>
       <PERMISSION_STATUS>
-        <USERNAME>aagarwal</USERNAME>
+        <USERNAME>jing</USERNAME>
         <GROUPNAME>supergroup</GROUPNAME>
         <MODE>420</MODE>
       </PERMISSION_STATUS>
@@ -76,7 +78,9 @@
       <LENGTH>0</LENGTH>
       <SRC>/file_create_u\0001;F431</SRC>
       <DST>/file_moved</DST>
-      <TIMESTAMP>1372107474986</TIMESTAMP>
+      <TIMESTAMP>1374817864818</TIMESTAMP>
+      <RPC_CLIENTID>5245793a-984b-4264-8d7c-7890775547a0</RPC_CLIENTID>
+      <RPC_CALLID>10</RPC_CALLID>
     </DATA>
   </RECORD>
   <RECORD>
@@ -85,7 +89,9 @@
       <TXID>7</TXID>
       <LENGTH>0</LENGTH>
       <PATH>/file_moved</PATH>
-      <TIMESTAMP>1372107474989</TIMESTAMP>
+      <TIMESTAMP>1374817864822</TIMESTAMP>
+      <RPC_CLIENTID>5245793a-984b-4264-8d7c-7890775547a0</RPC_CLIENTID>
+      <RPC_CALLID>11</RPC_CALLID>
     </DATA>
   </RECORD>
   <RECORD>
@@ -95,9 +101,9 @@
       <LENGTH>0</LENGTH>
       <INODEID>16387</INODEID>
       <PATH>/directory_mkdir</PATH>
-      <TIMESTAMP>1372107474991</TIMESTAMP>
+      <TIMESTAMP>1374817864825</TIMESTAMP>
       <PERMISSION_STATUS>
-        <USERNAME>aagarwal</USERNAME>
+        <USERNAME>jing</USERNAME>
         <GROUPNAME>supergroup</GROUPNAME>
         <MODE>493</MODE>
       </PERMISSION_STATUS>
@@ -130,6 +136,8 @@
       <TXID>12</TXID>
       <SNAPSHOTROOT>/directory_mkdir</SNAPSHOTROOT>
       <SNAPSHOTNAME>snapshot1</SNAPSHOTNAME>
+      <RPC_CLIENTID>5245793a-984b-4264-8d7c-7890775547a0</RPC_CLIENTID>
+      <RPC_CALLID>16</RPC_CALLID>
     </DATA>
   </RECORD>
   <RECORD>
@@ -139,6 +147,8 @@
       <SNAPSHOTROOT>/directory_mkdir</SNAPSHOTROOT>
       <SNAPSHOTOLDNAME>snapshot1</SNAPSHOTOLDNAME>
       <SNAPSHOTNEWNAME>snapshot2</SNAPSHOTNEWNAME>
+      <RPC_CLIENTID>5245793a-984b-4264-8d7c-7890775547a0</RPC_CLIENTID>
+      <RPC_CALLID>17</RPC_CALLID>
     </DATA>
   </RECORD>
   <RECORD>
@@ -147,6 +157,8 @@
       <TXID>14</TXID>
       <SNAPSHOTROOT>/directory_mkdir</SNAPSHOTROOT>
       <SNAPSHOTNAME>snapshot2</SNAPSHOTNAME>
+      <RPC_CLIENTID>5245793a-984b-4264-8d7c-7890775547a0</RPC_CLIENTID>
+      <RPC_CALLID>18</RPC_CALLID>
     </DATA>
   </RECORD>
   <RECORD>
@@ -157,16 +169,18 @@
       <INODEID>16388</INODEID>
       <PATH>/file_create_u\0001;F431</PATH>
       <REPLICATION>1</REPLICATION>
-      <MTIME>1372107475007</MTIME>
-      <ATIME>1372107475007</ATIME>
+      <MTIME>1374817864846</MTIME>
+      <ATIME>1374817864846</ATIME>
       <BLOCKSIZE>512</BLOCKSIZE>
-      <CLIENT_NAME>DFSClient_NONMAPREDUCE_-1834501254_1</CLIENT_NAME>
+      <CLIENT_NAME>DFSClient_NONMAPREDUCE_-1676409172_1</CLIENT_NAME>
       <CLIENT_MACHINE>127.0.0.1</CLIENT_MACHINE>
       <PERMISSION_STATUS>
-        <USERNAME>aagarwal</USERNAME>
+        <USERNAME>jing</USERNAME>
         <GROUPNAME>supergroup</GROUPNAME>
         <MODE>420</MODE>
       </PERMISSION_STATUS>
+      <RPC_CLIENTID>5245793a-984b-4264-8d7c-7890775547a0</RPC_CLIENTID>
+      <RPC_CALLID>19</RPC_CALLID>
     </DATA>
   </RECORD>
   <RECORD>
@@ -177,13 +191,13 @@
       <INODEID>0</INODEID>
       <PATH>/file_create_u\0001;F431</PATH>
       <REPLICATION>1</REPLICATION>
-      <MTIME>1372107475009</MTIME>
-      <ATIME>1372107475007</ATIME>
+      <MTIME>1374817864848</MTIME>
+      <ATIME>1374817864846</ATIME>
       <BLOCKSIZE>512</BLOCKSIZE>
       <CLIENT_NAME></CLIENT_NAME>
       <CLIENT_MACHINE></CLIENT_MACHINE>
       <PERMISSION_STATUS>
-        <USERNAME>aagarwal</USERNAME>
+        <USERNAME>jing</USERNAME>
         <GROUPNAME>supergroup</GROUPNAME>
         <MODE>420</MODE>
       </PERMISSION_STATUS>
@@ -239,8 +253,10 @@
       <LENGTH>0</LENGTH>
       <SRC>/file_create_u\0001;F431</SRC>
       <DST>/file_moved</DST>
-      <TIMESTAMP>1372107475019</TIMESTAMP>
+      <TIMESTAMP>1374817864860</TIMESTAMP>
       <OPTIONS>NONE</OPTIONS>
+      <RPC_CLIENTID>5245793a-984b-4264-8d7c-7890775547a0</RPC_CLIENTID>
+      <RPC_CALLID>26</RPC_CALLID>
     </DATA>
   </RECORD>
   <RECORD>
@@ -251,16 +267,18 @@
       <INODEID>16389</INODEID>
       <PATH>/file_concat_target</PATH>
       <REPLICATION>1</REPLICATION>
-      <MTIME>1372107475023</MTIME>
-      <ATIME>1372107475023</ATIME>
+      <MTIME>1374817864864</MTIME>
+      <ATIME>1374817864864</ATIME>
       <BLOCKSIZE>512</BLOCKSIZE>
-      <CLIENT_NAME>DFSClient_NONMAPREDUCE_-1834501254_1</CLIENT_NAME>
+      <CLIENT_NAME>DFSClient_NONMAPREDUCE_-1676409172_1</CLIENT_NAME>
       <CLIENT_MACHINE>127.0.0.1</CLIENT_MACHINE>
       <PERMISSION_STATUS>
-        <USERNAME>aagarwal</USERNAME>
+        <USERNAME>jing</USERNAME>
         <GROUPNAME>supergroup</GROUPNAME>
         <MODE>420</MODE>
       </PERMISSION_STATUS>
+      <RPC_CLIENTID>5245793a-984b-4264-8d7c-7890775547a0</RPC_CLIENTID>
+      <RPC_CALLID>28</RPC_CALLID>
     </DATA>
   </RECORD>
   <RECORD>
@@ -287,6 +305,8 @@
         <NUM_BYTES>0</NUM_BYTES>
         <GENSTAMP>1001</GENSTAMP>
       </BLOCK>
+      <RPC_CLIENTID></RPC_CLIENTID>
+      <RPC_CALLID>-2</RPC_CALLID>
     </DATA>
   </RECORD>
   <RECORD>
@@ -318,6 +338,8 @@
         <NUM_BYTES>0</NUM_BYTES>
         <GENSTAMP>1002</GENSTAMP>
       </BLOCK>
+      <RPC_CLIENTID></RPC_CLIENTID>
+      <RPC_CALLID>-2</RPC_CALLID>
     </DATA>
   </RECORD>
   <RECORD>
@@ -354,6 +376,8 @@
         <NUM_BYTES>0</NUM_BYTES>
         <GENSTAMP>1003</GENSTAMP>
       </BLOCK>
+      <RPC_CLIENTID></RPC_CLIENTID>
+      <RPC_CALLID>-2</RPC_CALLID>
     </DATA>
   </RECORD>
   <RECORD>
@@ -364,8 +388,8 @@
       <INODEID>0</INODEID>
       <PATH>/file_concat_target</PATH>
       <REPLICATION>1</REPLICATION>
-      <MTIME>1372107475091</MTIME>
-      <ATIME>1372107475023</ATIME>
+      <MTIME>1374817864927</MTIME>
+      <ATIME>1374817864864</ATIME>
       <BLOCKSIZE>512</BLOCKSIZE>
       <CLIENT_NAME></CLIENT_NAME>
       <CLIENT_MACHINE></CLIENT_MACHINE>
@@ -385,7 +409,7 @@
         <GENSTAMP>1003</GENSTAMP>
       </BLOCK>
       <PERMISSION_STATUS>
-        <USERNAME>aagarwal</USERNAME>
+        <USERNAME>jing</USERNAME>
         <GROUPNAME>supergroup</GROUPNAME>
         <MODE>420</MODE>
       </PERMISSION_STATUS>
@@ -399,16 +423,18 @@
       <INODEID>16390</INODEID>
       <PATH>/file_concat_0</PATH>
       <REPLICATION>1</REPLICATION>
-      <MTIME>1372107475093</MTIME>
-      <ATIME>1372107475093</ATIME>
+      <MTIME>1374817864929</MTIME>
+      <ATIME>1374817864929</ATIME>
       <BLOCKSIZE>512</BLOCKSIZE>
-      <CLIENT_NAME>DFSClient_NONMAPREDUCE_-1834501254_1</CLIENT_NAME>
+      <CLIENT_NAME>DFSClient_NONMAPREDUCE_-1676409172_1</CLIENT_NAME>
       <CLIENT_MACHINE>127.0.0.1</CLIENT_MACHINE>
       <PERMISSION_STATUS>
-        <USERNAME>aagarwal</USERNAME>
+        <USERNAME>jing</USERNAME>
         <GROUPNAME>supergroup</GROUPNAME>
         <MODE>420</MODE>
       </PERMISSION_STATUS>
+      <RPC_CLIENTID>5245793a-984b-4264-8d7c-7890775547a0</RPC_CLIENTID>
+      <RPC_CALLID>41</RPC_CALLID>
     </DATA>
   </RECORD>
   <RECORD>
@@ -435,6 +461,8 @@
         <NUM_BYTES>0</NUM_BYTES>
         <GENSTAMP>1004</GENSTAMP>
       </BLOCK>
+      <RPC_CLIENTID></RPC_CLIENTID>
+      <RPC_CALLID>-2</RPC_CALLID>
     </DATA>
   </RECORD>
   <RECORD>
@@ -466,6 +494,8 @@
         <NUM_BYTES>0</NUM_BYTES>
         <GENSTAMP>1005</GENSTAMP>
       </BLOCK>
+      <RPC_CLIENTID></RPC_CLIENTID>
+      <RPC_CALLID>-2</RPC_CALLID>
     </DATA>
   </RECORD>
   <RECORD>
@@ -502,6 +532,8 @@
         <NUM_BYTES>0</NUM_BYTES>
         <GENSTAMP>1006</GENSTAMP>
       </BLOCK>
+      <RPC_CLIENTID></RPC_CLIENTID>
+      <RPC_CALLID>-2</RPC_CALLID>
     </DATA>
   </RECORD>
   <RECORD>
@@ -512,8 +544,8 @@
       <INODEID>0</INODEID>
       <PATH>/file_concat_0</PATH>
       <REPLICATION>1</REPLICATION>
-      <MTIME>1372107475110</MTIME>
-      <ATIME>1372107475093</ATIME>
+      <MTIME>1374817864947</MTIME>
+      <ATIME>1374817864929</ATIME>
       <BLOCKSIZE>512</BLOCKSIZE>
       <CLIENT_NAME></CLIENT_NAME>
       <CLIENT_MACHINE></CLIENT_MACHINE>
@@ -533,7 +565,7 @@
         <GENSTAMP>1006</GENSTAMP>
       </BLOCK>
       <PERMISSION_STATUS>
-        <USERNAME>aagarwal</USERNAME>
+        <USERNAME>jing</USERNAME>
         <GROUPNAME>supergroup</GROUPNAME>
         <MODE>420</MODE>
       </PERMISSION_STATUS>
@@ -547,16 +579,18 @@
       <INODEID>16391</INODEID>
       <PATH>/file_concat_1</PATH>
       <REPLICATION>1</REPLICATION>
-      <MTIME>1372107475112</MTIME>
-      <ATIME>1372107475112</ATIME>
+      <MTIME>1374817864950</MTIME>
+      <ATIME>1374817864950</ATIME>
       <BLOCKSIZE>512</BLOCKSIZE>
-      <CLIENT_NAME>DFSClient_NONMAPREDUCE_-1834501254_1</CLIENT_NAME>
+      <CLIENT_NAME>DFSClient_NONMAPREDUCE_-1676409172_1</CLIENT_NAME>
       <CLIENT_MACHINE>127.0.0.1</CLIENT_MACHINE>
       <PERMISSION_STATUS>
-        <USERNAME>aagarwal</USERNAME>
+        <USERNAME>jing</USERNAME>
         <GROUPNAME>supergroup</GROUPNAME>
         <MODE>420</MODE>
       </PERMISSION_STATUS>
+      <RPC_CLIENTID>5245793a-984b-4264-8d7c-7890775547a0</RPC_CLIENTID>
+      <RPC_CALLID>53</RPC_CALLID>
     </DATA>
   </RECORD>
   <RECORD>
@@ -583,6 +617,8 @@
         <NUM_BYTES>0</NUM_BYTES>
         <GENSTAMP>1007</GENSTAMP>
       </BLOCK>
+      <RPC_CLIENTID></RPC_CLIENTID>
+      <RPC_CALLID>-2</RPC_CALLID>
     </DATA>
   </RECORD>
   <RECORD>
@@ -614,6 +650,8 @@
         <NUM_BYTES>0</NUM_BYTES>
         <GENSTAMP>1008</GENSTAMP>
       </BLOCK>
+      <RPC_CLIENTID></RPC_CLIENTID>
+      <RPC_CALLID>-2</RPC_CALLID>
     </DATA>
   </RECORD>
   <RECORD>
@@ -650,6 +688,8 @@
         <NUM_BYTES>0</NUM_BYTES>
         <GENSTAMP>1009</GENSTAMP>
       </BLOCK>
+      <RPC_CLIENTID></RPC_CLIENTID>
+      <RPC_CALLID>-2</RPC_CALLID>
     </DATA>
   </RECORD>
   <RECORD>
@@ -660,8 +700,8 @@
       <INODEID>0</INODEID>
       <PATH>/file_concat_1</PATH>
       <REPLICATION>1</REPLICATION>
-      <MTIME>1372107475131</MTIME>
-      <ATIME>1372107475112</ATIME>
+      <MTIME>1374817864966</MTIME>
+      <ATIME>1374817864950</ATIME>
       <BLOCKSIZE>512</BLOCKSIZE>
       <CLIENT_NAME></CLIENT_NAME>
       <CLIENT_MACHINE></CLIENT_MACHINE>
@@ -681,7 +721,7 @@
         <GENSTAMP>1009</GENSTAMP>
       </BLOCK>
       <PERMISSION_STATUS>
-        <USERNAME>aagarwal</USERNAME>
+        <USERNAME>jing</USERNAME>
         <GROUPNAME>supergroup</GROUPNAME>
         <MODE>420</MODE>
       </PERMISSION_STATUS>
@@ -693,11 +733,13 @@
       <TXID>56</TXID>
       <LENGTH>0</LENGTH>
       <TRG>/file_concat_target</TRG>
-      <TIMESTAMP>1372107475133</TIMESTAMP>
+      <TIMESTAMP>1374817864967</TIMESTAMP>
       <SOURCES>
         <SOURCE1>/file_concat_0</SOURCE1>
         <SOURCE2>/file_concat_1</SOURCE2>
       </SOURCES>
+      <RPC_CLIENTID>5245793a-984b-4264-8d7c-7890775547a0</RPC_CLIENTID>
+      <RPC_CALLID>64</RPC_CALLID>
     </DATA>
   </RECORD>
   <RECORD>
@@ -708,13 +750,15 @@
       <INODEID>16392</INODEID>
       <PATH>/file_symlink</PATH>
       <VALUE>/file_concat_target</VALUE>
-      <MTIME>1372107475137</MTIME>
-      <ATIME>1372107475137</ATIME>
+      <MTIME>1374817864971</MTIME>
+      <ATIME>1374817864971</ATIME>
       <PERMISSION_STATUS>
-        <USERNAME>aagarwal</USERNAME>
+        <USERNAME>jing</USERNAME>
         <GROUPNAME>supergroup</GROUPNAME>
         <MODE>511</MODE>
       </PERMISSION_STATUS>
+      <RPC_CLIENTID>5245793a-984b-4264-8d7c-7890775547a0</RPC_CLIENTID>
+      <RPC_CALLID>65</RPC_CALLID>
     </DATA>
   </RECORD>
   <RECORD>
@@ -724,14 +768,14 @@
       <DELEGATION_TOKEN_IDENTIFIER>
         <KIND>HDFS_DELEGATION_TOKEN</KIND>
         <SEQUENCE_NUMBER>1</SEQUENCE_NUMBER>
-        <OWNER>aagarwal</OWNER>
+        <OWNER>jing</OWNER>
         <RENEWER>JobTracker</RENEWER>
         <REALUSER></REALUSER>
-        <ISSUE_DATE>1372107475140</ISSUE_DATE>
-        <MAX_DATE>1372712275140</MAX_DATE>
+        <ISSUE_DATE>1374817864974</ISSUE_DATE>
+        <MAX_DATE>1375422664974</MAX_DATE>
         <MASTER_KEY_ID>2</MASTER_KEY_ID>
       </DELEGATION_TOKEN_IDENTIFIER>
-      <EXPIRY_TIME>1372193875140</EXPIRY_TIME>
+      <EXPIRY_TIME>1374904264974</EXPIRY_TIME>
     </DATA>
   </RECORD>
   <RECORD>
@@ -741,14 +785,14 @@
       <DELEGATION_TOKEN_IDENTIFIER>
         <KIND>HDFS_DELEGATION_TOKEN</KIND>
         <SEQUENCE_NUMBER>1</SEQUENCE_NUMBER>
-        <OWNER>aagarwal</OWNER>
+        <OWNER>jing</OWNER>
         <RENEWER>JobTracker</RENEWER>
         <REALUSER></REALUSER>
-        <ISSUE_DATE>1372107475140</ISSUE_DATE>
-        <MAX_DATE>1372712275140</MAX_DATE>
+        <ISSUE_DATE>1374817864974</ISSUE_DATE>
+        <MAX_DATE>1375422664974</MAX_DATE>
         <MASTER_KEY_ID>2</MASTER_KEY_ID>
       </DELEGATION_TOKEN_IDENTIFIER>
-      <EXPIRY_TIME>1372193875208</EXPIRY_TIME>
+      <EXPIRY_TIME>1374904265012</EXPIRY_TIME>
     </DATA>
   </RECORD>
   <RECORD>
@@ -758,11 +802,11 @@
       <DELEGATION_TOKEN_IDENTIFIER>
         <KIND>HDFS_DELEGATION_TOKEN</KIND>
         <SEQUENCE_NUMBER>1</SEQUENCE_NUMBER>
-        <OWNER>aagarwal</OWNER>
+        <OWNER>jing</OWNER>
         <RENEWER>JobTracker</RENEWER>
         <REALUSER></REALUSER>
-        <ISSUE_DATE>1372107475140</ISSUE_DATE>
-        <MAX_DATE>1372712275140</MAX_DATE>
+        <ISSUE_DATE>1374817864974</ISSUE_DATE>
+        <MAX_DATE>1375422664974</MAX_DATE>
         <MASTER_KEY_ID>2</MASTER_KEY_ID>
       </DELEGATION_TOKEN_IDENTIFIER>
     </DATA>
@@ -773,18 +817,20 @@
       <TXID>61</TXID>
       <LENGTH>0</LENGTH>
       <INODEID>16393</INODEID>
-      <PATH>/written_file</PATH>
+      <PATH>/hard-lease-recovery-test</PATH>
       <REPLICATION>1</REPLICATION>
-      <MTIME>1372107475214</MTIME>
-      <ATIME>1372107475214</ATIME>
+      <MTIME>1374817865017</MTIME>
+      <ATIME>1374817865017</ATIME>
       <BLOCKSIZE>512</BLOCKSIZE>
-      <CLIENT_NAME>DFSClient_NONMAPREDUCE_-1834501254_1</CLIENT_NAME>
+      <CLIENT_NAME>DFSClient_NONMAPREDUCE_-1676409172_1</CLIENT_NAME>
       <CLIENT_MACHINE>127.0.0.1</CLIENT_MACHINE>
       <PERMISSION_STATUS>
-        <USERNAME>aagarwal</USERNAME>
+        <USERNAME>jing</USERNAME>
         <GROUPNAME>supergroup</GROUPNAME>
         <MODE>420</MODE>
       </PERMISSION_STATUS>
+      <RPC_CLIENTID>5245793a-984b-4264-8d7c-7890775547a0</RPC_CLIENTID>
+      <RPC_CALLID>69</RPC_CALLID>
     </DATA>
   </RECORD>
   <RECORD>
@@ -805,178 +851,42 @@
     <OPCODE>OP_UPDATE_BLOCKS</OPCODE>
     <DATA>
       <TXID>64</TXID>
-      <PATH>/written_file</PATH>
+      <PATH>/hard-lease-recovery-test</PATH>
       <BLOCK>
         <BLOCK_ID>1073741834</BLOCK_ID>
         <NUM_BYTES>0</NUM_BYTES>
         <GENSTAMP>1010</GENSTAMP>
       </BLOCK>
+      <RPC_CLIENTID></RPC_CLIENTID>
+      <RPC_CALLID>-2</RPC_CALLID>
     </DATA>
   </RECORD>
   <RECORD>
-    <OPCODE>OP_CLOSE</OPCODE>
+    <OPCODE>OP_UPDATE_BLOCKS</OPCODE>
     <DATA>
       <TXID>65</TXID>
-      <LENGTH>0</LENGTH>
-      <INODEID>0</INODEID>
-      <PATH>/written_file</PATH>
-      <REPLICATION>1</REPLICATION>
-      <MTIME>1372107475221</MTIME>
-      <ATIME>1372107475214</ATIME>
-      <BLOCKSIZE>512</BLOCKSIZE>
-      <CLIENT_NAME></CLIENT_NAME>
-      <CLIENT_MACHINE></CLIENT_MACHINE>
-      <BLOCK>
-        <BLOCK_ID>1073741834</BLOCK_ID>
-        <NUM_BYTES>9</NUM_BYTES>
-        <GENSTAMP>1010</GENSTAMP>
-      </BLOCK>
-      <PERMISSION_STATUS>
-        <USERNAME>aagarwal</USERNAME>
-        <GROUPNAME>supergroup</GROUPNAME>
-        <MODE>420</MODE>
-      </PERMISSION_STATUS>
-    </DATA>
-  </RECORD>
-  <RECORD>
-    <OPCODE>OP_ADD</OPCODE>
-    <DATA>
-      <TXID>66</TXID>
-      <LENGTH>0</LENGTH>
-      <INODEID>16393</INODEID>
-      <PATH>/written_file</PATH>
-      <REPLICATION>1</REPLICATION>
-      <MTIME>1372107475221</MTIME>
-      <ATIME>1372107475214</ATIME>
-      <BLOCKSIZE>512</BLOCKSIZE>
-      <CLIENT_NAME>DFSClient_NONMAPREDUCE_-1834501254_1</CLIENT_NAME>
-      <CLIENT_MACHINE>127.0.0.1</CLIENT_MACHINE>
+      <PATH>/hard-lease-recovery-test</PATH>
       <BLOCK>
         <BLOCK_ID>1073741834</BLOCK_ID>
-        <NUM_BYTES>9</NUM_BYTES>
+        <NUM_BYTES>0</NUM_BYTES>
         <GENSTAMP>1010</GENSTAMP>
       </BLOCK>
-      <PERMISSION_STATUS>
-        <USERNAME>aagarwal</USERNAME>
-        <GROUPNAME>supergroup</GROUPNAME>
-        <MODE>420</MODE>
-      </PERMISSION_STATUS>
+      <RPC_CLIENTID></RPC_CLIENTID>
+      <RPC_CALLID>-2</RPC_CALLID>
     </DATA>
   </RECORD>
   <RECORD>
     <OPCODE>OP_SET_GENSTAMP_V2</OPCODE>
     <DATA>
-      <TXID>67</TXID>
+      <TXID>66</TXID>
       <GENSTAMPV2>1011</GENSTAMPV2>
     </DATA>
   </RECORD>
   <RECORD>
-    <OPCODE>OP_UPDATE_BLOCKS</OPCODE>
-    <DATA>
-      <TXID>68</TXID>
-      <PATH>/written_file</PATH>
-      <BLOCK>
-        <BLOCK_ID>1073741834</BLOCK_ID>
-        <NUM_BYTES>9</NUM_BYTES>
-        <GENSTAMP>1011</GENSTAMP>
-      </BLOCK>
-    </DATA>
-  </RECORD>
-  <RECORD>
-    <OPCODE>OP_CLOSE</OPCODE>
-    <DATA>
-      <TXID>69</TXID>
-      <LENGTH>0</LENGTH>
-      <INODEID>0</INODEID>
-      <PATH>/written_file</PATH>
-      <REPLICATION>1</REPLICATION>
-      <MTIME>1372107475272</MTIME>
-      <ATIME>1372107475221</ATIME>
-      <BLOCKSIZE>512</BLOCKSIZE>
-      <CLIENT_NAME></CLIENT_NAME>
-      <CLIENT_MACHINE></CLIENT_MACHINE>
-      <BLOCK>
-        <BLOCK_ID>1073741834</BLOCK_ID>
-        <NUM_BYTES>26</NUM_BYTES>
-        <GENSTAMP>1011</GENSTAMP>
-      </BLOCK>
-      <PERMISSION_STATUS>
-        <USERNAME>aagarwal</USERNAME>
-        <GROUPNAME>supergroup</GROUPNAME>
-        <MODE>420</MODE>
-      </PERMISSION_STATUS>
-    </DATA>
-  </RECORD>
-  <RECORD>
-    <OPCODE>OP_ADD</OPCODE>
-    <DATA>
-      <TXID>70</TXID>
-      <LENGTH>0</LENGTH>
-      <INODEID>16394</INODEID>
-      <PATH>/hard-lease-recovery-test</PATH>
-      <REPLICATION>1</REPLICATION>
-      <MTIME>1372107475275</MTIME>
-      <ATIME>1372107475275</ATIME>
-      <BLOCKSIZE>512</BLOCKSIZE>
-      <CLIENT_NAME>DFSClient_NONMAPREDUCE_-1834501254_1</CLIENT_NAME>
-      <CLIENT_MACHINE>127.0.0.1</CLIENT_MACHINE>
-      <PERMISSION_STATUS>
-        <USERNAME>aagarwal</USERNAME>
-        <GROUPNAME>supergroup</GROUPNAME>
-        <MODE>420</MODE>
-      </PERMISSION_STATUS>
-    </DATA>
-  </RECORD>
-  <RECORD>
-    <OPCODE>OP_ALLOCATE_BLOCK_ID</OPCODE>
-    <DATA>
-      <TXID>71</TXID>
-      <BLOCK_ID>1073741835</BLOCK_ID>
-    </DATA>
-  </RECORD>
-  <RECORD>
-    <OPCODE>OP_SET_GENSTAMP_V2</OPCODE>
-    <DATA>
-      <TXID>72</TXID>
-      <GENSTAMPV2>1012</GENSTAMPV2>
-    </DATA>
-  </RECORD>
-  <RECORD>
-    <OPCODE>OP_UPDATE_BLOCKS</OPCODE>
-    <DATA>
-      <TXID>73</TXID>
-      <PATH>/hard-lease-recovery-test</PATH>
-      <BLOCK>
-        <BLOCK_ID>1073741835</BLOCK_ID>
-        <NUM_BYTES>0</NUM_BYTES>
-        <GENSTAMP>1012</GENSTAMP>
-      </BLOCK>
-    </DATA>
-  </RECORD>
-  <RECORD>
-    <OPCODE>OP_UPDATE_BLOCKS</OPCODE>
-    <DATA>
-      <TXID>74</TXID>
-      <PATH>/hard-lease-recovery-test</PATH>
-      <BLOCK>
-        <BLOCK_ID>1073741835</BLOCK_ID>
-        <NUM_BYTES>0</NUM_BYTES>
-        <GENSTAMP>1012</GENSTAMP>
-      </BLOCK>
-    </DATA>
-  </RECORD>
-  <RECORD>
-    <OPCODE>OP_SET_GENSTAMP_V2</OPCODE>
-    <DATA>
-      <TXID>75</TXID>
-      <GENSTAMPV2>1013</GENSTAMPV2>
-    </DATA>
-  </RECORD>
-  <RECORD>
     <OPCODE>OP_REASSIGN_LEASE</OPCODE>
     <DATA>
-      <TXID>76</TXID>
-      <LEASEHOLDER>DFSClient_NONMAPREDUCE_-1834501254_1</LEASEHOLDER>
+      <TXID>67</TXID>
+      <LEASEHOLDER>DFSClient_NONMAPREDUCE_-1676409172_1</LEASEHOLDER>
       <PATH>/hard-lease-recovery-test</PATH>
       <NEWHOLDER>HDFS_NameNode</NEWHOLDER>
     </DATA>
@@ -984,23 +894,23 @@
   <RECORD>
     <OPCODE>OP_CLOSE</OPCODE>
     <DATA>
-      <TXID>77</TXID>
+      <TXID>68</TXID>
       <LENGTH>0</LENGTH>
       <INODEID>0</INODEID>
       <PATH>/hard-lease-recovery-test</PATH>
       <REPLICATION>1</REPLICATION>
-      <MTIME>1372107477870</MTIME>
-      <ATIME>1372107475275</ATIME>
+      <MTIME>1374817867688</MTIME>
+      <ATIME>1374817865017</ATIME>
       <BLOCKSIZE>512</BLOCKSIZE>
       <CLIENT_NAME></CLIENT_NAME>
       <CLIENT_MACHINE></CLIENT_MACHINE>
       <BLOCK>
-        <BLOCK_ID>1073741835</BLOCK_ID>
+        <BLOCK_ID>1073741834</BLOCK_ID>
         <NUM_BYTES>11</NUM_BYTES>
-        <GENSTAMP>1013</GENSTAMP>
+        <GENSTAMP>1011</GENSTAMP>
       </BLOCK>
       <PERMISSION_STATUS>
-        <USERNAME>aagarwal</USERNAME>
+        <USERNAME>jing</USERNAME>
         <GROUPNAME>supergroup</GROUPNAME>
         <MODE>420</MODE>
       </PERMISSION_STATUS>
@@ -1009,7 +919,7 @@
   <RECORD>
     <OPCODE>OP_END_LOG_SEGMENT</OPCODE>
     <DATA>
-      <TXID>78</TXID>
+      <TXID>69</TXID>
     </DATA>
   </RECORD>
 </EDITS>
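
The regenerated editsStored.xml shows the user-visible shape of the change:
client-initiated records (OP_ADD, OP_RENAME, the snapshot ops,
OP_CONCAT_DELETE, OP_SYMLINK) now carry RPC_CLIENTID and RPC_CALLID, while
server-internal records such as OP_UPDATE_BLOCKS carry an empty client id and
a call id of -2. On replay that is enough to rebuild the retry cache; a
sketch of the consuming side, with method names assumed rather than quoted
from the loader:

    // Hedged sketch of replay-side retry cache population.
    void applyOp(FSEditLogOp op) {
      // ...apply the namespace change itself...
      if (op.rpcCallId >= 0 && op.rpcClientId.length > 0) {
        // only client-initiated ops carry a usable client identity
        retryCache.addCacheEntry(op.rpcClientId, op.rpcCallId);
      }
    }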