Posted to hdfs-commits@hadoop.apache.org by vi...@apache.org on 2013/08/12 23:26:09 UTC

svn commit: r1513258 [6/6] - in /hadoop/common/branches/YARN-321/hadoop-hdfs-project: ./ hadoop-hdfs-httpfs/ hadoop-hdfs-httpfs/src/main/tomcat/ROOT/ hadoop-hdfs-nfs/ hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/mount/ hadoop-hdfs-nfs/src/m...

Modified: hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestInterDatanodeProtocol.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestInterDatanodeProtocol.java?rev=1513258&r1=1513257&r2=1513258&view=diff
==============================================================================
--- hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestInterDatanodeProtocol.java (original)
+++ hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestInterDatanodeProtocol.java Mon Aug 12 21:25:49 2013
@@ -167,7 +167,7 @@ public class TestInterDatanodeProtocol {
       cluster.waitActive();
 
       //create a file
-      DistributedFileSystem dfs = (DistributedFileSystem)cluster.getFileSystem();
+      DistributedFileSystem dfs = cluster.getFileSystem();
       String filestr = "/foo";
       Path filepath = new Path(filestr);
       DFSTestUtil.createFile(dfs, filepath, 1024L, (short)3, 0L);
@@ -225,7 +225,7 @@ public class TestInterDatanodeProtocol {
   }
 
   /** Test 
-   * {@link FsDatasetImpl#initReplicaRecovery(String, ReplicaMap, Block, long)}
+   * {@link FsDatasetImpl#initReplicaRecovery(String, ReplicaMap, Block, long, long)}
    */
   @Test
   public void testInitReplicaRecovery() throws IOException {
@@ -246,8 +246,9 @@ public class TestInterDatanodeProtocol {
       final ReplicaInfo originalInfo = map.get(bpid, b);
 
       final long recoveryid = gs + 1;
-      final ReplicaRecoveryInfo recoveryInfo = FsDatasetImpl.initReplicaRecovery(
-          bpid, map, blocks[0], recoveryid);
+      final ReplicaRecoveryInfo recoveryInfo = FsDatasetImpl
+          .initReplicaRecovery(bpid, map, blocks[0], recoveryid,
+              DFSConfigKeys.DFS_DATANODE_XCEIVER_STOP_TIMEOUT_MILLIS_DEFAULT);
       assertEquals(originalInfo, recoveryInfo);
 
       final ReplicaUnderRecovery updatedInfo = (ReplicaUnderRecovery)map.get(bpid, b);
@@ -256,7 +257,9 @@ public class TestInterDatanodeProtocol {
 
       //recover one more time 
       final long recoveryid2 = gs + 2;
-      final ReplicaRecoveryInfo recoveryInfo2 = FsDatasetImpl.initReplicaRecovery(bpid, map, blocks[0], recoveryid2);
+      final ReplicaRecoveryInfo recoveryInfo2 = FsDatasetImpl
+          .initReplicaRecovery(bpid, map, blocks[0], recoveryid2,
+              DFSConfigKeys.DFS_DATANODE_XCEIVER_STOP_TIMEOUT_MILLIS_DEFAULT);
       assertEquals(originalInfo, recoveryInfo2);
 
       final ReplicaUnderRecovery updatedInfo2 = (ReplicaUnderRecovery)map.get(bpid, b);
@@ -265,7 +268,8 @@ public class TestInterDatanodeProtocol {
       
       //case RecoveryInProgressException
       try {
-        FsDatasetImpl.initReplicaRecovery(bpid, map, b, recoveryid);
+        FsDatasetImpl.initReplicaRecovery(bpid, map, b, recoveryid,
+            DFSConfigKeys.DFS_DATANODE_XCEIVER_STOP_TIMEOUT_MILLIS_DEFAULT);
         Assert.fail();
       }
       catch(RecoveryInProgressException ripe) {
@@ -276,7 +280,9 @@ public class TestInterDatanodeProtocol {
     { // BlockRecoveryFI_01: replica not found
       final long recoveryid = gs + 1;
       final Block b = new Block(firstblockid - 1, length, gs);
-      ReplicaRecoveryInfo r = FsDatasetImpl.initReplicaRecovery(bpid, map, b, recoveryid);
+      ReplicaRecoveryInfo r = FsDatasetImpl.initReplicaRecovery(bpid, map, b,
+          recoveryid,
+          DFSConfigKeys.DFS_DATANODE_XCEIVER_STOP_TIMEOUT_MILLIS_DEFAULT);
       Assert.assertNull("Data-node should not have this replica.", r);
     }
     
@@ -284,7 +290,8 @@ public class TestInterDatanodeProtocol {
       final long recoveryid = gs - 1;
       final Block b = new Block(firstblockid + 1, length, gs);
       try {
-        FsDatasetImpl.initReplicaRecovery(bpid, map, b, recoveryid);
+        FsDatasetImpl.initReplicaRecovery(bpid, map, b, recoveryid,
+            DFSConfigKeys.DFS_DATANODE_XCEIVER_STOP_TIMEOUT_MILLIS_DEFAULT);
         Assert.fail();
       }
       catch(IOException ioe) {
@@ -297,7 +304,8 @@ public class TestInterDatanodeProtocol {
       final long recoveryid = gs + 1;
       final Block b = new Block(firstblockid, length, gs+1);
       try {
-        FsDatasetImpl.initReplicaRecovery(bpid, map, b, recoveryid);
+        FsDatasetImpl.initReplicaRecovery(bpid, map, b, recoveryid,
+            DFSConfigKeys.DFS_DATANODE_XCEIVER_STOP_TIMEOUT_MILLIS_DEFAULT);
         fail("InitReplicaRecovery should fail because replica's " +
         		"gs is less than the block's gs");
       } catch (IOException e) {
@@ -321,7 +329,7 @@ public class TestInterDatanodeProtocol {
       String bpid = cluster.getNamesystem().getBlockPoolId();
 
       //create a file
-      DistributedFileSystem dfs = (DistributedFileSystem)cluster.getFileSystem();
+      DistributedFileSystem dfs = cluster.getFileSystem();
       String filestr = "/foo";
       Path filepath = new Path(filestr);
       DFSTestUtil.createFile(dfs, filepath, 1024L, (short)3, 0L);

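A note on the change running through this file: per the corrected javadoc above, FsDatasetImpl.initReplicaRecovery() now takes a fifth parameter of type long, and every test call site passes DFSConfigKeys.DFS_DATANODE_XCEIVER_STOP_TIMEOUT_MILLIS_DEFAULT for it, which suggests a datanode xceiver-stop timeout in milliseconds. A sketch of the updated shape; the parameter names are assumptions, only the types come from the javadoc:

    // Signature per the javadoc fix (names assumed, types from the diff):
    // static ReplicaRecoveryInfo initReplicaRecovery(String bpid, ReplicaMap map,
    //     Block block, long recoveryId, long xceiverStopTimeoutMillis)

    // Call sites now thread the configured default through, as above:
    ReplicaRecoveryInfo info = FsDatasetImpl.initReplicaRecovery(bpid, map,
        blocks[0], recoveryid,
        DFSConfigKeys.DFS_DATANODE_XCEIVER_STOP_TIMEOUT_MILLIS_DEFAULT);
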
Modified: hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/CreateEditsLog.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/CreateEditsLog.java?rev=1513258&r1=1513257&r2=1513258&view=diff
==============================================================================
--- hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/CreateEditsLog.java (original)
+++ hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/CreateEditsLog.java Mon Aug 12 21:25:49 2013
@@ -26,7 +26,6 @@ import org.apache.hadoop.hdfs.protocol.B
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
 import org.apache.hadoop.hdfs.server.common.GenerationStamp;
 import org.apache.hadoop.hdfs.server.common.Storage;
-import org.apache.hadoop.hdfs.server.namenode.INodeId;
 
 /**
  * 
@@ -97,8 +96,9 @@ public class CreateEditsLog {
         dirInode = new INodeDirectory(inodeId.nextValue(), null, p, 0L);
         editLog.logMkDir(currentDir, dirInode);
       }
-      editLog.logOpenFile(filePath, new INodeFileUnderConstruction(
-          inodeId.nextValue(), p, replication, 0, blockSize, "", "", null));
+      editLog.logOpenFile(filePath,
+          new INodeFileUnderConstruction(inodeId.nextValue(), p, replication,
+              0, blockSize, "", "", null), false);
       editLog.logCloseFile(filePath, inode);
 
       if (currentBlockId - bidAtSync >= 2000) { // sync every 2K blocks

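FSEditLog.logOpenFile() now takes a trailing boolean, and these tests pass false. Read alongside the editsStored.xml changes at the end of this commit, which add RPC_CLIENTID and RPC_CALLID fields to mutating records, the flag plausibly controls whether the originating RPC's client id and call id are logged with the op; that reading, and the parameter name below, are assumptions:

    // New call shape; the boolean's name and meaning are assumed, not from the diff.
    editLog.logOpenFile(filePath,
        new INodeFileUnderConstruction(inodeId.nextValue(), p, replication,
            0, blockSize, "", "", null),
        false /* toLogRpcIds? */);
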
Modified: hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java?rev=1513258&r1=1513257&r2=1513258&view=diff
==============================================================================
--- hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java (original)
+++ hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java Mon Aug 12 21:25:49 2013
@@ -121,7 +121,7 @@ public class NNThroughputBenchmark {
     File excludeFile = new File(config.get(DFSConfigKeys.DFS_HOSTS_EXCLUDE,
       "exclude"));
     if(!excludeFile.exists()) {
-      if(!excludeFile.getParentFile().mkdirs())
+      if(!excludeFile.getParentFile().exists() && !excludeFile.getParentFile().mkdirs())
         throw new IOException("NNThroughputBenchmark: cannot mkdir " + excludeFile);
     }
     new FileOutputStream(excludeFile).close();

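The guard added above fixes a false failure: File.mkdirs() returns false both when creation fails and when the directory already exists, so the old code threw on a pre-existing parent. A self-contained sketch of the idiom (class and path names are illustrative):

    import java.io.File;
    import java.io.IOException;

    public class MkdirsGuard {
      /** Create f's parent directories, tolerating ones that already exist. */
      static void ensureParentDirs(File f) throws IOException {
        File parent = f.getParentFile();
        // Only treat a false return from mkdirs() as an error when the
        // directory was genuinely missing.
        if (parent != null && !parent.exists() && !parent.mkdirs()) {
          throw new IOException("cannot mkdir " + parent);
        }
      }

      public static void main(String[] args) throws IOException {
        ensureParentDirs(new File("build/exclude")); // creates build/
        ensureParentDirs(new File("build/exclude")); // now a no-op, no throw
      }
    }

Testing isDirectory() after a failed mkdirs(), rather than exists() before it, would also close the small window where another process creates the directory in between; the patch follows the simpler check.
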
Modified: hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java?rev=1513258&r1=1513257&r2=1513258&view=diff
==============================================================================
--- hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java (original)
+++ hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java Mon Aug 12 21:25:49 2013
@@ -155,7 +155,7 @@ public class TestEditLog {
         INodeFileUnderConstruction inode = new INodeFileUnderConstruction(
             namesystem.allocateNewInodeId(), p, replication, blockSize, 0, "",
             "", null);
-        editLog.logOpenFile("/filename" + (startIndex + i), inode);
+        editLog.logOpenFile("/filename" + (startIndex + i), inode, false);
         editLog.logCloseFile("/filename" + (startIndex + i), inode);
         editLog.logSync();
       }
@@ -912,14 +912,14 @@ public class TestEditLog {
       log.setMetricsForTests(mockMetrics);
 
       for (int i = 0; i < 400; i++) {
-        log.logDelete(oneKB, 1L);
+        log.logDelete(oneKB, 1L, false);
       }
       // After ~400KB, we're still within the 512KB buffer size
       Mockito.verify(mockMetrics, Mockito.times(0)).addSync(Mockito.anyLong());
       
       // After ~400KB more, we should have done an automatic sync
       for (int i = 0; i < 400; i++) {
-        log.logDelete(oneKB, 1L);
+        log.logDelete(oneKB, 1L, false);
       }
       Mockito.verify(mockMetrics, Mockito.times(1)).addSync(Mockito.anyLong());
 

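The logDelete() calls here gain the same trailing boolean as logOpenFile() above. The surrounding test also documents the edit log's buffering contract: 400 one-kilobyte ops stay inside the 512KB buffer with no sync, and 400 more push past it and trigger exactly one automatic sync. A toy model of that behavior, assuming nothing about the real FSEditLog internals:

    import java.util.ArrayList;
    import java.util.List;

    public class AutoSyncSketch {
      static final int BUFFER_LIMIT = 512 * 1024; // matches the test's 512KB

      private final List<String> buffer = new ArrayList<String>();
      private int bufferedBytes = 0;
      int syncCount = 0;

      void logOp(String serializedOp) {
        buffer.add(serializedOp);
        bufferedBytes += serializedOp.length();
        // Once buffered edits pass the limit, flush and count the sync,
        // mirroring what the Mockito verifications above assert.
        if (bufferedBytes >= BUFFER_LIMIT) {
          buffer.clear();
          bufferedBytes = 0;
          syncCount++;
        }
      }

      public static void main(String[] args) {
        AutoSyncSketch log = new AutoSyncSketch();
        String oneKB = new String(new char[1024]).replace('\0', 'x');
        for (int i = 0; i < 400; i++) log.logOp(oneKB); // ~400KB buffered
        System.out.println(log.syncCount); // 0
        for (int i = 0; i < 400; i++) log.logOp(oneKB);
        System.out.println(log.syncCount); // 1
      }
    }
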
Modified: hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSEditLogLoader.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSEditLogLoader.java?rev=1513258&r1=1513257&r2=1513258&view=diff
==============================================================================
--- hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSEditLogLoader.java (original)
+++ hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSEditLogLoader.java Mon Aug 12 21:25:49 2013
@@ -292,7 +292,7 @@ public class TestFSEditLogLoader {
         long thisTxId = spyLog.getLastWrittenTxId() + 1;
         offsetToTxId.put(trueOffset, thisTxId);
         System.err.println("txid " + thisTxId + " at offset " + trueOffset);
-        spyLog.logDelete("path" + i, i);
+        spyLog.logDelete("path" + i, i, false);
         spyLog.logSync();
       }
     } finally {

Modified: hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java?rev=1513258&r1=1513257&r2=1513258&view=diff
==============================================================================
--- hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java (original)
+++ hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java Mon Aug 12 21:25:49 2013
@@ -23,6 +23,7 @@ import static org.junit.Assert.assertFal
 import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertNull;
 import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
 
 import java.io.BufferedReader;
 import java.io.ByteArrayOutputStream;
@@ -58,6 +59,7 @@ import org.apache.hadoop.hdfs.DFSClient;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSInputStream;
 import org.apache.hadoop.hdfs.DFSTestUtil;
+import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
@@ -81,6 +83,8 @@ import org.apache.log4j.RollingFileAppen
 import org.junit.Test;
 
 import com.google.common.collect.Sets;
+import org.mockito.Mockito;
+import static org.mockito.Mockito.*;
 
 /**
  * A JUnit test for doing fsck
@@ -876,6 +880,59 @@ public class TestFsck {
     }
   }
 
+  /** Test fsck with FileNotFound */
+  @Test
+  public void testFsckFileNotFound() throws Exception {
+
+    // Number of replicas to actually start
+    final short NUM_REPLICAS = 1;
+
+    Configuration conf = new Configuration();
+    NameNode namenode = mock(NameNode.class);
+    NetworkTopology nettop = mock(NetworkTopology.class);
+    Map<String,String[]> pmap = new HashMap<String, String[]>();
+    Writer result = new StringWriter();
+    PrintWriter out = new PrintWriter(result, true);
+    InetAddress remoteAddress = InetAddress.getLocalHost();
+    FSNamesystem fsName = mock(FSNamesystem.class);
+    when(namenode.getNamesystem()).thenReturn(fsName);
+    when(fsName.getBlockLocations(anyString(), anyLong(), anyLong(),
+        anyBoolean(), anyBoolean(), anyBoolean())).
+        thenThrow(new FileNotFoundException()) ;
+
+    NamenodeFsck fsck = new NamenodeFsck(conf, namenode, nettop, pmap, out,
+        NUM_REPLICAS, (short)1, remoteAddress);
+
+    String pathString = "/tmp/testFile";
+
+    long length = 123L;
+    boolean isDir = false;
+    int blockReplication = 1;
+    long blockSize = 128 *1024L;
+    long modTime = 123123123L;
+    long accessTime = 123123120L;
+    FsPermission perms = FsPermission.getDefault();
+    String owner = "foo";
+    String group = "bar";
+    byte [] symlink = null;
+    byte [] path = new byte[128];
+    path = DFSUtil.string2Bytes(pathString);
+    long fileId = 312321L;
+    int numChildren = 1;
+
+    HdfsFileStatus file = new HdfsFileStatus(length, isDir, blockReplication,
+        blockSize, modTime, accessTime, perms, owner, group, symlink, path,
+        fileId, numChildren);
+    Result res = new Result(conf);
+
+    try {
+      fsck.check(pathString, file, res);
+    } catch (Exception e) {
+      fail("Unexpected exception "+ e.getMessage());
+    }
+    assertTrue(res.toString().contains("HEALTHY"));
+  }
+
   /** Test fsck with symlinks in the filesystem */
   @Test
   public void testFsckSymlink() throws Exception {

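The new testFsckFileNotFound is notable for running NamenodeFsck without a MiniDFSCluster: NameNode and FSNamesystem are Mockito mocks, and getBlockLocations() is stubbed to throw FileNotFoundException so fsck's handling of a file deleted mid-scan can be checked in isolation (the expectation being that a vanished file leaves the report HEALTHY). The stubbing idiom, reduced to a self-contained sketch with an invented Service interface:

    import static org.mockito.Mockito.*;

    import java.io.FileNotFoundException;

    public class StubThrowSketch {
      /** Hypothetical collaborator, standing in for FSNamesystem. */
      interface Service {
        String lookup(String path) throws FileNotFoundException;
      }

      public static void main(String[] args) throws Exception {
        Service svc = mock(Service.class);
        // Any argument triggers the stubbed exception, mirroring the
        // anyString()/anyLong()/anyBoolean() matchers used in the test.
        when(svc.lookup(anyString())).thenThrow(new FileNotFoundException());
        try {
          svc.lookup("/tmp/testFile");
        } catch (FileNotFoundException expected) {
          System.out.println("stub threw as configured");
        }
      }
    }
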
Modified: hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestMetaSave.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestMetaSave.java?rev=1513258&r1=1513257&r2=1513258&view=diff
==============================================================================
--- hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestMetaSave.java (original)
+++ hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestMetaSave.java Mon Aug 12 21:25:49 2013
@@ -18,9 +18,11 @@
 package org.apache.hadoop.hdfs.server.namenode;
 
 import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
 
 import java.io.BufferedReader;
 import java.io.DataInputStream;
+import java.io.File;
 import java.io.FileInputStream;
 import java.io.IOException;
 import java.io.InputStreamReader;
@@ -31,6 +33,7 @@ import org.apache.hadoop.fs.CommonConfig
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
@@ -47,6 +50,7 @@ public class TestMetaSave {
   static final int blockSize = 8192;
   private static MiniDFSCluster cluster = null;
   private static FileSystem fileSys = null;
+  private static FSNamesystem namesystem = null;
 
   private void createFile(FileSystem fileSys, Path name) throws IOException {
     FSDataOutputStream stm = fileSys.create(name, true, fileSys.getConf()
@@ -72,6 +76,7 @@ public class TestMetaSave {
     cluster = new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DATA_NODES).build();
     cluster.waitActive();
     fileSys = cluster.getFileSystem();
+    namesystem = cluster.getNamesystem();
   }
 
   /**
@@ -79,9 +84,6 @@ public class TestMetaSave {
    */
   @Test
   public void testMetaSave() throws IOException, InterruptedException {
-
-    final FSNamesystem namesystem = cluster.getNamesystem();
-
     for (int i = 0; i < 2; i++) {
       Path file = new Path("/filestatus" + i);
       createFile(fileSys, file);
@@ -95,9 +97,8 @@ public class TestMetaSave {
     namesystem.metaSave("metasave.out.txt");
 
     // Verification
-    String logFile = System.getProperty("hadoop.log.dir") + "/"
-        + "metasave.out.txt";
-    FileInputStream fstream = new FileInputStream(logFile);
+    FileInputStream fstream = new FileInputStream(getLogFile(
+      "metasave.out.txt"));
     DataInputStream in = new DataInputStream(fstream);
     BufferedReader reader = null;
     try {
@@ -124,9 +125,6 @@ public class TestMetaSave {
   @Test
   public void testMetasaveAfterDelete()
       throws IOException, InterruptedException {
-
-    final FSNamesystem namesystem = cluster.getNamesystem();
-
     for (int i = 0; i < 2; i++) {
       Path file = new Path("/filestatus" + i);
       createFile(fileSys, file);
@@ -142,11 +140,10 @@ public class TestMetaSave {
     namesystem.metaSave("metasaveAfterDelete.out.txt");
 
     // Verification
-    String logFile = System.getProperty("hadoop.log.dir") + "/"
-        + "metasaveAfterDelete.out.txt";
     BufferedReader reader = null;
     try {
-      FileInputStream fstream = new FileInputStream(logFile);
+      FileInputStream fstream = new FileInputStream(getLogFile(
+        "metasaveAfterDelete.out.txt"));
       DataInputStream in = new DataInputStream(fstream);
       reader = new BufferedReader(new InputStreamReader(in));
       reader.readLine();
@@ -166,6 +163,42 @@ public class TestMetaSave {
     }
   }
 
+  /**
+   * Tests that metasave overwrites the output file (not append).
+   */
+  @Test
+  public void testMetaSaveOverwrite() throws Exception {
+    // metaSave twice.
+    namesystem.metaSave("metaSaveOverwrite.out.txt");
+    namesystem.metaSave("metaSaveOverwrite.out.txt");
+
+    // Read output file.
+    FileInputStream fis = null;
+    InputStreamReader isr = null;
+    BufferedReader rdr = null;
+    try {
+      fis = new FileInputStream(getLogFile("metaSaveOverwrite.out.txt"));
+      isr = new InputStreamReader(fis);
+      rdr = new BufferedReader(isr);
+
+      // Validate that file was overwritten (not appended) by checking for
+      // presence of only one "Live Datanodes" line.
+      boolean foundLiveDatanodesLine = false;
+      String line = rdr.readLine();
+      while (line != null) {
+        if (line.startsWith("Live Datanodes")) {
+          if (foundLiveDatanodesLine) {
+            fail("multiple Live Datanodes lines, output file not overwritten");
+          }
+          foundLiveDatanodesLine = true;
+        }
+        line = rdr.readLine();
+      }
+    } finally {
+      IOUtils.cleanup(null, rdr, isr, fis);
+    }
+  }
+
   @AfterClass
   public static void tearDown() throws IOException {
     if (fileSys != null)
@@ -173,4 +206,14 @@ public class TestMetaSave {
     if (cluster != null)
       cluster.shutdown();
   }
+
+  /**
+   * Returns a File for the given name inside the log directory.
+   * 
+   * @param name String file name
+   * @return File for given name inside log directory
+   */
+  private static File getLogFile(String name) {
+    return new File(System.getProperty("hadoop.log.dir"), name);
+  }
 }

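Two small cleanups recur in this file: the report path is now resolved with the two-argument File constructor via getLogFile() instead of string concatenation, and the new test closes its streams with IOUtils.cleanup(), which closes each stream in turn and logs rather than propagates failures. The overwrite check itself rests on a simple invariant: if metaSave() truncates instead of appending, the "Live Datanodes" header appears exactly once. A reduced, runnable version of that counting pattern (file name and contents are illustrative; try-with-resources stands in for IOUtils.cleanup()):

    import java.io.*;

    public class CountHeaderSketch {
      /** Returns how many lines of f start with the given prefix. */
      static int countLinesStartingWith(File f, String prefix) throws IOException {
        int count = 0;
        try (BufferedReader rdr = new BufferedReader(new FileReader(f))) {
          for (String line = rdr.readLine(); line != null; line = rdr.readLine()) {
            if (line.startsWith(prefix)) {
              count++;
            }
          }
        }
        return count;
      }

      public static void main(String[] args) throws IOException {
        File out = new File(System.getProperty("java.io.tmpdir"), "metasave.txt");
        // Overwrite (append=false), as metaSave is expected to do.
        try (PrintWriter pw = new PrintWriter(new FileWriter(out, false))) {
          pw.println("Live Datanodes: 1");
        }
        System.out.println(countLinesStartingWith(out, "Live Datanodes")); // 1
      }
    }
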
Modified: hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java?rev=1513258&r1=1513257&r2=1513258&view=diff
==============================================================================
--- hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java (original)
+++ hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java Mon Aug 12 21:25:49 2013
@@ -126,6 +126,10 @@ public class TestNameNodeMXBean {
       // get attribute "CompileInfo"
       String compileInfo = (String) mbs.getAttribute(mxbeanName, "CompileInfo");
       assertEquals("Bad value for CompileInfo", fsn.getCompileInfo(), compileInfo);
+      // get attribute CorruptFiles
+      String corruptFiles = (String) (mbs.getAttribute(mxbeanName,
+          "CorruptFiles"));
+      assertEquals("Bad value for CorruptFiles", fsn.getCorruptFiles(), corruptFiles);
       // get attribute NameDirStatuses
       String nameDirStatuses = (String) (mbs.getAttribute(mxbeanName,
           "NameDirStatuses"));

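The added assertion follows its neighbors' pattern: fetch an attribute from the NameNode's MXBean through the MBeanServer and compare it with the value FSNamesystem reports directly. The generic JMX read looks like this; the sketch queries the standard Runtime bean so it runs in any JVM, where the test instead builds an ObjectName for the NameNode's bean:

    import java.lang.management.ManagementFactory;

    import javax.management.MBeanServer;
    import javax.management.ObjectName;

    public class MBeanAttributeSketch {
      public static void main(String[] args) throws Exception {
        MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
        // The test makes the same getAttribute(...) call against the
        // NameNode bean and casts the result, e.g. to String for CorruptFiles.
        ObjectName name = new ObjectName("java.lang:type=Runtime");
        String vmName = (String) mbs.getAttribute(name, "VmName");
        System.out.println(vmName);
      }
    }
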
Modified: hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeRecovery.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeRecovery.java?rev=1513258&r1=1513257&r2=1513258&view=diff
==============================================================================
--- hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeRecovery.java (original)
+++ hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeRecovery.java Mon Aug 12 21:25:49 2013
@@ -30,8 +30,6 @@ import java.io.RandomAccessFile;
 import java.util.HashSet;
 import java.util.Set;
 
-import junit.framework.Assert;
-
 import org.apache.commons.io.FileUtils;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -273,7 +271,7 @@ public class TestNameNodeRecovery {
     } 
     
     public int getMaxOpSize() {
-      return 30;
+      return 36;
     }
   }
 

Modified: hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestDisallowModifyROSnapshot.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestDisallowModifyROSnapshot.java?rev=1513258&r1=1513257&r2=1513258&view=diff
==============================================================================
--- hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestDisallowModifyROSnapshot.java (original)
+++ hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestDisallowModifyROSnapshot.java Mon Aug 12 21:25:49 2013
@@ -27,6 +27,7 @@ import org.apache.hadoop.fs.permission.F
 import org.apache.hadoop.hdfs.DFSClient;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.protocol.SnapshotAccessControlException;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.junit.AfterClass;
 import org.junit.BeforeClass;

Modified: hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestNestedSnapshots.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestNestedSnapshots.java?rev=1513258&r1=1513257&r2=1513258&view=diff
==============================================================================
--- hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestNestedSnapshots.java (original)
+++ hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestNestedSnapshots.java Mon Aug 12 21:25:49 2013
@@ -35,6 +35,7 @@ import org.apache.hadoop.hdfs.Distribute
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.NSQuotaExceededException;
+import org.apache.hadoop.hdfs.protocol.SnapshotException;
 import org.apache.hadoop.hdfs.server.namenode.EditLogFileOutputStream;
 import org.apache.hadoop.hdfs.server.namenode.FSDirectory;
 import org.apache.hadoop.hdfs.server.namenode.INode;

Modified: hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotManager.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotManager.java?rev=1513258&r1=1513257&r2=1513258&view=diff
==============================================================================
--- hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotManager.java (original)
+++ hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotManager.java Mon Aug 12 21:25:49 2013
@@ -19,6 +19,8 @@
 package org.apache.hadoop.hdfs.server.namenode.snapshot;
 
 import java.util.ArrayList;
+
+import org.apache.hadoop.hdfs.protocol.SnapshotException;
 import org.apache.hadoop.hdfs.server.namenode.FSDirectory;
 import org.apache.hadoop.hdfs.server.namenode.INode;
 import org.junit.*;

Modified: hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotRename.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotRename.java?rev=1513258&r1=1513257&r2=1513258&view=diff
==============================================================================
--- hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotRename.java (original)
+++ hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotRename.java Mon Aug 12 21:25:49 2013
@@ -29,6 +29,7 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.protocol.SnapshotException;
 import org.apache.hadoop.hdfs.server.namenode.FSDirectory;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectoryWithSnapshot.DirectoryDiff;

Modified: hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/security/TestPermission.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/security/TestPermission.java?rev=1513258&r1=1513257&r2=1513258&view=diff
==============================================================================
--- hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/security/TestPermission.java (original)
+++ hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/security/TestPermission.java Mon Aug 12 21:25:49 2013
@@ -20,10 +20,8 @@ package org.apache.hadoop.security;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
 
 import java.io.IOException;
-import java.security.PrivilegedExceptionAction;
 import java.util.Random;
 
 import org.apache.commons.logging.Log;
@@ -32,17 +30,14 @@ import org.apache.hadoop.conf.Configurat
 import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FSDataOutputStream;
-import org.apache.hadoop.fs.FileContext;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSTestUtil;
-import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.StringUtils;
 import org.junit.Test;
 
@@ -303,92 +298,4 @@ public class TestPermission {
       return false;
     }
   }
-
-  @Test(timeout = 30000)
-  public void testSymlinkPermissions() throws Exception {
-    final Configuration conf = new HdfsConfiguration();
-    conf.setBoolean(DFSConfigKeys.DFS_PERMISSIONS_ENABLED_KEY, true);
-    conf.set(FsPermission.UMASK_LABEL, "000");
-    MiniDFSCluster cluster = null;
-    FileSystem fs = null;
-
-    try {
-      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
-      cluster.waitActive();
-      FileContext fc = FileContext.getFileContext(conf);
-      fs = FileSystem.get(conf);
-
-      // Create initial test files
-      final Path testDir = new Path("/symtest");
-      final Path target = new Path(testDir, "target");
-      final Path link = new Path(testDir, "link");
-      fs.mkdirs(testDir);
-      DFSTestUtil.createFile(fs, target, 1024, (short)3, 0xBEEFl);
-      fc.createSymlink(target, link, false);
-
-      // Non-super user to run commands with
-      final UserGroupInformation user = UserGroupInformation
-          .createRemoteUser("myuser");
-
-      // Case 1: parent directory is read-only
-      fs.setPermission(testDir, new FsPermission((short)0555));
-      try {
-        user.doAs(new PrivilegedExceptionAction<Object>() {
-          @Override
-          public Object run() throws IOException {
-            FileContext myfc = FileContext.getFileContext(conf);
-            myfc.delete(link, false);
-            return null;
-          }
-        });
-        fail("Deleted symlink without write permissions on parent!");
-      } catch (AccessControlException e) {
-        GenericTestUtils.assertExceptionContains("Permission denied", e);
-      }
-
-      // Case 2: target is not readable
-      fs.setPermission(target, new FsPermission((short)0000));
-      try {
-        user.doAs(new PrivilegedExceptionAction<Object>() {
-          @Override
-          public Object run() throws IOException {
-            FileContext myfc = FileContext.getFileContext(conf);
-            myfc.open(link).read();
-            return null;
-          }
-        });
-        fail("Read link target even though target does not have" +
-            " read permissions!");
-      } catch (IOException e) {
-        GenericTestUtils.assertExceptionContains("Permission denied", e);
-      }
-
-      // Case 3: parent directory is read/write
-      fs.setPermission(testDir, new FsPermission((short)0777));
-      user.doAs(new PrivilegedExceptionAction<Object>() {
-        @Override
-        public Object run() throws IOException {
-          FileContext myfc = FileContext.getFileContext(conf);
-          myfc.delete(link, false);
-          return null;
-        }
-      });
-      // Make sure only the link was deleted
-      assertTrue("Target should not have been deleted!",
-          fc.util().exists(target));
-      assertFalse("Link should have been deleted!",
-          fc.util().exists(link));
-    } finally {
-      try {
-        if(fs != null) fs.close();
-      } catch(Exception e) {
-        LOG.error(StringUtils.stringifyException(e));
-      }
-      try {
-        if(cluster != null) cluster.shutdown();
-      } catch(Exception e) {
-        LOG.error(StringUtils.stringifyException(e));
-      }
-    }
-  }
 }

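The removed testSymlinkPermissions exercised a pattern worth noting: wrapping filesystem calls in UserGroupInformation.doAs() so they execute as a non-superuser and actually hit permission checks. A reduced sketch of that idiom (requires hadoop-common on the classpath; the user name is illustrative):

    import java.security.PrivilegedExceptionAction;

    import org.apache.hadoop.security.UserGroupInformation;

    public class DoAsSketch {
      public static void main(String[] args) throws Exception {
        // An identity with no credentials, as the deleted test created.
        UserGroupInformation user =
            UserGroupInformation.createRemoteUser("myuser");
        // Everything inside run() executes as "myuser", so HDFS permission
        // checks see that identity rather than the process owner.
        String name = user.doAs(new PrivilegedExceptionAction<String>() {
          @Override
          public String run() throws Exception {
            return UserGroupInformation.getCurrentUser().getUserName();
          }
        });
        System.out.println(name); // prints "myuser"
      }
    }
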
Modified: hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored?rev=1513258&r1=1513257&r2=1513258&view=diff
==============================================================================
Binary files - no diff available.

Modified: hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored.xml
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored.xml?rev=1513258&r1=1513257&r2=1513258&view=diff
==============================================================================
--- hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored.xml (original)
+++ hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored.xml Mon Aug 12 21:25:49 2013
@@ -1,6 +1,6 @@
 <?xml version="1.0" encoding="UTF-8"?>
 <EDITS>
-  <EDITS_VERSION>-46</EDITS_VERSION>
+  <EDITS_VERSION>-47</EDITS_VERSION>
   <RECORD>
     <OPCODE>OP_START_LOG_SEGMENT</OPCODE>
     <DATA>
@@ -13,8 +13,8 @@
       <TXID>2</TXID>
       <DELEGATION_KEY>
         <KEY_ID>1</KEY_ID>
-        <EXPIRY_DATE>1372798673941</EXPIRY_DATE>
-        <KEY>247c47b8bf6b89ec</KEY>
+        <EXPIRY_DATE>1375509063810</EXPIRY_DATE>
+        <KEY>4d47710649039b98</KEY>
       </DELEGATION_KEY>
     </DATA>
   </RECORD>
@@ -24,8 +24,8 @@
       <TXID>3</TXID>
       <DELEGATION_KEY>
         <KEY_ID>2</KEY_ID>
-        <EXPIRY_DATE>1372798673944</EXPIRY_DATE>
-        <KEY>ef1a35da6b4fc327</KEY>
+        <EXPIRY_DATE>1375509063812</EXPIRY_DATE>
+        <KEY>38cbb1d8fd90fcb2</KEY>
       </DELEGATION_KEY>
     </DATA>
   </RECORD>
@@ -37,16 +37,18 @@
       <INODEID>16386</INODEID>
       <PATH>/file_create_u\0001;F431</PATH>
       <REPLICATION>1</REPLICATION>
-      <MTIME>1372107474972</MTIME>
-      <ATIME>1372107474972</ATIME>
+      <MTIME>1374817864805</MTIME>
+      <ATIME>1374817864805</ATIME>
       <BLOCKSIZE>512</BLOCKSIZE>
-      <CLIENT_NAME>DFSClient_NONMAPREDUCE_-1834501254_1</CLIENT_NAME>
+      <CLIENT_NAME>DFSClient_NONMAPREDUCE_-1676409172_1</CLIENT_NAME>
       <CLIENT_MACHINE>127.0.0.1</CLIENT_MACHINE>
       <PERMISSION_STATUS>
-        <USERNAME>aagarwal</USERNAME>
+        <USERNAME>jing</USERNAME>
         <GROUPNAME>supergroup</GROUPNAME>
         <MODE>420</MODE>
       </PERMISSION_STATUS>
+      <RPC_CLIENTID>5245793a-984b-4264-8d7c-7890775547a0</RPC_CLIENTID>
+      <RPC_CALLID>8</RPC_CALLID>
     </DATA>
   </RECORD>
   <RECORD>
@@ -57,13 +59,13 @@
       <INODEID>0</INODEID>
       <PATH>/file_create_u\0001;F431</PATH>
       <REPLICATION>1</REPLICATION>
-      <MTIME>1372107474983</MTIME>
-      <ATIME>1372107474972</ATIME>
+      <MTIME>1374817864816</MTIME>
+      <ATIME>1374817864805</ATIME>
       <BLOCKSIZE>512</BLOCKSIZE>
       <CLIENT_NAME></CLIENT_NAME>
       <CLIENT_MACHINE></CLIENT_MACHINE>
       <PERMISSION_STATUS>
-        <USERNAME>aagarwal</USERNAME>
+        <USERNAME>jing</USERNAME>
         <GROUPNAME>supergroup</GROUPNAME>
         <MODE>420</MODE>
       </PERMISSION_STATUS>
@@ -76,7 +78,9 @@
       <LENGTH>0</LENGTH>
       <SRC>/file_create_u\0001;F431</SRC>
       <DST>/file_moved</DST>
-      <TIMESTAMP>1372107474986</TIMESTAMP>
+      <TIMESTAMP>1374817864818</TIMESTAMP>
+      <RPC_CLIENTID>5245793a-984b-4264-8d7c-7890775547a0</RPC_CLIENTID>
+      <RPC_CALLID>10</RPC_CALLID>
     </DATA>
   </RECORD>
   <RECORD>
@@ -85,7 +89,9 @@
       <TXID>7</TXID>
       <LENGTH>0</LENGTH>
       <PATH>/file_moved</PATH>
-      <TIMESTAMP>1372107474989</TIMESTAMP>
+      <TIMESTAMP>1374817864822</TIMESTAMP>
+      <RPC_CLIENTID>5245793a-984b-4264-8d7c-7890775547a0</RPC_CLIENTID>
+      <RPC_CALLID>11</RPC_CALLID>
     </DATA>
   </RECORD>
   <RECORD>
@@ -95,9 +101,9 @@
       <LENGTH>0</LENGTH>
       <INODEID>16387</INODEID>
       <PATH>/directory_mkdir</PATH>
-      <TIMESTAMP>1372107474991</TIMESTAMP>
+      <TIMESTAMP>1374817864825</TIMESTAMP>
       <PERMISSION_STATUS>
-        <USERNAME>aagarwal</USERNAME>
+        <USERNAME>jing</USERNAME>
         <GROUPNAME>supergroup</GROUPNAME>
         <MODE>493</MODE>
       </PERMISSION_STATUS>
@@ -130,6 +136,8 @@
       <TXID>12</TXID>
       <SNAPSHOTROOT>/directory_mkdir</SNAPSHOTROOT>
       <SNAPSHOTNAME>snapshot1</SNAPSHOTNAME>
+      <RPC_CLIENTID>5245793a-984b-4264-8d7c-7890775547a0</RPC_CLIENTID>
+      <RPC_CALLID>16</RPC_CALLID>
     </DATA>
   </RECORD>
   <RECORD>
@@ -139,6 +147,8 @@
       <SNAPSHOTROOT>/directory_mkdir</SNAPSHOTROOT>
       <SNAPSHOTOLDNAME>snapshot1</SNAPSHOTOLDNAME>
       <SNAPSHOTNEWNAME>snapshot2</SNAPSHOTNEWNAME>
+      <RPC_CLIENTID>5245793a-984b-4264-8d7c-7890775547a0</RPC_CLIENTID>
+      <RPC_CALLID>17</RPC_CALLID>
     </DATA>
   </RECORD>
   <RECORD>
@@ -147,6 +157,8 @@
       <TXID>14</TXID>
       <SNAPSHOTROOT>/directory_mkdir</SNAPSHOTROOT>
       <SNAPSHOTNAME>snapshot2</SNAPSHOTNAME>
+      <RPC_CLIENTID>5245793a-984b-4264-8d7c-7890775547a0</RPC_CLIENTID>
+      <RPC_CALLID>18</RPC_CALLID>
     </DATA>
   </RECORD>
   <RECORD>
@@ -157,16 +169,18 @@
       <INODEID>16388</INODEID>
       <PATH>/file_create_u\0001;F431</PATH>
       <REPLICATION>1</REPLICATION>
-      <MTIME>1372107475007</MTIME>
-      <ATIME>1372107475007</ATIME>
+      <MTIME>1374817864846</MTIME>
+      <ATIME>1374817864846</ATIME>
       <BLOCKSIZE>512</BLOCKSIZE>
-      <CLIENT_NAME>DFSClient_NONMAPREDUCE_-1834501254_1</CLIENT_NAME>
+      <CLIENT_NAME>DFSClient_NONMAPREDUCE_-1676409172_1</CLIENT_NAME>
       <CLIENT_MACHINE>127.0.0.1</CLIENT_MACHINE>
       <PERMISSION_STATUS>
-        <USERNAME>aagarwal</USERNAME>
+        <USERNAME>jing</USERNAME>
         <GROUPNAME>supergroup</GROUPNAME>
         <MODE>420</MODE>
       </PERMISSION_STATUS>
+      <RPC_CLIENTID>5245793a-984b-4264-8d7c-7890775547a0</RPC_CLIENTID>
+      <RPC_CALLID>19</RPC_CALLID>
     </DATA>
   </RECORD>
   <RECORD>
@@ -177,13 +191,13 @@
       <INODEID>0</INODEID>
       <PATH>/file_create_u\0001;F431</PATH>
       <REPLICATION>1</REPLICATION>
-      <MTIME>1372107475009</MTIME>
-      <ATIME>1372107475007</ATIME>
+      <MTIME>1374817864848</MTIME>
+      <ATIME>1374817864846</ATIME>
       <BLOCKSIZE>512</BLOCKSIZE>
       <CLIENT_NAME></CLIENT_NAME>
       <CLIENT_MACHINE></CLIENT_MACHINE>
       <PERMISSION_STATUS>
-        <USERNAME>aagarwal</USERNAME>
+        <USERNAME>jing</USERNAME>
         <GROUPNAME>supergroup</GROUPNAME>
         <MODE>420</MODE>
       </PERMISSION_STATUS>
@@ -239,8 +253,10 @@
       <LENGTH>0</LENGTH>
       <SRC>/file_create_u\0001;F431</SRC>
       <DST>/file_moved</DST>
-      <TIMESTAMP>1372107475019</TIMESTAMP>
+      <TIMESTAMP>1374817864860</TIMESTAMP>
       <OPTIONS>NONE</OPTIONS>
+      <RPC_CLIENTID>5245793a-984b-4264-8d7c-7890775547a0</RPC_CLIENTID>
+      <RPC_CALLID>26</RPC_CALLID>
     </DATA>
   </RECORD>
   <RECORD>
@@ -251,16 +267,18 @@
       <INODEID>16389</INODEID>
       <PATH>/file_concat_target</PATH>
       <REPLICATION>1</REPLICATION>
-      <MTIME>1372107475023</MTIME>
-      <ATIME>1372107475023</ATIME>
+      <MTIME>1374817864864</MTIME>
+      <ATIME>1374817864864</ATIME>
       <BLOCKSIZE>512</BLOCKSIZE>
-      <CLIENT_NAME>DFSClient_NONMAPREDUCE_-1834501254_1</CLIENT_NAME>
+      <CLIENT_NAME>DFSClient_NONMAPREDUCE_-1676409172_1</CLIENT_NAME>
       <CLIENT_MACHINE>127.0.0.1</CLIENT_MACHINE>
       <PERMISSION_STATUS>
-        <USERNAME>aagarwal</USERNAME>
+        <USERNAME>jing</USERNAME>
         <GROUPNAME>supergroup</GROUPNAME>
         <MODE>420</MODE>
       </PERMISSION_STATUS>
+      <RPC_CLIENTID>5245793a-984b-4264-8d7c-7890775547a0</RPC_CLIENTID>
+      <RPC_CALLID>28</RPC_CALLID>
     </DATA>
   </RECORD>
   <RECORD>
@@ -287,6 +305,8 @@
         <NUM_BYTES>0</NUM_BYTES>
         <GENSTAMP>1001</GENSTAMP>
       </BLOCK>
+      <RPC_CLIENTID></RPC_CLIENTID>
+      <RPC_CALLID>-2</RPC_CALLID>
     </DATA>
   </RECORD>
   <RECORD>
@@ -318,6 +338,8 @@
         <NUM_BYTES>0</NUM_BYTES>
         <GENSTAMP>1002</GENSTAMP>
       </BLOCK>
+      <RPC_CLIENTID></RPC_CLIENTID>
+      <RPC_CALLID>-2</RPC_CALLID>
     </DATA>
   </RECORD>
   <RECORD>
@@ -354,6 +376,8 @@
         <NUM_BYTES>0</NUM_BYTES>
         <GENSTAMP>1003</GENSTAMP>
       </BLOCK>
+      <RPC_CLIENTID></RPC_CLIENTID>
+      <RPC_CALLID>-2</RPC_CALLID>
     </DATA>
   </RECORD>
   <RECORD>
@@ -364,8 +388,8 @@
       <INODEID>0</INODEID>
       <PATH>/file_concat_target</PATH>
       <REPLICATION>1</REPLICATION>
-      <MTIME>1372107475091</MTIME>
-      <ATIME>1372107475023</ATIME>
+      <MTIME>1374817864927</MTIME>
+      <ATIME>1374817864864</ATIME>
       <BLOCKSIZE>512</BLOCKSIZE>
       <CLIENT_NAME></CLIENT_NAME>
       <CLIENT_MACHINE></CLIENT_MACHINE>
@@ -385,7 +409,7 @@
         <GENSTAMP>1003</GENSTAMP>
       </BLOCK>
       <PERMISSION_STATUS>
-        <USERNAME>aagarwal</USERNAME>
+        <USERNAME>jing</USERNAME>
         <GROUPNAME>supergroup</GROUPNAME>
         <MODE>420</MODE>
       </PERMISSION_STATUS>
@@ -399,16 +423,18 @@
       <INODEID>16390</INODEID>
       <PATH>/file_concat_0</PATH>
       <REPLICATION>1</REPLICATION>
-      <MTIME>1372107475093</MTIME>
-      <ATIME>1372107475093</ATIME>
+      <MTIME>1374817864929</MTIME>
+      <ATIME>1374817864929</ATIME>
       <BLOCKSIZE>512</BLOCKSIZE>
-      <CLIENT_NAME>DFSClient_NONMAPREDUCE_-1834501254_1</CLIENT_NAME>
+      <CLIENT_NAME>DFSClient_NONMAPREDUCE_-1676409172_1</CLIENT_NAME>
       <CLIENT_MACHINE>127.0.0.1</CLIENT_MACHINE>
       <PERMISSION_STATUS>
-        <USERNAME>aagarwal</USERNAME>
+        <USERNAME>jing</USERNAME>
         <GROUPNAME>supergroup</GROUPNAME>
         <MODE>420</MODE>
       </PERMISSION_STATUS>
+      <RPC_CLIENTID>5245793a-984b-4264-8d7c-7890775547a0</RPC_CLIENTID>
+      <RPC_CALLID>41</RPC_CALLID>
     </DATA>
   </RECORD>
   <RECORD>
@@ -435,6 +461,8 @@
         <NUM_BYTES>0</NUM_BYTES>
         <GENSTAMP>1004</GENSTAMP>
       </BLOCK>
+      <RPC_CLIENTID></RPC_CLIENTID>
+      <RPC_CALLID>-2</RPC_CALLID>
     </DATA>
   </RECORD>
   <RECORD>
@@ -466,6 +494,8 @@
         <NUM_BYTES>0</NUM_BYTES>
         <GENSTAMP>1005</GENSTAMP>
       </BLOCK>
+      <RPC_CLIENTID></RPC_CLIENTID>
+      <RPC_CALLID>-2</RPC_CALLID>
     </DATA>
   </RECORD>
   <RECORD>
@@ -502,6 +532,8 @@
         <NUM_BYTES>0</NUM_BYTES>
         <GENSTAMP>1006</GENSTAMP>
       </BLOCK>
+      <RPC_CLIENTID></RPC_CLIENTID>
+      <RPC_CALLID>-2</RPC_CALLID>
     </DATA>
   </RECORD>
   <RECORD>
@@ -512,8 +544,8 @@
       <INODEID>0</INODEID>
       <PATH>/file_concat_0</PATH>
       <REPLICATION>1</REPLICATION>
-      <MTIME>1372107475110</MTIME>
-      <ATIME>1372107475093</ATIME>
+      <MTIME>1374817864947</MTIME>
+      <ATIME>1374817864929</ATIME>
       <BLOCKSIZE>512</BLOCKSIZE>
       <CLIENT_NAME></CLIENT_NAME>
       <CLIENT_MACHINE></CLIENT_MACHINE>
@@ -533,7 +565,7 @@
         <GENSTAMP>1006</GENSTAMP>
       </BLOCK>
       <PERMISSION_STATUS>
-        <USERNAME>aagarwal</USERNAME>
+        <USERNAME>jing</USERNAME>
         <GROUPNAME>supergroup</GROUPNAME>
         <MODE>420</MODE>
       </PERMISSION_STATUS>
@@ -547,16 +579,18 @@
       <INODEID>16391</INODEID>
       <PATH>/file_concat_1</PATH>
       <REPLICATION>1</REPLICATION>
-      <MTIME>1372107475112</MTIME>
-      <ATIME>1372107475112</ATIME>
+      <MTIME>1374817864950</MTIME>
+      <ATIME>1374817864950</ATIME>
       <BLOCKSIZE>512</BLOCKSIZE>
-      <CLIENT_NAME>DFSClient_NONMAPREDUCE_-1834501254_1</CLIENT_NAME>
+      <CLIENT_NAME>DFSClient_NONMAPREDUCE_-1676409172_1</CLIENT_NAME>
       <CLIENT_MACHINE>127.0.0.1</CLIENT_MACHINE>
       <PERMISSION_STATUS>
-        <USERNAME>aagarwal</USERNAME>
+        <USERNAME>jing</USERNAME>
         <GROUPNAME>supergroup</GROUPNAME>
         <MODE>420</MODE>
       </PERMISSION_STATUS>
+      <RPC_CLIENTID>5245793a-984b-4264-8d7c-7890775547a0</RPC_CLIENTID>
+      <RPC_CALLID>53</RPC_CALLID>
     </DATA>
   </RECORD>
   <RECORD>
@@ -583,6 +617,8 @@
         <NUM_BYTES>0</NUM_BYTES>
         <GENSTAMP>1007</GENSTAMP>
       </BLOCK>
+      <RPC_CLIENTID></RPC_CLIENTID>
+      <RPC_CALLID>-2</RPC_CALLID>
     </DATA>
   </RECORD>
   <RECORD>
@@ -614,6 +650,8 @@
         <NUM_BYTES>0</NUM_BYTES>
         <GENSTAMP>1008</GENSTAMP>
       </BLOCK>
+      <RPC_CLIENTID></RPC_CLIENTID>
+      <RPC_CALLID>-2</RPC_CALLID>
     </DATA>
   </RECORD>
   <RECORD>
@@ -650,6 +688,8 @@
         <NUM_BYTES>0</NUM_BYTES>
         <GENSTAMP>1009</GENSTAMP>
       </BLOCK>
+      <RPC_CLIENTID></RPC_CLIENTID>
+      <RPC_CALLID>-2</RPC_CALLID>
     </DATA>
   </RECORD>
   <RECORD>
@@ -660,8 +700,8 @@
       <INODEID>0</INODEID>
       <PATH>/file_concat_1</PATH>
       <REPLICATION>1</REPLICATION>
-      <MTIME>1372107475131</MTIME>
-      <ATIME>1372107475112</ATIME>
+      <MTIME>1374817864966</MTIME>
+      <ATIME>1374817864950</ATIME>
       <BLOCKSIZE>512</BLOCKSIZE>
       <CLIENT_NAME></CLIENT_NAME>
       <CLIENT_MACHINE></CLIENT_MACHINE>
@@ -681,7 +721,7 @@
         <GENSTAMP>1009</GENSTAMP>
       </BLOCK>
       <PERMISSION_STATUS>
-        <USERNAME>aagarwal</USERNAME>
+        <USERNAME>jing</USERNAME>
         <GROUPNAME>supergroup</GROUPNAME>
         <MODE>420</MODE>
       </PERMISSION_STATUS>
@@ -693,11 +733,13 @@
       <TXID>56</TXID>
       <LENGTH>0</LENGTH>
       <TRG>/file_concat_target</TRG>
-      <TIMESTAMP>1372107475133</TIMESTAMP>
+      <TIMESTAMP>1374817864967</TIMESTAMP>
       <SOURCES>
         <SOURCE1>/file_concat_0</SOURCE1>
         <SOURCE2>/file_concat_1</SOURCE2>
       </SOURCES>
+      <RPC_CLIENTID>5245793a-984b-4264-8d7c-7890775547a0</RPC_CLIENTID>
+      <RPC_CALLID>64</RPC_CALLID>
     </DATA>
   </RECORD>
   <RECORD>
@@ -708,13 +750,15 @@
       <INODEID>16392</INODEID>
       <PATH>/file_symlink</PATH>
       <VALUE>/file_concat_target</VALUE>
-      <MTIME>1372107475137</MTIME>
-      <ATIME>1372107475137</ATIME>
+      <MTIME>1374817864971</MTIME>
+      <ATIME>1374817864971</ATIME>
       <PERMISSION_STATUS>
-        <USERNAME>aagarwal</USERNAME>
+        <USERNAME>jing</USERNAME>
         <GROUPNAME>supergroup</GROUPNAME>
         <MODE>511</MODE>
       </PERMISSION_STATUS>
+      <RPC_CLIENTID>5245793a-984b-4264-8d7c-7890775547a0</RPC_CLIENTID>
+      <RPC_CALLID>65</RPC_CALLID>
     </DATA>
   </RECORD>
   <RECORD>
@@ -724,14 +768,14 @@
       <DELEGATION_TOKEN_IDENTIFIER>
         <KIND>HDFS_DELEGATION_TOKEN</KIND>
         <SEQUENCE_NUMBER>1</SEQUENCE_NUMBER>
-        <OWNER>aagarwal</OWNER>
+        <OWNER>jing</OWNER>
         <RENEWER>JobTracker</RENEWER>
         <REALUSER></REALUSER>
-        <ISSUE_DATE>1372107475140</ISSUE_DATE>
-        <MAX_DATE>1372712275140</MAX_DATE>
+        <ISSUE_DATE>1374817864974</ISSUE_DATE>
+        <MAX_DATE>1375422664974</MAX_DATE>
         <MASTER_KEY_ID>2</MASTER_KEY_ID>
       </DELEGATION_TOKEN_IDENTIFIER>
-      <EXPIRY_TIME>1372193875140</EXPIRY_TIME>
+      <EXPIRY_TIME>1374904264974</EXPIRY_TIME>
     </DATA>
   </RECORD>
   <RECORD>
@@ -741,14 +785,14 @@
       <DELEGATION_TOKEN_IDENTIFIER>
         <KIND>HDFS_DELEGATION_TOKEN</KIND>
         <SEQUENCE_NUMBER>1</SEQUENCE_NUMBER>
-        <OWNER>aagarwal</OWNER>
+        <OWNER>jing</OWNER>
         <RENEWER>JobTracker</RENEWER>
         <REALUSER></REALUSER>
-        <ISSUE_DATE>1372107475140</ISSUE_DATE>
-        <MAX_DATE>1372712275140</MAX_DATE>
+        <ISSUE_DATE>1374817864974</ISSUE_DATE>
+        <MAX_DATE>1375422664974</MAX_DATE>
         <MASTER_KEY_ID>2</MASTER_KEY_ID>
       </DELEGATION_TOKEN_IDENTIFIER>
-      <EXPIRY_TIME>1372193875208</EXPIRY_TIME>
+      <EXPIRY_TIME>1374904265012</EXPIRY_TIME>
     </DATA>
   </RECORD>
   <RECORD>
@@ -758,11 +802,11 @@
       <DELEGATION_TOKEN_IDENTIFIER>
         <KIND>HDFS_DELEGATION_TOKEN</KIND>
         <SEQUENCE_NUMBER>1</SEQUENCE_NUMBER>
-        <OWNER>aagarwal</OWNER>
+        <OWNER>jing</OWNER>
         <RENEWER>JobTracker</RENEWER>
         <REALUSER></REALUSER>
-        <ISSUE_DATE>1372107475140</ISSUE_DATE>
-        <MAX_DATE>1372712275140</MAX_DATE>
+        <ISSUE_DATE>1374817864974</ISSUE_DATE>
+        <MAX_DATE>1375422664974</MAX_DATE>
         <MASTER_KEY_ID>2</MASTER_KEY_ID>
       </DELEGATION_TOKEN_IDENTIFIER>
     </DATA>
@@ -773,18 +817,20 @@
       <TXID>61</TXID>
       <LENGTH>0</LENGTH>
       <INODEID>16393</INODEID>
-      <PATH>/written_file</PATH>
+      <PATH>/hard-lease-recovery-test</PATH>
       <REPLICATION>1</REPLICATION>
-      <MTIME>1372107475214</MTIME>
-      <ATIME>1372107475214</ATIME>
+      <MTIME>1374817865017</MTIME>
+      <ATIME>1374817865017</ATIME>
       <BLOCKSIZE>512</BLOCKSIZE>
-      <CLIENT_NAME>DFSClient_NONMAPREDUCE_-1834501254_1</CLIENT_NAME>
+      <CLIENT_NAME>DFSClient_NONMAPREDUCE_-1676409172_1</CLIENT_NAME>
       <CLIENT_MACHINE>127.0.0.1</CLIENT_MACHINE>
       <PERMISSION_STATUS>
-        <USERNAME>aagarwal</USERNAME>
+        <USERNAME>jing</USERNAME>
         <GROUPNAME>supergroup</GROUPNAME>
         <MODE>420</MODE>
       </PERMISSION_STATUS>
+      <RPC_CLIENTID>5245793a-984b-4264-8d7c-7890775547a0</RPC_CLIENTID>
+      <RPC_CALLID>69</RPC_CALLID>
     </DATA>
   </RECORD>
   <RECORD>
@@ -805,178 +851,42 @@
     <OPCODE>OP_UPDATE_BLOCKS</OPCODE>
     <DATA>
       <TXID>64</TXID>
-      <PATH>/written_file</PATH>
+      <PATH>/hard-lease-recovery-test</PATH>
       <BLOCK>
         <BLOCK_ID>1073741834</BLOCK_ID>
         <NUM_BYTES>0</NUM_BYTES>
         <GENSTAMP>1010</GENSTAMP>
       </BLOCK>
+      <RPC_CLIENTID></RPC_CLIENTID>
+      <RPC_CALLID>-2</RPC_CALLID>
     </DATA>
   </RECORD>
   <RECORD>
-    <OPCODE>OP_CLOSE</OPCODE>
+    <OPCODE>OP_UPDATE_BLOCKS</OPCODE>
     <DATA>
       <TXID>65</TXID>
-      <LENGTH>0</LENGTH>
-      <INODEID>0</INODEID>
-      <PATH>/written_file</PATH>
-      <REPLICATION>1</REPLICATION>
-      <MTIME>1372107475221</MTIME>
-      <ATIME>1372107475214</ATIME>
-      <BLOCKSIZE>512</BLOCKSIZE>
-      <CLIENT_NAME></CLIENT_NAME>
-      <CLIENT_MACHINE></CLIENT_MACHINE>
-      <BLOCK>
-        <BLOCK_ID>1073741834</BLOCK_ID>
-        <NUM_BYTES>9</NUM_BYTES>
-        <GENSTAMP>1010</GENSTAMP>
-      </BLOCK>
-      <PERMISSION_STATUS>
-        <USERNAME>aagarwal</USERNAME>
-        <GROUPNAME>supergroup</GROUPNAME>
-        <MODE>420</MODE>
-      </PERMISSION_STATUS>
-    </DATA>
-  </RECORD>
-  <RECORD>
-    <OPCODE>OP_ADD</OPCODE>
-    <DATA>
-      <TXID>66</TXID>
-      <LENGTH>0</LENGTH>
-      <INODEID>16393</INODEID>
-      <PATH>/written_file</PATH>
-      <REPLICATION>1</REPLICATION>
-      <MTIME>1372107475221</MTIME>
-      <ATIME>1372107475214</ATIME>
-      <BLOCKSIZE>512</BLOCKSIZE>
-      <CLIENT_NAME>DFSClient_NONMAPREDUCE_-1834501254_1</CLIENT_NAME>
-      <CLIENT_MACHINE>127.0.0.1</CLIENT_MACHINE>
+      <PATH>/hard-lease-recovery-test</PATH>
       <BLOCK>
         <BLOCK_ID>1073741834</BLOCK_ID>
-        <NUM_BYTES>9</NUM_BYTES>
+        <NUM_BYTES>0</NUM_BYTES>
         <GENSTAMP>1010</GENSTAMP>
       </BLOCK>
-      <PERMISSION_STATUS>
-        <USERNAME>aagarwal</USERNAME>
-        <GROUPNAME>supergroup</GROUPNAME>
-        <MODE>420</MODE>
-      </PERMISSION_STATUS>
+      <RPC_CLIENTID></RPC_CLIENTID>
+      <RPC_CALLID>-2</RPC_CALLID>
     </DATA>
   </RECORD>
   <RECORD>
     <OPCODE>OP_SET_GENSTAMP_V2</OPCODE>
     <DATA>
-      <TXID>67</TXID>
+      <TXID>66</TXID>
       <GENSTAMPV2>1011</GENSTAMPV2>
     </DATA>
   </RECORD>
   <RECORD>
-    <OPCODE>OP_UPDATE_BLOCKS</OPCODE>
-    <DATA>
-      <TXID>68</TXID>
-      <PATH>/written_file</PATH>
-      <BLOCK>
-        <BLOCK_ID>1073741834</BLOCK_ID>
-        <NUM_BYTES>9</NUM_BYTES>
-        <GENSTAMP>1011</GENSTAMP>
-      </BLOCK>
-    </DATA>
-  </RECORD>
-  <RECORD>
-    <OPCODE>OP_CLOSE</OPCODE>
-    <DATA>
-      <TXID>69</TXID>
-      <LENGTH>0</LENGTH>
-      <INODEID>0</INODEID>
-      <PATH>/written_file</PATH>
-      <REPLICATION>1</REPLICATION>
-      <MTIME>1372107475272</MTIME>
-      <ATIME>1372107475221</ATIME>
-      <BLOCKSIZE>512</BLOCKSIZE>
-      <CLIENT_NAME></CLIENT_NAME>
-      <CLIENT_MACHINE></CLIENT_MACHINE>
-      <BLOCK>
-        <BLOCK_ID>1073741834</BLOCK_ID>
-        <NUM_BYTES>26</NUM_BYTES>
-        <GENSTAMP>1011</GENSTAMP>
-      </BLOCK>
-      <PERMISSION_STATUS>
-        <USERNAME>aagarwal</USERNAME>
-        <GROUPNAME>supergroup</GROUPNAME>
-        <MODE>420</MODE>
-      </PERMISSION_STATUS>
-    </DATA>
-  </RECORD>
-  <RECORD>
-    <OPCODE>OP_ADD</OPCODE>
-    <DATA>
-      <TXID>70</TXID>
-      <LENGTH>0</LENGTH>
-      <INODEID>16394</INODEID>
-      <PATH>/hard-lease-recovery-test</PATH>
-      <REPLICATION>1</REPLICATION>
-      <MTIME>1372107475275</MTIME>
-      <ATIME>1372107475275</ATIME>
-      <BLOCKSIZE>512</BLOCKSIZE>
-      <CLIENT_NAME>DFSClient_NONMAPREDUCE_-1834501254_1</CLIENT_NAME>
-      <CLIENT_MACHINE>127.0.0.1</CLIENT_MACHINE>
-      <PERMISSION_STATUS>
-        <USERNAME>aagarwal</USERNAME>
-        <GROUPNAME>supergroup</GROUPNAME>
-        <MODE>420</MODE>
-      </PERMISSION_STATUS>
-    </DATA>
-  </RECORD>
-  <RECORD>
-    <OPCODE>OP_ALLOCATE_BLOCK_ID</OPCODE>
-    <DATA>
-      <TXID>71</TXID>
-      <BLOCK_ID>1073741835</BLOCK_ID>
-    </DATA>
-  </RECORD>
-  <RECORD>
-    <OPCODE>OP_SET_GENSTAMP_V2</OPCODE>
-    <DATA>
-      <TXID>72</TXID>
-      <GENSTAMPV2>1012</GENSTAMPV2>
-    </DATA>
-  </RECORD>
-  <RECORD>
-    <OPCODE>OP_UPDATE_BLOCKS</OPCODE>
-    <DATA>
-      <TXID>73</TXID>
-      <PATH>/hard-lease-recovery-test</PATH>
-      <BLOCK>
-        <BLOCK_ID>1073741835</BLOCK_ID>
-        <NUM_BYTES>0</NUM_BYTES>
-        <GENSTAMP>1012</GENSTAMP>
-      </BLOCK>
-    </DATA>
-  </RECORD>
-  <RECORD>
-    <OPCODE>OP_UPDATE_BLOCKS</OPCODE>
-    <DATA>
-      <TXID>74</TXID>
-      <PATH>/hard-lease-recovery-test</PATH>
-      <BLOCK>
-        <BLOCK_ID>1073741835</BLOCK_ID>
-        <NUM_BYTES>0</NUM_BYTES>
-        <GENSTAMP>1012</GENSTAMP>
-      </BLOCK>
-    </DATA>
-  </RECORD>
-  <RECORD>
-    <OPCODE>OP_SET_GENSTAMP_V2</OPCODE>
-    <DATA>
-      <TXID>75</TXID>
-      <GENSTAMPV2>1013</GENSTAMPV2>
-    </DATA>
-  </RECORD>
-  <RECORD>
     <OPCODE>OP_REASSIGN_LEASE</OPCODE>
     <DATA>
-      <TXID>76</TXID>
-      <LEASEHOLDER>DFSClient_NONMAPREDUCE_-1834501254_1</LEASEHOLDER>
+      <TXID>67</TXID>
+      <LEASEHOLDER>DFSClient_NONMAPREDUCE_-1676409172_1</LEASEHOLDER>
       <PATH>/hard-lease-recovery-test</PATH>
       <NEWHOLDER>HDFS_NameNode</NEWHOLDER>
     </DATA>
@@ -984,23 +894,23 @@
   <RECORD>
     <OPCODE>OP_CLOSE</OPCODE>
     <DATA>
-      <TXID>77</TXID>
+      <TXID>68</TXID>
       <LENGTH>0</LENGTH>
       <INODEID>0</INODEID>
       <PATH>/hard-lease-recovery-test</PATH>
       <REPLICATION>1</REPLICATION>
-      <MTIME>1372107477870</MTIME>
-      <ATIME>1372107475275</ATIME>
+      <MTIME>1374817867688</MTIME>
+      <ATIME>1374817865017</ATIME>
       <BLOCKSIZE>512</BLOCKSIZE>
       <CLIENT_NAME></CLIENT_NAME>
       <CLIENT_MACHINE></CLIENT_MACHINE>
       <BLOCK>
-        <BLOCK_ID>1073741835</BLOCK_ID>
+        <BLOCK_ID>1073741834</BLOCK_ID>
         <NUM_BYTES>11</NUM_BYTES>
-        <GENSTAMP>1013</GENSTAMP>
+        <GENSTAMP>1011</GENSTAMP>
       </BLOCK>
       <PERMISSION_STATUS>
-        <USERNAME>aagarwal</USERNAME>
+        <USERNAME>jing</USERNAME>
         <GROUPNAME>supergroup</GROUPNAME>
         <MODE>420</MODE>
       </PERMISSION_STATUS>
@@ -1009,7 +919,7 @@
   <RECORD>
     <OPCODE>OP_END_LOG_SEGMENT</OPCODE>
     <DATA>
-      <TXID>78</TXID>
+      <TXID>69</TXID>
     </DATA>
   </RECORD>
 </EDITS>

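Beyond regenerated timestamps, keys, and user names, this editsStored.xml refresh records two structural changes: the edits layout version moves from -46 to -47, and mutating records now carry RPC_CLIENTID/RPC_CALLID fields (an empty client id with call id -2 marks operations not issued through a client RPC). Such a pair is enough to recognize a retried client call. The sketch below illustrates the idea only; it is not the NameNode's implementation:

    import java.util.HashMap;
    import java.util.Map;

    public class RetryKeySketch {
      public static void main(String[] args) {
        // Illustrative: (RPC_CLIENTID, RPC_CALLID) identifies one client call.
        Map<String, String> applied = new HashMap<String, String>();
        String key = "5245793a-984b-4264-8d7c-7890775547a0" + ":" + 8;
        applied.put(key, "result of the original call");
        // A retry re-sends the same pair, so it can be answered from the
        // recorded outcome instead of being applied a second time.
        System.out.println(applied.containsKey(key)); // true
      }
    }
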
Modified: hadoop/common/branches/YARN-321/hadoop-hdfs-project/pom.xml
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-321/hadoop-hdfs-project/pom.xml?rev=1513258&r1=1513257&r2=1513258&view=diff
==============================================================================
--- hadoop/common/branches/YARN-321/hadoop-hdfs-project/pom.xml (original)
+++ hadoop/common/branches/YARN-321/hadoop-hdfs-project/pom.xml Mon Aug 12 21:25:49 2013
@@ -20,12 +20,12 @@ http://maven.apache.org/xsd/maven-4.0.0.
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project</artifactId>
-    <version>2.2.0-SNAPSHOT</version>
+    <version>2.3.0-SNAPSHOT</version>
     <relativePath>../hadoop-project</relativePath>
   </parent>
   <groupId>org.apache.hadoop</groupId>
   <artifactId>hadoop-hdfs-project</artifactId>
-  <version>2.2.0-SNAPSHOT</version>
+  <version>2.3.0-SNAPSHOT</version>
   <description>Apache Hadoop HDFS Project</description>
   <name>Apache Hadoop HDFS Project</name>
   <packaging>pom</packaging>